HBASE-7721 Atomic multi-row mutations in META

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1454498 13f79535-47bb-0310-9956-ffa450edef68
Enis Soztutar 2013-03-08 19:10:32 +00:00
parent 37831d99a2
commit 2cfb9cae5d
19 changed files with 348 additions and 572 deletions
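At its core, the change stops writing the offlined split parent and its daughter rows to .META. as independent Puts and instead commits all three through the MultiRowMutationEndpoint coprocessor in one atomic call. A minimal sketch of that call pattern, mirroring the multiPut() helper this commit adds to MetaEditor below (the metaTable handle, routing row, and Puts are illustrative placeholders):

// Sketch only: atomically apply several Puts to rows hosted by one META region.
CoprocessorRpcChannel channel = metaTable.coprocessorService(row);
MultiMutateRequest.Builder builder = MultiMutateRequest.newBuilder();
for (Put put : puts) {
  builder.addMutationRequest(ProtobufUtil.toMutate(MutateType.PUT, put));
}
MultiRowMutationService.BlockingInterface service =
    MultiRowMutationService.newBlockingStub(channel);
try {
  service.mutateRows(null, builder.build()); // all rows commit, or none do
} catch (ServiceException ex) {
  throw ProtobufUtil.toIOException(ex);
}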


@ -18,23 +18,6 @@
*/
package org.apache.hadoop.hbase;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
@ -51,10 +34,28 @@ import java.util.TreeMap;
import java.util.TreeSet;
import java.util.regex.Matcher;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.WritableComparable;
import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
/**
* HTableDescriptor contains the details about an HBase table such as the descriptors of
* all the column families, is the table a catalog table, <code> -ROOT- </code> or
* <code> .META. </code>, if the table is read only, the maximum size of the memstore,
* when the region split should occur, coprocessors associated with it etc...
*/
@InterfaceAudience.Public
@ -76,7 +77,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
private String nameAsString = "";
/**
* A map which holds the metadata information of the table. This metadata
* includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
* MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
*/
@ -93,10 +94,10 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
public static final String SPLIT_POLICY = "SPLIT_POLICY";
/**
* <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
* attribute which denotes the maximum size of the store file after which
* a region split occurs
*
* @see #getMaxFileSize()
*/
public static final String MAX_FILESIZE = "MAX_FILESIZE";
@ -108,9 +109,9 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
new ImmutableBytesWritable(Bytes.toBytes(OWNER));
/**
* <em>INTERNAL</em> Used by rest interface to access this metadata
* attribute which denotes if the table is Read Only
*
* @see #isReadOnly()
*/
public static final String READONLY = "READONLY";
@ -118,10 +119,10 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
new ImmutableBytesWritable(Bytes.toBytes(READONLY));
/**
* <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
* attribute which represents the maximum size of the memstore after which
* its contents are flushed onto the disk
*
* @see #getMemStoreFlushSize()
*/
public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
@ -129,9 +130,9 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
new ImmutableBytesWritable(Bytes.toBytes(MEMSTORE_FLUSHSIZE));
/**
* <em>INTERNAL</em> Used by rest interface to access this metadata
* attribute which denotes if the table is a -ROOT- region or not
*
* @see #isRootRegion()
*/
public static final String IS_ROOT = "IS_ROOT";
@ -139,10 +140,10 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT));
/**
* <em>INTERNAL</em> Used by rest interface to access this metadata
* attribute which denotes if it is a catalog table, either
* <code> .META. </code> or <code> -ROOT- </code>
*
* @see #isMetaRegion()
*/
public static final String IS_META = "IS_META";
@ -150,7 +151,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
new ImmutableBytesWritable(Bytes.toBytes(IS_META));
/**
* <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
* attribute which denotes if the deferred log flush option is enabled
*/
public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
@ -176,7 +177,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
public static final boolean DEFAULT_READONLY = false;
/**
* Constant that denotes the maximum default size of the memstore after which
* the contents are flushed to the store files
*/
public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;
@ -313,9 +314,9 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
}
/*
* Set meta flags on this table.
* IS_ROOT_KEY is set if it's a -ROOT- table
* IS_META_KEY is set if it's either a -ROOT- or a .META. table
* Called by constructors.
* @param name
*/
@ -327,8 +328,8 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/**
* Check if the descriptor represents a <code> -ROOT- </code> region.
*
* @return true if this is a <code> -ROOT- </code> region
*/
public boolean isRootRegion() {
if (this.root == null) {
@ -338,11 +339,11 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
}
/**
* <em> INTERNAL </em> Used to denote if the current table represents
* <code> -ROOT- </code> region. This is used internally by the
* HTableDescriptor constructors
*
* @param isRoot true if this is the <code> -ROOT- </code> region
*/
protected void setRootRegion(boolean isRoot) {
// TODO: Make the value a boolean rather than String of boolean.
@ -351,10 +352,10 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/**
* Checks if this table is either <code> -ROOT- </code> or <code> .META. </code>
* region.
*
* @return true if this is either a <code> -ROOT- </code> or <code> .META. </code>
* region
*/
public boolean isMetaRegion() {
if (this.meta == null) {
@ -379,31 +380,31 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
}
/**
* <em> INTERNAL </em> Used to denote if the current table represents
* <code> -ROOT- </code> or <code> .META. </code> region. This is used
* internally by the HTableDescriptor constructors
*
* @param isMeta true if it's either a <code> -ROOT- </code> or
* <code> .META. </code> region
*/
protected void setMetaRegion(boolean isMeta) {
setValue(IS_META_KEY, isMeta? TRUE: FALSE);
}
/**
* Checks if the table is a <code>.META.</code> table
*
* @return true if table is <code> .META. </code> region.
*/
public boolean isMetaTable() {
return isMetaRegion() && !isRootRegion();
}
/**
* Checks if the tableName being passed represents either
* <code> -ROOT- </code> or <code> .META. </code>
*
* @return true if the tableName is either <code> -ROOT- </code>
* or <code> .META. </code>
*/
public static boolean isMetaTable(final byte [] tableName) {
@ -441,7 +442,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
+ " conflicted with system reserved words");
}
for (int i = 0; i < tableName.length; i++) {
if (Character.isLetterOrDigit(tableName[i]) || tableName[i] == '_' ||
tableName[i] == '-' || tableName[i] == '.') {
continue;
}
@ -454,7 +455,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/**
* Getter for accessing the metadata associated with the key
*
* @param key The key.
* @return The value.
* @see #values
@ -472,7 +473,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/**
* Getter for accessing the metadata associated with the key
*
* @param key The key.
* @return The value.
* @see #values
@ -486,7 +487,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/**
* Getter for fetching an unmodifiable {@link #values} map.
*
* @return unmodifiable map {@link #values}.
* @see #values
*/
@ -497,7 +498,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/**
* Setter for storing metadata as a (key, value) pair in {@link #values} map
*
* @param key The key.
* @param value The value.
* @see #values
@ -526,7 +527,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/**
* Setter for storing metadata as a (key, value) pair in {@link #values} map
*
* @param key The key.
* @param value The value.
* @see #values
@ -538,10 +539,10 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
setValue(Bytes.toBytes(key), Bytes.toBytes(value));
}
}
/**
* Remove metadata represented by the key from the {@link #values} map
*
* @param key Key whose key and value we're to remove from HTableDescriptor
* parameters.
*/
@ -560,9 +561,9 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
}
/**
* Check if the readOnly flag of the table is set. If the readOnly flag is
* set then the contents of the table can only be read from but not modified.
*
* @return true if all columns in the table should be read only
*/
public boolean isReadOnly() {
@ -571,9 +572,9 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/**
* Setting the table as read only sets all the columns in the table as read
* only. By default all tables are modifiable, but if the readOnly flag is
* set to true then the contents of the table can only be read but not modified.
*
* @param readOnly True if all of the columns in the table should be read
* only.
*/
@ -582,10 +583,10 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
}
/**
* Check if deferred log edits are enabled on the table.
*
* @return true if deferred log flush is enabled on the table
*
* @see #setDeferredLogFlush(boolean)
*/
public synchronized boolean isDeferredLogFlush() {
@ -597,17 +598,17 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
}
/**
* This is used to defer the log edits syncing to the file system. Every time
* an edit is sent to the server it is first sync'd to the file system by the
* log writer. This sync is an expensive operation and thus can be deferred so
* that the edits are kept in memory for a specified period of time as represented
* by <code> hbase.regionserver.optionallogflushinterval </code> and not flushed
* for every edit.
* <p>
* NOTE:- This option might result in data loss if the region server crashes
* before these deferred edits in memory are flushed onto the filesystem.
* </p>
*
* @param isDeferredLogFlush
*/
public synchronized void setDeferredLogFlush(final boolean isDeferredLogFlush) {
@ -617,8 +618,8 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/**
* Get the name of the table as a byte array.
*
* @return name of table
*/
public byte [] getName() {
return name;
@ -626,18 +627,18 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/**
* Get the name of the table as a String
*
* @return name of table as a String
*/
public String getNameAsString() {
return this.nameAsString;
}
/**
* This gets the class associated with the region split policy which
* determines when a region split should occur. The class used by
* default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
*
* @return the class name of the region split policy for this table.
* If this returns null, the default split policy is used.
*/
@ -646,9 +647,9 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
}
/**
* Set the name of the table.
*
* @param name name of table
*/
public void setName(byte[] name) {
this.name = name;
@ -656,7 +657,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
setMetaFlags(this.name);
}
/**
* Returns the maximum size up to which a region can grow, after which a region
* split is triggered. The region size is represented by the size of the biggest
* store file in that region.
@ -672,19 +673,19 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
}
return -1;
}
/**
* Sets the maximum size up to which a region can grow, after which a region
* split is triggered. The region size is represented by the size of the biggest
* store file in that region, i.e. if the biggest store file grows beyond the
* maxFileSize, then the region split is triggered. This defaults to a value of
* 256 MB.
* <p>
* This is not an absolute value and might vary. Assume that a single row exceeds
* the maxFileSize then the storeFileSize will be greater than maxFileSize since
* a single row cannot be split across multiple regions.
* </p>
*
* @param maxFileSize The maximum file size that a store file can grow to
* before a split is triggered.
*/
@ -708,9 +709,9 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
}
/**
* Represents the maximum size of the memstore after which the contents of the
* memstore are flushed to the filesystem. This defaults to a size of 64 MB.
*
* @param memstoreFlushSize memory cache flush size for each hregion
*/
public void setMemStoreFlushSize(long memstoreFlushSize) {
@ -857,12 +858,12 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
}
/**
* Compare the contents of the descriptor with another one passed as a parameter.
* Checks if the obj passed is an instance of HTableDescriptor, if yes then the
* contents of the descriptors are compared.
*
* @return true if the contents of the two descriptors exactly match
*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
@ -897,7 +898,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
}
/**
* <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
* and is used for de-serialization of the HTableDescriptor over RPC
* @deprecated Writables are going away. Use pb {@link #parseFrom(byte[])} instead.
*/
@ -944,7 +945,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
}
/**
* <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
* and is used for serialization of the HTableDescriptor over RPC
* @deprecated Writables are going away.
* Use {@link com.google.protobuf.MessageLite#toByteArray} instead.
@ -980,9 +981,9 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/**
* Compares the descriptor with another descriptor which is passed as a parameter.
* This compares the content of the two descriptors and not the reference.
*
* @return 0 if the contents of the descriptors are exactly matching,
* 1 if there is a mismatch in the contents
*/
@Override
public int compareTo(final HTableDescriptor other) {
@ -1022,59 +1023,59 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
}
/**
* Returns an unmodifiable collection of all the {@link HColumnDescriptor}
* of all the column families of the table.
*
* @return Immutable collection of {@link HColumnDescriptor} of all the
* column families.
*/
public Collection<HColumnDescriptor> getFamilies() {
return Collections.unmodifiableCollection(this.families.values());
}
/**
* Returns all the column family names of the current table. The map of
* HTableDescriptor contains mapping of family name to HColumnDescriptors.
* This returns all the keys of the family map which represents the column
* family names of the table.
*
* @return Immutable sorted set of the keys of the families.
*/
public Set<byte[]> getFamiliesKeys() {
return Collections.unmodifiableSet(this.families.keySet());
}
/**
* Returns an array of all the {@link HColumnDescriptor} of the column families
* of the table.
*
* @return Array of all the HColumnDescriptors of the current table
*
* @see #getFamilies()
*/
public HColumnDescriptor[] getColumnFamilies() {
Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
}
/**
* Returns the HColumnDescriptor for a specific column family with name as
* specified by the parameter column.
*
* @param column Column family name
* @return Column descriptor for the passed family name, or null if
* no such family exists.
*/
public HColumnDescriptor getFamily(final byte [] column) {
return this.families.get(column);
}
/**
* Removes the HColumnDescriptor with name specified by the parameter column
* from the table descriptor
*
* @param column Name of the column family to be removed.
* @return Column descriptor for the removed family, or null if the
* family did not exist.
@ -1082,7 +1083,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
public HColumnDescriptor removeFamily(final byte [] column) {
return this.families.remove(column);
}
/**
* Add a table coprocessor to this table. The coprocessor
@ -1098,7 +1099,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
}
/**
* Add a table coprocessor to this table. The coprocessor
* type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
@ -1251,11 +1252,11 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
if (match != null)
remove(match);
}
/**
* Returns the {@link Path} object representing the table directory under
* path rootdir
*
* @param rootdir qualified path of HBase root directory
* @param tableName name of table
* @return {@link Path} for table
@ -1288,6 +1289,18 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
});
static {
try {
META_TABLEDESC.addCoprocessor(
"org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
null, Coprocessor.PRIORITY_SYSTEM, null);
} catch (IOException ex) {
//LOG.warn("exception in loading coprocessor for the META table");
throw new RuntimeException(ex);
}
}
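Since the endpoint is registered on META_TABLEDESC in the static block above, fresh deployments get atomic multi-row mutations on .META. automatically. The same endpoint can also be attached to a user table at creation time to get atomic cross-row mutations within a single region; a hedged sketch (the table name, family, and admin handle are illustrative, not part of this commit):

// Illustrative only: register the multi-row mutation endpoint on a user table.
HTableDescriptor htd = new HTableDescriptor("mytable");
htd.addFamily(new HColumnDescriptor("f"));
htd.addCoprocessor(
    "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
    null, Coprocessor.PRIORITY_USER, null);
new HBaseAdmin(conf).createTable(htd); // 'conf' is an assumed Configuration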
@Deprecated
public void setOwner(User owner) {
setOwnerString(owner != null ? owner.getShortName() : null);
@ -1326,7 +1339,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
* @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
* @throws DeserializationException
* @throws IOException
* @see #toByteArray()
*/
public static HTableDescriptor parseFrom(final byte [] bytes)


@ -37,7 +37,6 @@ import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;
import java.util.TreeSet;
/**
* Scanner class that contains the <code>.META.</code> table scanning logic
@ -49,7 +48,7 @@ import java.util.TreeSet;
*
* <p> Note that during concurrent region splits, the scanner might not see
* META changes across rows (for parent and daughter entries) consistently.
* see HBASE-5986, and {@link DefaultMetaScannerVisitor} for details. </p>
*/
@InterfaceAudience.Private
public class MetaScanner {
@ -276,7 +275,7 @@ public class MetaScanner {
public static List<HRegionInfo> listAllRegions(Configuration conf, final boolean offlined)
throws IOException {
final List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
MetaScannerVisitor visitor = new DefaultMetaScannerVisitor(conf) {
@Override
public boolean processRowInternal(Result result) throws IOException {
if (result == null || result.isEmpty()) {
@ -316,10 +315,7 @@ public class MetaScanner {
public boolean processRowInternal(Result rowResult) throws IOException {
HRegionInfo info = getHRegionInfo(rowResult);
ServerName serverName = HRegionInfo.getServerName(rowResult);
regions.put(new UnmodifyableHRegionInfo(info), serverName);
return true;
}
};
@ -355,39 +351,17 @@ public class MetaScanner {
* does not guarantee ordered traversal of meta entries, and can block until the
* META entries for daughters are available during splits.
*/
public static abstract class DefaultMetaScannerVisitor
extends MetaScannerVisitorBase {
private static final int DEFAULT_BLOCKING_TIMEOUT = 10000;
private TreeSet<byte[]> daughterRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
private int blockingTimeout;
private HTable metaTable;
protected Configuration conf;
public DefaultMetaScannerVisitor(Configuration conf) {
this.conf = conf;
this.blockingTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
DEFAULT_BLOCKING_TIMEOUT);
}
public abstract boolean processRowInternal(Result rowResult) throws IOException;
@Override
public void close() throws IOException {
super.close();
if (metaTable != null) {
metaTable.close();
metaTable = null;
}
}
public HTable getMetaTable() throws IOException {
if (metaTable == null) {
metaTable = new HTable(conf, HConstants.META_TABLE_NAME);
}
return metaTable;
}
@Override
public boolean processRow(Result rowResult) throws IOException {
HRegionInfo info = getHRegionInfo(rowResult);
@ -395,67 +369,11 @@ public class MetaScanner {
return true;
}
if (daughterRegions.remove(info.getRegionName())) {
return true; //we have already processed this row
//skip over offline and split regions
if (!(info.isOffline() || info.isSplit())) {
return processRowInternal(rowResult);
}
if (info.isSplitParent()) {
/* we have found a parent region which was split. We have to ensure that it's daughters are
* seen by this scanner as well, so we block until they are added to the META table. Even
* though we are waiting for META entries, ACID semantics in HBase indicates that this
* scanner might not see the new rows. So we manually query the daughter rows */
PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(rowResult);
HRegionInfo splitA = daughters.getFirst();
HRegionInfo splitB = daughters.getSecond();
HTable metaTable = getMetaTable();
long start = System.currentTimeMillis();
Result resultA = getRegionResultBlocking(metaTable, blockingTimeout,
splitA.getRegionName());
if (resultA != null) {
processRow(resultA);
daughterRegions.add(splitA.getRegionName());
} else {
throw new RegionOfflineException("Split daughter region " +
splitA.getRegionNameAsString() + " cannot be found in META.");
}
long rem = blockingTimeout - (System.currentTimeMillis() - start);
Result resultB = getRegionResultBlocking(metaTable, rem,
splitB.getRegionName());
if (resultB != null) {
processRow(resultB);
daughterRegions.add(splitB.getRegionName());
} else {
throw new RegionOfflineException("Split daughter region " +
splitB.getRegionNameAsString() + " cannot be found in META.");
}
}
return processRowInternal(rowResult);
}
private Result getRegionResultBlocking(HTable metaTable, long timeout, byte[] regionName)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("blocking until region is in META: " + Bytes.toStringBinary(regionName));
}
long start = System.currentTimeMillis();
while (System.currentTimeMillis() - start < timeout) {
Get get = new Get(regionName);
Result result = metaTable.get(get);
HRegionInfo info = getHRegionInfo(result);
if (info != null) {
return result;
}
try {
Thread.sleep(10);
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
break;
}
}
return null;
return true;
}
}
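With the blocking daughter lookup gone, a visitor now only implements processRowInternal(); processRow() itself skips offline parents and split regions. A sketch of the resulting usage pattern, modeled on listAllRegions() above (the metaScan() entry point is assumed from this era's MetaScanner API):

// Sketch: collect the HRegionInfo of every live user region from .META.
final List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
MetaScanner.MetaScannerVisitor visitor = new MetaScanner.DefaultMetaScannerVisitor(conf) {
  @Override
  public boolean processRowInternal(Result result) throws IOException {
    HRegionInfo info = HRegionInfo.getHRegionInfo(result);
    if (info != null) {
      regions.add(info); // offline/split parents never reach this method
    }
    return true; // keep scanning
  }
};
MetaScanner.metaScan(conf, visitor);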
@ -465,7 +383,7 @@ public class MetaScanner {
* does not guarantee ordered traversal of meta entries, and can block until the
* META entries for daughters are available during splits.
*/
public static abstract class TableMetaScannerVisitor extends DefaultMetaScannerVisitor {
private byte[] tableName;
public TableMetaScannerVisitor(Configuration conf, byte[] tableName) {


@ -34,8 +34,15 @@ import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.MutateType;
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest;
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiRowMutationService;
import org.apache.hadoop.hbase.util.Bytes;
import com.google.protobuf.ServiceException;
/**
* Writes region and assignment information to <code>.META.</code>.
* TODO: Put MetaReader and MetaEditor together; doesn't make sense having
@ -210,8 +217,8 @@ public class MetaEditor {
* Adds a (single) META row for the specified new region and its daughters. Note that this does
* not add its daughter's as different rows, but adds information about the daughters
* in the same row as the parent. Use
* {@link #splitRegion(CatalogTracker, HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
* if you want to do that.
* @param meta the HTable for META
* @param regionInfo region information
* @param splitA first split daughter of the parent regionInfo
@ -245,32 +252,6 @@ public class MetaEditor {
LOG.info("Added " + puts.size() + " regions in META");
}
/**
* Offline parent in meta.
* Used when splitting.
* @param catalogTracker
* @param parent
* @param a Split daughter region A
* @param b Split daughter region B
* @throws NotAllMetaRegionsOnlineException
* @throws IOException
*/
public static void offlineParentInMeta(CatalogTracker catalogTracker,
HRegionInfo parent, final HRegionInfo a, final HRegionInfo b)
throws NotAllMetaRegionsOnlineException, IOException {
HRegionInfo copyOfParent = new HRegionInfo(parent);
copyOfParent.setOffline(true);
copyOfParent.setSplit(true);
HTable meta = MetaReader.getMetaHTable(catalogTracker);
try {
addRegionToMeta(meta, copyOfParent, a, b);
LOG.info("Offlined parent region " + parent.getRegionNameAsString() +
" in META");
} finally {
meta.close();
}
}
/**
* Adds a daughter region entry to meta.
* @param regionInfo the region to put
@ -290,6 +271,60 @@ public class MetaEditor {
(sn == null? ", serverName=null": ", serverName=" + sn.toString()));
}
/**
* Splits the region into two in an atomic operation. Offlines the parent
* region with the information that it is split into two, and also adds
* the daughter regions. Does not add the location information to the daughter
* regions since they are not open yet.
* @param catalogTracker the catalog tracker
* @param parent the parent region which is split
* @param splitA Split daughter region A
* @param splitB Split daughter region B
* @param sn the location of the region
*/
public static void splitRegion(final CatalogTracker catalogTracker,
HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
ServerName sn) throws IOException {
HTable meta = MetaReader.getMetaHTable(catalogTracker);
HRegionInfo copyOfParent = new HRegionInfo(parent);
copyOfParent.setOffline(true);
copyOfParent.setSplit(true);
//Put for parent
Put putParent = makePutFromRegionInfo(copyOfParent);
addDaughtersToPut(putParent, splitA, splitB);
//Puts for daughters
Put putA = makePutFromRegionInfo(splitA);
Put putB = makePutFromRegionInfo(splitB);
addLocation(putA, sn, 1); //these are new regions, openSeqNum = 1 is fine.
addLocation(putB, sn, 1);
byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
multiPut(meta, tableRow, putParent, putA, putB);
}
/**
* Performs an atomic multi-Put operation against the given table.
*/
private static void multiPut(HTable table, byte[] row, Put... puts) throws IOException {
CoprocessorRpcChannel channel = table.coprocessorService(row);
MultiMutateRequest.Builder mmrBuilder = MultiMutateRequest.newBuilder();
for (Put put : puts) {
mmrBuilder.addMutationRequest(ProtobufUtil.toMutate(MutateType.PUT, put));
}
MultiRowMutationService.BlockingInterface service =
MultiRowMutationService.newBlockingStub(channel);
try {
service.mutateRows(null, mmrBuilder.build());
} catch (ServiceException ex) {
throw ProtobufUtil.toIOException(ex);
}
}
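Note the scope of the guarantee: the endpoint executes within a single region, so every row in one mutateRows() call must be hosted by the same META region; the row handed to coprocessorService() above only selects which region runs the batch. That holds for a split, where the parent and daughter entries sort together in .META.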
/**
* Updates the location of the specified META region in ROOT to be the
* specified server hostname and startcode.


@ -27,7 +27,6 @@ import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
@ -753,10 +752,6 @@ Server {
this.balancer.setClusterStatus(getClusterStatus());
// Fixing up missing daughters if any
status.setStatus("Fixing up missing daughters");
fixupDaughters(status);
if (!masterRecovery) {
// Start balancer and meta catalog janitor after meta and regions have
// been assigned.
@ -944,41 +939,6 @@ Server {
}
}
void fixupDaughters(final MonitoredTask status) throws IOException {
final Map<HRegionInfo, Result> offlineSplitParents =
new HashMap<HRegionInfo, Result>();
// This visitor collects offline split parents in the .META. table
MetaReader.Visitor visitor = new MetaReader.Visitor() {
@Override
public boolean visit(Result r) throws IOException {
if (r == null || r.isEmpty()) return true;
HRegionInfo info =
HRegionInfo.getHRegionInfo(r);
if (info == null) return true; // Keep scanning
if (info.isOffline() && info.isSplit()) {
offlineSplitParents.put(info, r);
}
// Returning true means "keep scanning"
return true;
}
};
// Run full scan of .META. catalog table passing in our custom visitor
MetaReader.fullScan(this.catalogTracker, visitor);
// Now work on our list of found parents. See if any we can clean up.
int fixups = 0;
for (Map.Entry<HRegionInfo, Result> e : offlineSplitParents.entrySet()) {
ServerName sn = HRegionInfo.getServerName(e.getValue());
if (!serverManager.isServerDead(sn)) { // Otherwise, let SSH take care of it
fixups += ServerShutdownHandler.fixupDaughters(
e.getValue(), assignmentManager, catalogTracker);
}
}
if (fixups != 0) {
LOG.info("Scanned the catalog and fixed up " + fixups +
" missing daughter region(s)");
}
}
/**
* Split a server's log and expire it if we find it is one of the online
* servers.


@ -443,7 +443,8 @@ public class MasterFileSystem {
bootstrap(rd, c);
}
// Create tableinfo-s for ROOT and META if not already there. This also updates the
// descriptors if they are older versions.
FSTableDescriptors.createTableDescriptor(fs, rd, HTableDescriptor.ROOT_TABLEDESC, false);
FSTableDescriptors.createTableDescriptor(fs, rd, HTableDescriptor.META_TABLEDESC, false);


@ -27,12 +27,10 @@ import java.util.NavigableMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.executor.EventHandler;
@ -43,8 +41,6 @@ import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.zookeeper.KeeperException;
@ -310,12 +306,9 @@ public class ServerShutdownHandler extends EventHandler {
return false;
}
if (hri.isOffline() && hri.isSplit()) {
LOG.debug("Offlined and split region " + hri.getRegionNameAsString() +
"; checking daughter presence");
if (MetaReader.getRegion(catalogTracker, hri.getRegionName()) == null) {
return false;
}
fixupDaughters(result, assignmentManager, catalogTracker);
//HBASE-7721: Split parent and daughters are inserted into META as an atomic operation.
//If the meta scanner saw the parent split, then it should see the daughters as assigned
//to the dead server. We don't have to do anything.
return false;
}
boolean disabling = assignmentManager.getZKTable().isDisablingTable(
@ -327,124 +320,4 @@ public class ServerShutdownHandler extends EventHandler {
}
return true;
}
/**
* Check that daughter regions are up in .META. and if not, add them.
* @param result The contents of the parent row in .META.
* @param assignmentManager
* @param catalogTracker
* @return the number of daughters missing and fixed
* @throws IOException
*/
public static int fixupDaughters(final Result result,
final AssignmentManager assignmentManager,
final CatalogTracker catalogTracker)
throws IOException {
PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(result);
int fixedA = fixupDaughter(result, daughters.getFirst(),
assignmentManager, catalogTracker);
int fixedB = fixupDaughter(result, daughters.getSecond(),
assignmentManager, catalogTracker);
return fixedA + fixedB;
}
/**
* Check individual daughter is up in .META.; fixup if its not.
* @param result The contents of the parent row in .META. - not used
* @param daughter Which daughter to check for.
* @return 1 if the daughter is missing and fixed. Otherwise 0
* @throws IOException
*/
static int fixupDaughter(final Result result, HRegionInfo daughter,
final AssignmentManager assignmentManager,
final CatalogTracker catalogTracker)
throws IOException {
if (daughter == null) return 0;
if (isDaughterMissing(catalogTracker, daughter)) {
LOG.info("Fixup; missing daughter " + daughter.getRegionNameAsString());
MetaEditor.addDaughter(catalogTracker, daughter, null, HConstants.NO_SEQNUM);
// TODO: Log WARN if the regiondir does not exist in the fs. If its not
// there then something wonky about the split -- things will keep going
// but could be missing references to parent region.
// And assign it.
assignmentManager.assign(daughter, true, true);
return 1;
} else {
LOG.debug("Daughter " + daughter.getRegionNameAsString() + " present");
}
return 0;
}
/**
* Look for presence of the daughter OR of a split of the daughter in .META.
* Daughter could have been split over on regionserver before a run of the
* catalogJanitor had chance to clear reference from parent.
* @param daughter Daughter region to search for.
* @throws IOException
*/
private static boolean isDaughterMissing(final CatalogTracker catalogTracker,
final HRegionInfo daughter) throws IOException {
FindDaughterVisitor visitor = new FindDaughterVisitor(daughter);
// Start the scan at what should be the daughter's row in the .META.
// We will either 1., find the daughter or some derivative split of the
// daughter (will have same table name and start row at least but will sort
// after because has larger regionid -- the regionid is timestamp of region
// creation), OR, we will not find anything with same table name and start
// row. If the latter, then assume daughter missing and do fixup.
byte [] startrow = daughter.getRegionName();
MetaReader.fullScan(catalogTracker, visitor, startrow);
return !visitor.foundDaughter();
}
/**
* Looks for daughter. Sets a flag if daughter or some progeny of daughter
* is found up in <code>.META.</code>.
*/
static class FindDaughterVisitor implements MetaReader.Visitor {
private final HRegionInfo daughter;
private boolean found = false;
FindDaughterVisitor(final HRegionInfo daughter) {
this.daughter = daughter;
}
/**
* @return True if we found a daughter region during our visiting.
*/
boolean foundDaughter() {
return this.found;
}
@Override
public boolean visit(Result r) throws IOException {
HRegionInfo hri =
HRegionInfo.getHRegionInfo(r);
if (hri == null) {
LOG.warn("No serialized HRegionInfo in " + r);
return true;
}
byte [] value = r.getValue(HConstants.CATALOG_FAMILY,
HConstants.SERVER_QUALIFIER);
// See if daughter is assigned to some server
if (value == null) return false;
// Now see if we have gone beyond the daughter's startrow.
if (!Bytes.equals(daughter.getTableName(),
hri.getTableName())) {
// We fell into another table. Stop scanning.
return false;
}
// If our start rows do not compare, move on.
if (!Bytes.equals(daughter.getStartKey(), hri.getStartKey())) {
return false;
}
// Else, table name and start rows compare. It means that the daughter
// or some derivative split of the daughter is up in .META. Daughter
// exists.
this.found = true;
return false;
}
}
}


@ -1692,12 +1692,10 @@ public class HRegionServer implements ClientProtocol,
}
@Override
public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct)
throws KeeperException, IOException {
checkOpen();
LOG.info("Post open deploy tasks for region=" + r.getRegionNameAsString() +
", daughter=" + daughter);
LOG.info("Post open deploy tasks for region=" + r.getRegionNameAsString());
// Do checks to see if we need to compact (references or too many files)
for (Store s : r.getStores().values()) {
if (s.hasReferences() || s.needsCompaction()) {
@ -1718,17 +1716,11 @@ public class HRegionServer implements ClientProtocol,
MetaEditor.updateMetaLocation(ct, r.getRegionInfo(),
this.serverNameFromMasterPOV, openSeqNum);
} else {
MetaEditor.updateRegionLocation(ct, r.getRegionInfo(),
this.serverNameFromMasterPOV, openSeqNum);
}
LOG.info("Done with post open deploy task for region=" +
r.getRegionNameAsString() + ", daughter=" + daughter);
r.getRegionNameAsString());
}


@ -40,7 +40,7 @@ public interface RegionServerServices extends OnlineRegions {
*/
public boolean isStopping();
/** @return the HLog for a particular region. Pass null for getting the
* default (common) WAL */
public HLog getWAL(HRegionInfo regionInfo) throws IOException;
@ -62,15 +62,13 @@ public interface RegionServerServices extends OnlineRegions {
/**
* Tasks to perform after region open to complete deploy of region on
* regionserver
*
* @param r Region to open.
* @param ct Instance of {@link CatalogTracker}
* @throws KeeperException
* @throws IOException
*/
public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct)
throws KeeperException, IOException;
/**


@ -222,7 +222,7 @@ public class SplitTransaction {
if (this.parent.getCoprocessorHost() != null) {
this.parent.getCoprocessorHost().preSplit();
}
// Coprocessor callback
if (this.parent.getCoprocessorHost() != null) {
this.parent.getCoprocessorHost().preSplit(this.splitrow);
@ -288,7 +288,7 @@ public class SplitTransaction {
throw new IOException(exceptionToThrow);
}
if (hstoreFilesToSplit.size() == 0) {
String errorMsg = "No store files to split for the region "+this.parent.getRegionInfo();
LOG.error(errorMsg);
@ -335,10 +335,14 @@ public class SplitTransaction {
// HBase-4562).
this.journal.add(JournalEntry.PONR);
// Edit parent in meta. Offlines parent region and adds splita and splitb
// as an atomic update. See HBASE-7721. This update to META determines
// whether the region is split or not in case of failures.
// If it is successful, master will roll-forward, if not, master will rollback
// and assign the parent region.
if (!testing) {
MetaEditor.splitRegion(server.getCatalogTracker(), parent.getRegionInfo(),
a.getRegionInfo(), b.getRegionInfo(), server.getServerName());
}
return new PairOfSameType<HRegion>(a, b);
}
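The atomic META update thus becomes the split's commit record. A master recovering from a regionserver crash no longer reconciles half-written parent and daughter rows (hence the fixupDaughters machinery deleted from HMaster and ServerShutdownHandler elsewhere in this commit); it rolls forward if the update is present and otherwise rolls back and reassigns the parent.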
@ -388,10 +392,10 @@ public class SplitTransaction {
if (services != null) {
try {
// add 2nd daughter first (see HBASE-4335)
services.postOpenDeployTasks(b, server.getCatalogTracker());
// Should add it to OnlineRegions
services.addToOnlineRegions(b);
services.postOpenDeployTasks(a, server.getCatalogTracker());
services.addToOnlineRegions(a);
} catch (KeeperException ke) {
throw new IOException(ke);
@ -733,7 +737,7 @@ public class SplitTransaction {
if (this.parent.getCoprocessorHost() != null) {
this.parent.getCoprocessorHost().preRollBackSplit();
}
boolean result = true;
FileSystem fs = this.parent.getFilesystem();
ListIterator<JournalEntry> iterator =


@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.zookeeper.KeeperException;
@ -318,7 +317,7 @@ public class OpenRegionHandler extends EventHandler {
public void run() {
try {
this.services.postOpenDeployTasks(this.region,
this.server.getCatalogTracker());
} catch (Exception e) {
LOG.warn("Exception running postOpenDeployTasks; region=" +
this.region.getRegionInfo().getEncodedName(), e);
@ -446,7 +445,7 @@ public class OpenRegionHandler extends EventHandler {
// Instantiate the region. This also periodically tickles our zk OPENING
// state so master doesn't timeout this region in transition.
region = HRegion.openHRegion(this.regionInfo, this.htd,
this.rsServices.getWAL(this.regionInfo),
this.server.getConfiguration(),
this.rsServices,
new CancelableProgressable() {
@ -491,7 +490,7 @@ public class OpenRegionHandler extends EventHandler {
* @param encodedName Name of the znode file (Region encodedName is the znode
* name).
* @param versionOfOfflineNode - version Of OfflineNode that needs to be compared
* before changing the node's state from OFFLINE
* @return True if successful transition.
*/
boolean transitionZookeeperOfflineToOpening(final String encodedName,


@ -54,7 +54,7 @@ import com.google.common.primitives.Ints;
* passed filesystem. It expects descriptors to be in a file under the
* table's directory in FS. Can be read-only -- i.e. does not modify
* the filesystem or can be read and write.
*
* <p>Also has utility for keeping up the table descriptors tableinfo file.
* The table schema file is kept under the table directory in the filesystem.
* It has a {@link #TABLEINFO_NAME} prefix and then a suffix that is the
@ -163,7 +163,7 @@ public class FSTableDescriptors implements TableDescriptors {
return cachedtdm.getTableDescriptor();
}
}
TableDescriptorModtime tdmt = null;
try {
tdmt = getTableDescriptorModtime(this.fs, this.rootdir, tablename);
@ -174,7 +174,7 @@ public class FSTableDescriptors implements TableDescriptors {
LOG.debug("Exception during readTableDecriptor. Current table name = "
+ tablename, ioe);
}
if (tdmt == null) {
LOG.warn("The following folder is in HBase's root directory and " +
"doesn't contain a table descriptor, " +
@ -241,7 +241,7 @@ public class FSTableDescriptors implements TableDescriptors {
/**
* Checks if <code>.tableinfo<code> exists for given table
*
* @param fs file system
* @param rootdir root directory of HBase installation
* @param tableName name of table
@ -488,7 +488,7 @@ public class FSTableDescriptors implements TableDescriptors {
* @param tableDir
* @param status
* @return Descriptor file or null if we failed write.
* @throws IOException
*/
private static Path writeTableDescriptor(final FileSystem fs,
final HTableDescriptor hTableDescriptor, final Path tableDir,
@ -554,7 +554,7 @@ public class FSTableDescriptors implements TableDescriptors {
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table.
*
* @param htableDescriptor
* @param conf
*/
@ -568,7 +568,7 @@ public class FSTableDescriptors implements TableDescriptors {
* Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
*
* @param htableDescriptor
* @param conf
* @param forceCreation True if we are to overwrite existing file.
@ -597,7 +597,7 @@ public class FSTableDescriptors implements TableDescriptors {
* Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
*
* @param fs
* @param htableDescriptor
* @param rootdir
@ -630,8 +630,10 @@ public class FSTableDescriptors implements TableDescriptors {
LOG.info("Current tableInfoPath = " + status.getPath());
if (!forceCreation) {
if (fs.exists(status.getPath()) && status.getLen() > 0) {
LOG.info("TableInfo already exists.. Skipping creation");
return false;
if (getTableDescriptor(fs, status.getPath().getParent()).equals(htableDescriptor)) {
LOG.info("TableInfo already exists.. Skipping creation");
return false;
}
}
}
}
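This relaxation matters for the upgrade path: previously any existing, non-empty tableinfo blocked creation outright, so the META descriptor on an upgraded cluster would never pick up the newly registered MultiRowMutationEndpoint. Comparing descriptor contents first lets MasterFileSystem rewrite stale ROOT and META descriptors, as the comment added there notes.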


@ -144,7 +144,7 @@ public abstract class FSUtils {
public static FSUtils getInstance(FileSystem fs, Configuration conf) {
String scheme = fs.getUri().getScheme();
if (scheme == null) {
LOG.warn("Could not find scheme for uri " +
LOG.warn("Could not find scheme for uri " +
fs.getUri() + ", default to hdfs");
scheme = "hdfs";
}
@ -190,7 +190,7 @@ public abstract class FSUtils {
* <li>use the default block size</li>
* <li>not track progress</li>
* </ol>
*
* @param fs {@link FileSystem} on which to write the file
* @param path {@link Path} to the file to write
* @return output stream to the created file
@ -210,7 +210,7 @@ public abstract class FSUtils {
* <li>use the default block size</li>
* <li>not track progress</li>
* </ol>
*
* @param fs {@link FileSystem} on which to write the file
* @param path {@link Path} to the file to write
* @param perm
@ -230,7 +230,7 @@ public abstract class FSUtils {
/**
* Get the file permissions specified in the configuration, if they are
* enabled.
*
* @param fs filesystem that the file will be created on.
* @param conf configuration to read for determining if permissions are
* enabled and which to use
@ -322,11 +322,11 @@ public abstract class FSUtils {
}
/**
* Check whether dfs is in safemode.
* @param conf
* @throws IOException
*/
public static void checkDfsSafeMode(final Configuration conf)
throws IOException {
boolean isInSafeMode = false;
FileSystem fs = FileSystem.get(conf);
@ -338,7 +338,7 @@ public abstract class FSUtils {
throw new IOException("File system is in safemode, it can't be written now");
}
}
/**
* Verifies current version of file system
*
@ -429,7 +429,7 @@ public abstract class FSUtils {
* @param message if true, issues a message on System.out
*
* @throws IOException e
* @throws DeserializationException
*/
public static void checkVersion(FileSystem fs, Path rootdir, boolean message)
throws IOException, DeserializationException {
@ -446,7 +446,7 @@ public abstract class FSUtils {
* @param retries number of times to retry
*
* @throws IOException e
* @throws DeserializationException
*/
public static void checkVersion(FileSystem fs, Path rootdir,
boolean message, int wait, int retries)
@ -611,7 +611,7 @@ public abstract class FSUtils {
/**
* @param cid
* @throws IOException
*/
private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
final ClusterId cid)
@ -786,9 +786,9 @@ public abstract class FSUtils {
* @param fs file system
* @param status file status of the file
* @param start start position of the portion
* @param length length of the portion
* @return The HDFS blocks distribution
*/
static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
final FileSystem fs, FileStatus status, long start, long length)
throws IOException {
@ -800,12 +800,12 @@ public abstract class FSUtils {
long len = bl.getLength();
blocksDistribution.addHostsAndBlockWeight(hosts, len);
}
return blocksDistribution;
}
/**
* Runs through the hbase rootdir and checks all stores have only
* one file in them -- that is, they've been major compacted. Looks
@ -874,7 +874,7 @@ public abstract class FSUtils {
*
* @param master The master defining the HBase root and file system.
* @return A map for each table and its percentage.
*
* @throws IOException When scanning the directory fails.
*/
public static Map<String, Integer> getTableFragmentation(
@ -1150,7 +1150,7 @@ public abstract class FSUtils {
}
/**
* Recover file lease. Used when a file is suspected
* to have been left open by another process.
* @param fs FileSystem handle
* @param p Path of file to recover lease
@ -1159,7 +1159,7 @@ public abstract class FSUtils {
*/
public abstract void recoverFileLease(final FileSystem fs, final Path p,
Configuration conf) throws IOException;
/**
* @param fs
* @param rootdir
@ -1322,10 +1322,10 @@ public abstract class FSUtils {
throws IOException {
return getRootDir(conf).getFileSystem(conf);
}
/**
* Runs through the HBase rootdir and creates a reverse lookup map for
* table StoreFile names to the full Path.
* <br>
* Example...<br>
* Key = 3944417774205889744 <br>
@ -1340,8 +1340,8 @@ public abstract class FSUtils {
final FileSystem fs, final Path hbaseRootDir)
throws IOException {
Map<String, Path> map = new HashMap<String, Path>();
// if this method looks similar to 'getTableFragmentation' that is because
// it was borrowed from it.
// only include the directory paths to tables
@ -1367,17 +1367,17 @@ public abstract class FSUtils {
Path sf = sfStatus.getPath();
map.put( sf.getName(), sf);
}
}
}
}
return map;
}
/**
* Calls fs.listStatus() and treats FileNotFoundException as non-fatal
* This accommodates differences between hadoop versions
*
* @param fs file system
* @param dir directory
* @param filter path filter
@ -1410,7 +1410,7 @@ public abstract class FSUtils {
/**
* Calls fs.delete() and returns the value returned by the fs.delete()
*
* @param fs
* @param path
* @param recursive
@ -1424,7 +1424,7 @@ public abstract class FSUtils {
/**
* Calls fs.exists(). Checks if the specified path exists
*
* @param fs
* @param path
* @return the value returned by fs.exists()
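The two wrappers above are thin shims over the raw FileSystem calls; the method names in this sketch (isExists, delete) are assumed from FSUtils naming conventions rather than visible in these truncated hunks:

    if (FSUtils.isExists(fs, path)) {   // assumed name; simply calls fs.exists()
      FSUtils.delete(fs, path, true);   // assumed name; simply calls fs.delete()
    }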
@ -1436,7 +1436,7 @@ public abstract class FSUtils {
/**
* Throw an exception if an action is not permitted by a user on a file.
*
* @param ugi
* the user
* @param file
@ -1485,7 +1485,7 @@ public abstract class FSUtils {
/**
* Recursive helper to log the state of the FS
*
* @see #logFileSystemState(FileSystem, Path, Log)
*/
private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
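logFSTree is the private recursion; per the @see tag, the public entry point would be used along these lines (a sketch, assuming fs, conf and LOG are in scope):

    // Log the whole directory tree under the HBase root, one line per entry.
    FSUtils.logFileSystemState(fs, FSUtils.getRootDir(conf), LOG);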
View File
@ -195,9 +195,7 @@ public class TestMasterCoprocessorExceptionWithAbort {
// (namely the part that shows the set of loaded coprocessors).
// In this test, there is only a single coprocessor (BuggyMasterObserver).
assertTrue(master.getLoadedCoprocessors().
equals("[" +
TestMasterCoprocessorExceptionWithAbort.BuggyMasterObserver.class.getName() +
"]"));
contains(TestMasterCoprocessorExceptionWithAbort.BuggyMasterObserver.class.getName()));
CreateTableThread createTableThread = new CreateTableThread(UTIL);
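The switch from equals to contains is deliberate: an exact match on the loaded-coprocessor list breaks as soon as any other coprocessor is registered in the JVM (presumably the multi-row mutation endpoint this commit attaches to .META.), while a containment check asserts only what the test actually cares about:

    String loaded = master.getLoadedCoprocessors();
    // Brittle: fails if anything besides the buggy observer is loaded.
    //   assertTrue(loaded.equals("[" + BuggyMasterObserver.class.getName() + "]"));
    // Robust: the buggy observer merely has to be present.
    assertTrue(loaded.contains(BuggyMasterObserver.class.getName()));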
View File
@ -19,11 +19,21 @@
package org.apache.hadoop.hbase.coprocessor;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.io.InterruptedIOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@ -35,8 +45,6 @@ import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static org.junit.Assert.*;
/**
* Tests unhandled exceptions thrown by coprocessors running on master.
* Expected result is that the master will remove the buggy coprocessor from
@ -173,7 +181,7 @@ public class TestMasterCoprocessorExceptionWithRemove {
// In this test, there is only a single coprocessor (BuggyMasterObserver).
String coprocessorName =
BuggyMasterObserver.class.getName();
assertTrue(master.getLoadedCoprocessors().equals("[" + coprocessorName + "]"));
assertTrue(master.getLoadedCoprocessors().contains(coprocessorName));
HTableDescriptor htd1 = new HTableDescriptor(TEST_TABLE1);
htd1.addFamily(new HColumnDescriptor(TEST_FAMILY1));
@ -201,7 +209,7 @@ public class TestMasterCoprocessorExceptionWithRemove {
masterTracker.masterZKNodeWasDeleted);
String loadedCoprocessors = master.getLoadedCoprocessors();
assertTrue(loadedCoprocessors.equals("[" + coprocessorName + "]"));
assertTrue(loadedCoprocessors.contains(coprocessorName));
View File
@ -291,7 +291,7 @@ class MockRegionServer implements AdminProtocol, ClientProtocol, RegionServerSer
}
@Override
public void postOpenDeployTasks(HRegion r, CatalogTracker ct, boolean daughter)
public void postOpenDeployTasks(HRegion r, CatalogTracker ct)
throws KeeperException, IOException {
// TODO Auto-generated method stub
}
View File
@ -118,7 +118,7 @@ public class TestEndToEndSplitTransaction {
// 3. finish phase II
// note that this replicates some code from SplitTransaction
// 2nd daughter first
server.postOpenDeployTasks(regions.getSecond(), server.getCatalogTracker(), true);
server.postOpenDeployTasks(regions.getSecond(), server.getCatalogTracker());
// Add to online regions
server.addToOnlineRegions(regions.getSecond());
// THIS is the crucial point:
@ -128,7 +128,7 @@ public class TestEndToEndSplitTransaction {
assertTrue(test(con, tableName, lastRow, server));
// first daughter second
server.postOpenDeployTasks(regions.getFirst(), server.getCatalogTracker(), true);
server.postOpenDeployTasks(regions.getFirst(), server.getCatalogTracker());
// Add to online regions
server.addToOnlineRegions(regions.getFirst());
assertTrue(test(con, tableName, firstRow, server));
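The dropped daughter flag follows from this commit's theme: daughter regions are now inserted into .META. atomically within the split transaction, so postOpenDeployTasks no longer needs to special-case them. A minimal implementer sketch of the new signature, mirroring the mocks later in this patch:

    @Override
    public void postOpenDeployTasks(HRegion r, CatalogTracker ct)
        throws KeeperException, IOException {
      // Daughter .META. rows already exist by the time the region opens,
      // so every region takes the same path here.
      addToOnlineRegions(r);
    }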
View File
@ -21,8 +21,8 @@ package org.apache.hadoop.hbase.regionserver;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@ -34,25 +34,25 @@ import java.util.concurrent.CountDownLatch;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.RegionTransition;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.exceptions.MasterNotRunningException;
import org.apache.hadoop.hbase.exceptions.UnknownRegionException;
import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionState;
@ -338,59 +338,6 @@ public class TestSplitTransactionOnCluster {
}
}
/**
* Messy test that simulates the case where a SplitTransaction fails to add one
* of the daughters to the .META. table before a crash. We're testing the
* fact that the shutdown handler will fix up the missing daughter region,
* adding it back into .META.
* @throws IOException
* @throws InterruptedException
*/
@Test (timeout = 300000) public void testShutdownSimpleFixup()
throws IOException, InterruptedException, ServiceException {
final byte [] tableName = Bytes.toBytes("testShutdownSimpleFixup");
// Create table then get the single region for our new table.
HTable t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
List<HRegion> regions = cluster.getRegions(tableName);
HRegionInfo hri = getAndCheckSingleTableRegion(regions);
int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri);
// Turn off balancer so it doesn't cut in and mess up our placements.
this.admin.setBalancerRunning(false, true);
// Turn off the meta scanner so it doesn't remove the parent on us.
cluster.getMaster().setCatalogJanitorEnabled(false);
try {
// Add a bit of load to the table so it becomes splittable.
TESTING_UTIL.loadTable(t, HConstants.CATALOG_FAMILY);
// Get region pre-split.
HRegionServer server = cluster.getRegionServer(tableRegionIndex);
printOutRegions(server, "Initial regions: ");
int regionCount = ProtobufUtil.getOnlineRegions(server).size();
// Now split.
split(hri, server, regionCount);
// Get daughters
List<HRegion> daughters = checkAndGetDaughters(tableName);
// Remove one of the daughters from .META. to simulate a failed insert of
// the daughter region into .META.
removeDaughterFromMeta(daughters.get(0).getRegionName());
// Now crash the server
cluster.abortRegionServer(tableRegionIndex);
waitUntilRegionServerDead();
awaitDaughters(tableName, daughters.size());
// Assert daughters are online.
regions = cluster.getRegions(tableName);
for (HRegion r: regions) {
assertTrue(daughters.contains(r));
}
} finally {
admin.setBalancerRunning(true, false);
cluster.getMaster().setCatalogJanitorEnabled(true);
t.close();
}
}
/**
* Test that if daughter split on us, we won't do the shutdown handler fixup
* just because we can't find the immediate daughter of an offlined parent.
View File
@ -47,7 +47,7 @@ import org.apache.zookeeper.KeeperException;
public class MockRegionServerServices implements RegionServerServices {
private final Map<String, HRegion> regions = new HashMap<String, HRegion>();
private boolean stopping = false;
private final ConcurrentSkipListMap<byte[], Boolean> rit =
new ConcurrentSkipListMap<byte[], Boolean>(Bytes.BYTES_COMPARATOR);
private HFileSystem hfs = null;
private ZooKeeperWatcher zkw = null;
@ -86,7 +86,7 @@ public class MockRegionServerServices implements RegionServerServices {
}
@Override
public void postOpenDeployTasks(HRegion r, CatalogTracker ct, boolean daughter)
public void postOpenDeployTasks(HRegion r, CatalogTracker ct)
throws KeeperException, IOException {
addToOnlineRegions(r);
}
@ -125,7 +125,7 @@ public class MockRegionServerServices implements RegionServerServices {
public ZooKeeperWatcher getZooKeeper() {
return zkw;
}
public RegionServerAccounting getRegionServerAccounting() {
return null;
}
View File
@ -17,7 +17,12 @@
*/
package org.apache.hadoop.hbase.util;
import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.FileNotFoundException;
import java.io.IOException;
@ -28,7 +33,12 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.exceptions.TableExistsException;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@ -266,5 +276,23 @@ public class TestFSTableDescriptors {
}
}
@Test
public void testCreateTableDescriptorUpdatesIfExistsAlready() throws IOException {
Path testdir = UTIL.getDataTestDir("testCreateTableDescriptorUpdatesIfThereExistsAlready");
HTableDescriptor htd = new HTableDescriptor(
"testCreateTableDescriptorUpdatesIfThereExistsAlready");
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
assertTrue(FSTableDescriptors.createTableDescriptor(fs, testdir, htd));
assertFalse(FSTableDescriptors.createTableDescriptor(fs, testdir, htd));
htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
assertTrue(FSTableDescriptors.createTableDescriptor(fs, testdir, htd)); //this will re-create
Path tableDir = FSUtils.getTablePath(testdir, htd.getName());
Path tmpTableDir = new Path(tableDir, ".tmp");
FileStatus[] statuses = fs.listStatus(tmpTableDir);
assertEquals(0, statuses.length);
assertEquals(htd, FSTableDescriptors.getTableDescriptor(fs, tableDir));
}
}
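A closing note on the new test: it pins down three behaviors of createTableDescriptor (a fresh write returns true, an identical existing descriptor returns false, a changed descriptor is rewritten) and verifies the .tmp staging directory is left empty afterward. The four-argument overload below, with a forceCreation flag, is assumed from the surrounding FSTableDescriptors API rather than shown in this hunk:

    // Hedged sketch: overwrite even if an identical descriptor already exists.
    FSTableDescriptors.createTableDescriptor(fs, testdir, htd, true);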