HBASE-9278 Reading Pre-namespace meta table edits kills the reader

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1518397 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2013-08-28 21:46:28 +00:00
parent 02de8c40d9
commit d196d1b742
3 changed files with 61 additions and 12 deletions

View File

@ -72,8 +72,18 @@ public final class TableName implements Comparable<TableName> {
public static final TableName NAMESPACE_TABLE_NAME =
valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "namespace");
private static final String OLD_META_STR = ".META.";
private static final String OLD_ROOT_STR = "-ROOT-";
public static final String OLD_META_STR = ".META.";
public static final String OLD_ROOT_STR = "-ROOT-";
/**
* TableName for old -ROOT- table. It is used to read/process old WALs which have
* ROOT edits.
*/
public static final TableName OLD_ROOT_TABLE_NAME = getADummyTableName(OLD_ROOT_STR);
/**
* TableName for old .META. table. Used in testing.
*/
public static final TableName OLD_META_TABLE_NAME = getADummyTableName(OLD_META_STR);
private byte[] name;
private String nameAsString;
@ -231,6 +241,18 @@ public final class TableName implements Comparable<TableName> {
return ret;
}
/**
 * Builds a TableName for the legacy catalog tables (old .META. and -ROOT-),
 * bypassing the character validation done by {@code valueOf}: legacy names
 * contain characters (e.g. '.') that are illegal in namespaced table names.
 * @param qualifier the legacy table name string, e.g. ".META." or "-ROOT-"
 * @return a dummy TableName instance (no validation performed) whose byte
 *         form is just the qualifier, placed in the system namespace
 */
private static TableName getADummyTableName(String qualifier) {
  String namespace = NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR;
  TableName dummy = new TableName();
  dummy.namespaceAsString = namespace;
  dummy.qualifierAsString = qualifier;
  dummy.nameAsString = createFullyQualified(namespace, qualifier);
  // Note: the byte[] form is only the qualifier (no namespace prefix),
  // matching how these pre-namespace table names appeared on the wire.
  dummy.name = Bytes.toBytes(qualifier);
  return dummy;
}
public static TableName valueOf(String namespaceAsString, String qualifierAsString) {
TableName ret = new TableName();
if(namespaceAsString == null || namespaceAsString.length() < 1) {

View File

@ -35,6 +35,7 @@ import java.util.UUID;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
@ -375,6 +376,7 @@ public class HLogKey implements WritableComparable<HLogKey> {
// @see Bytes#readByteArray(DataInput)
this.scopes = null; // writable HLogKey does not contain scopes
int len = WritableUtils.readVInt(in);
byte[] tablenameBytes = null;
if (len < 0) {
// what we just read was the version
version = Version.fromCode(len);
@ -387,12 +389,10 @@ public class HLogKey implements WritableComparable<HLogKey> {
if (compressionContext == null || !version.atLeast(Version.COMPRESSED)) {
this.encodedRegionName = new byte[len];
in.readFully(this.encodedRegionName);
byte[] tablenameBytes = Bytes.readByteArray(in);
this.tablename = TableName.valueOf(tablenameBytes);
tablenameBytes = Bytes.readByteArray(in);
} else {
this.encodedRegionName = Compressor.readCompressed(in, compressionContext.regionDict);
byte[] tablenameBytes = Compressor.readCompressed(in, compressionContext.tableDict);
this.tablename = TableName.valueOf(tablenameBytes);
tablenameBytes = Compressor.readCompressed(in, compressionContext.tableDict);
}
this.logSeqNum = in.readLong();
@ -413,6 +413,19 @@ public class HLogKey implements WritableComparable<HLogKey> {
// Means it's a very old key, just continue
}
}
try {
this.tablename = TableName.valueOf(tablenameBytes);
} catch (IllegalArgumentException iae) {
if (Bytes.toString(tablenameBytes).equals(TableName.OLD_META_STR)) {
// It is a pre-namespace meta table edit, continue with new format.
LOG.info("Got an old META edit, continuing with new format ");
this.tablename = TableName.META_TABLE_NAME;
this.encodedRegionName = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
} else if (Bytes.toString(tablenameBytes).equals(TableName.OLD_ROOT_STR)) {
this.tablename = TableName.OLD_ROOT_TABLE_NAME;
throw iae;
} else throw iae;
}
// Do not need to read the clusters information as we are using protobufs from 0.95
}

View File

@ -21,16 +21,20 @@ package org.apache.hadoop.hbase.regionserver.wal;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALTrailer;
import org.apache.hadoop.hbase.util.FSUtils;
@InterfaceAudience.Private
public abstract class ReaderBase implements HLog.Reader {
private static final Log LOG = LogFactory.getLog(ReaderBase.class);
protected Configuration conf;
protected FileSystem fs;
protected Path path;
@ -95,7 +99,18 @@ public abstract class ReaderBase implements HLog.Reader {
e.setCompressionContext(compressionContext);
}
boolean hasEntry = readNext(e);
boolean hasEntry = false;
try {
hasEntry = readNext(e);
} catch (IllegalArgumentException iae) {
TableName tableName = e.getKey().getTablename();
if (tableName != null && tableName.equals(TableName.OLD_ROOT_TABLE_NAME)) {
// It is old ROOT table edit, ignore it
LOG.info("Got an old ROOT edit, ignoring ");
return next(e);
}
else throw iae;
}
edit++;
if (compressionContext != null && emptyCompressionContext) {
emptyCompressionContext = false;
@ -103,7 +118,6 @@ public abstract class ReaderBase implements HLog.Reader {
return hasEntry ? e : null;
}
@Override
public void seek(long pos) throws IOException {
if (compressionContext != null && emptyCompressionContext) {