commit 3d1b788109
parent a3a4fcce15

HBASE-660 [Migration] addColumn/deleteColumn functionality in MetaUtils

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@662531 13f79535-47bb-0310-9956-ffa450edef68
org/apache/hadoop/hbase/HStoreKey.java
@@ -299,7 +299,8 @@ public class HStoreKey implements WritableComparable {
   /**
    * @param column
-   * @return New byte array that holds <code>column</code> family prefix.
+   * @return New byte array that holds <code>column</code> family prefix only
+   * (Does not include the colon DELIMITER).
    * @throws ColumnNameParseException
    * @see #parseColumn(byte[])
    */
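Note: the clarified javadoc above makes explicit that the returned family prefix stops before the ':' that separates family from qualifier in an HBase column name. The following sketch is illustrative only; it uses a hypothetical helper rather than HStoreKey's actual implementation:

// Hypothetical sketch of the documented contract; not the HStoreKey source.
// For a column name like "info:regioninfo", return only the family prefix
// ("info"), excluding the colon DELIMITER itself.
public final class FamilyPrefixSketch {
  private static final byte COLON = (byte) ':';

  public static byte [] familyPrefix(final byte [] column) {
    int delimiterIndex = -1;
    for (int i = 0; i < column.length; i++) {
      if (column[i] == COLON) {
        delimiterIndex = i;
        break;
      }
    }
    if (delimiterIndex < 0) {
      // HStoreKey throws ColumnNameParseException here; this sketch
      // substitutes a standard runtime exception to stay self-contained.
      throw new IllegalArgumentException("No colon delimiter in column name");
    }
    // Copy the family bytes only; the colon itself is excluded.
    byte [] family = new byte[delimiterIndex];
    System.arraycopy(column, 0, family, 0, delimiterIndex);
    return family;
  }
}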
org/apache/hadoop/hbase/master/ChangeTableState.java
@@ -25,6 +25,8 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.TreeMap;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;
@@ -33,6 +35,7 @@ import org.apache.hadoop.hbase.util.Writables;
 
 /** Instantiated to enable or disable a table */
 class ChangeTableState extends TableOperation {
+  private final Log LOG = LogFactory.getLog(this.getClass());
   private boolean online;
 
   protected final Map<String, HashSet<HRegionInfo>> servedRegions =
org/apache/hadoop/hbase/master/ColumnOperation.java
@@ -20,6 +20,9 @@
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;
@@ -27,6 +30,7 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.util.Writables;
 
 abstract class ColumnOperation extends TableOperation {
+  private final Log LOG = LogFactory.getLog(this.getClass());
 
   protected ColumnOperation(final HMaster master, final byte [] tableName)
   throws IOException {
@@ -53,4 +57,4 @@ abstract class ColumnOperation extends TableOperation {
       LOG.debug("updated columns in row: " + i.getRegionName());
     }
   }
-}
\ No newline at end of file
+}
org/apache/hadoop/hbase/master/DeleteColumn.java
@@ -22,8 +22,10 @@ package org.apache.hadoop.hbase.master;
 import java.io.IOException;
 
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 
 /** Instantiated to remove a column family from a table */
@@ -45,11 +47,8 @@ class DeleteColumn extends ColumnOperation {
         i.getTableDesc().removeFamily(columnName);
         updateRegionInfo(server, m.getRegionName(), i);
         // Delete the directories used by the column
-        int encodedName = i.getEncodedName();
-        this.master.fs.delete(
-          HStoreFile.getMapDir(tabledir, encodedName, columnName), true);
-        this.master.fs.delete(
-          HStoreFile.getInfoDir(tabledir, encodedName, columnName), true);
+        FSUtils.deleteColumnFamily(this.master.fs, tabledir, i.getEncodedName(),
+          this.columnName);
       }
     }
   }
org/apache/hadoop/hbase/master/TableDelete.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
@@ -33,6 +35,7 @@ import org.apache.hadoop.hbase.ipc.HRegionInterface;
  * Instantiated to delete a table. Table must be offline.
  */
 class TableDelete extends TableOperation {
+  private final Log LOG = LogFactory.getLog(this.getClass());
 
   TableDelete(final HMaster master, final byte [] tableName) throws IOException {
     super(master, tableName);
org/apache/hadoop/hbase/master/TableOperation.java
@@ -25,59 +25,42 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.io.RowResult;
-import org.apache.hadoop.hbase.util.Sleeper;
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
 
 /**
  * Abstract base class for operations that need to examine all HRegionInfo
- * objects that make up a table. (For a table, operate on each of its rows
- * in .META.) To gain the
+ * objects in a table. (For a table, operate on each of its rows
+ * in .META.).
  */
 abstract class TableOperation implements HConstants {
-  static final Long ZERO_L = Long.valueOf(0L);
-  protected static final Log LOG = LogFactory.getLog(TableOperation.class);
-
-  protected Set<MetaRegion> metaRegions;
-  protected byte [] tableName;
-  protected Set<HRegionInfo> unservedRegions;
+  private final Set<MetaRegion> metaRegions;
+  protected final byte [] tableName;
+  protected final Set<HRegionInfo> unservedRegions = new HashSet<HRegionInfo>();
   protected HMaster master;
-  protected final int numRetries;
-  protected final Sleeper sleeper;
 
   protected TableOperation(final HMaster master, final byte [] tableName)
   throws IOException {
-    this.sleeper = master.sleeper;
-    this.numRetries = master.numRetries;
-
     this.master = master;
 
     if (!this.master.isMasterRunning()) {
       throw new MasterNotRunningException();
     }
 
     this.tableName = tableName;
-    this.unservedRegions = new HashSet<HRegionInfo>();
 
     // We can not access any meta region if they have not already been
     // assigned and scanned.
     if (master.regionManager.metaScannerThread.waitForMetaRegionsOrClose()) {
       // We're shutting down. Forget it.
       throw new MasterNotRunningException();
     }
 
     this.metaRegions = master.regionManager.getMetaRegionsForTable(tableName);
   }
 
@@ -147,7 +130,7 @@ abstract class TableOperation implements HConstants {
         postProcessMeta(m, server);
         unservedRegions.clear();
 
-        return true;
+        return Boolean.TRUE;
       }
     }
 
@@ -178,4 +161,4 @@ abstract class TableOperation implements HConstants {
 
   protected abstract void postProcessMeta(MetaRegion m,
     HRegionInterface server) throws IOException;
-}
\ No newline at end of file
+}
org/apache/hadoop/hbase/util/FSUtils.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.FileSystemVersionException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 
 /**
  * Utility methods for interacting with the underlying file system.
@@ -177,4 +178,19 @@ public class FSUtils {
   public static String getPath(Path p) {
     return p.toUri().getPath();
   }
+
+  /**
+   * Delete the directories used by the column family under the passed region.
+   * @param fs Filesystem to use.
+   * @param tabledir The directory under hbase.rootdir for this table.
+   * @param encodedRegionName The region name encoded.
+   * @param family Family to delete.
+   * @throws IOException
+   */
+  public static void deleteColumnFamily(final FileSystem fs,
+    final Path tabledir, final int encodedRegionName, final byte [] family)
+  throws IOException {
+    fs.delete(HStoreFile.getMapDir(tabledir, encodedRegionName, family), true);
+    fs.delete(HStoreFile.getInfoDir(tabledir, encodedRegionName, family), true);
+  }
 }
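Note: the new FSUtils.deleteColumnFamily helper centralizes the pair of fs.delete calls that DeleteColumn previously issued inline (see the DeleteColumn hunk above): one for the family's mapfiles directory and one for its info directory. A minimal caller sketch follows; the table directory, encoded region name, and family name are placeholders, not values from this commit:

// Illustrative caller for the helper added in this commit; the concrete
// path, region, and family values below are hypothetical.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;

public class DeleteFamilyExample {
  public static void main(String [] args) throws Exception {
    Configuration conf = new HBaseConfiguration();
    FileSystem fs = FileSystem.get(conf);          // filesystem hosting hbase.rootdir
    Path tabledir = new Path("/hbase", "mytable"); // hypothetical table directory
    int encodedRegionName = 1028785192;            // hypothetical encoded region name
    byte [] family = Bytes.toBytes("oldcolumn");   // family prefix, no colon delimiter
    // Removes both the mapfiles and the info directories for the family.
    FSUtils.deleteColumnFamily(fs, tabledir, encodedRegionName, family);
  }
}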
org/apache/hadoop/hbase/util/MetaUtils.java
@@ -22,28 +22,36 @@ package org.apache.hadoop.hbase.util;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 import java.util.SortedMap;
 import java.util.TreeMap;
 
+import org.apache.commons.httpclient.methods.GetMethod;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.regionserver.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 
 /**
- * Contains utility methods for manipulating HBase meta tables
+ * Contains utility methods for manipulating HBase meta tables.
+ * Be sure to call {@link #shutdown()} when done with this class so it closes
+ * resources opened during meta processing (ROOT, META, etc.).
  */
 public class MetaUtils {
   private static final Log LOG = LogFactory.getLog(MetaUtils.class);
@@ -150,7 +158,11 @@ public class MetaUtils {
     return meta;
   }
 
-  /** Closes root region if open. Also closes and deletes the HLog. */
+  /**
+   * Closes catalog regions if open. Also closes and deletes the HLog. You
+   * must call this method if you want to persist changes made during a
+   * MetaUtils edit session.
+   */
   public void shutdown() {
     if (this.rootRegion != null) {
       try {
@@ -209,7 +221,6 @@ public class MetaUtils {
     }
 
     // Open root region so we can scan it
-
     if (this.rootRegion == null) {
       openRootRegion();
     }
@@ -261,7 +272,7 @@ public class MetaUtils {
     HRegion metaRegion = openMetaRegion(metaRegionInfo);
     scanMetaRegion(metaRegion, listener);
   }
 
   /**
    * Scan the passed in metaregion <code>m</code> invoking the passed
    * <code>listener</code> per row found.
@@ -269,8 +280,7 @@ public class MetaUtils {
    * @param listener
    * @throws IOException
    */
-  public void scanMetaRegion(final HRegion m,
-    final ScannerListener listener)
+  public void scanMetaRegion(final HRegion m, final ScannerListener listener)
   throws IOException {
     InternalScanner metaScanner = m.getScanner(HConstants.COL_REGIONINFO_ARRAY,
       HConstants.EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP, null);
@@ -295,13 +305,13 @@ public class MetaUtils {
     metaScanner.close();
   }
 
   private void openRootRegion() throws IOException {
     this.rootRegion = HRegion.openHRegion(HRegionInfo.ROOT_REGIONINFO,
       this.rootdir, this.log, this.conf);
     this.rootRegion.compactStores();
   }
 
   private HRegion openMetaRegion(HRegionInfo metaInfo) throws IOException {
     HRegion meta =
       HRegion.openHRegion(metaInfo, this.rootdir, this.log, this.conf);
@@ -339,4 +349,117 @@ public class MetaUtils {
     b.delete(HConstants.COL_STARTCODE);
     t.commit(b);
   }
+
+  /**
+   * Offline version of the online TableOperation,
+   * org.apache.hadoop.hbase.master.AddColumn.
+   * @param tableName
+   * @param hcd Add this column to <code>tableName</code>
+   * @throws IOException
+   */
+  public void addColumn(final byte [] tableName,
+      final HColumnDescriptor hcd)
+  throws IOException {
+    List<HRegionInfo> metas = getMETARowsInROOT();
+    for (HRegionInfo hri: metas) {
+      final HRegion m = getMetaRegion(hri);
+      scanMetaRegion(m, new ScannerListener() {
+        private boolean inTable = true;
+
+        @SuppressWarnings("synthetic-access")
+        public boolean processRow(HRegionInfo info) throws IOException {
+          LOG.debug("Testing " + Bytes.toString(tableName) + " against " +
+            Bytes.toString(info.getTableDesc().getName()));
+          if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
+            this.inTable = false;
+            info.getTableDesc().addFamily(hcd);
+            updateMETARegionInfo(m, info);
+            return true;
+          }
+          // If we got here and we have not yet encountered the table yet,
+          // inTable will be false.  Otherwise, we've passed out the table.
+          // Stop the scanner.
+          return this.inTable;
+        }});
+    }
+  }
+
+  /**
+   * Offline version of the online TableOperation,
+   * org.apache.hadoop.hbase.master.DeleteColumn.
+   * @param tableName
+   * @param columnFamily Name of column name to remove.
+   * @throws IOException
+   */
+  public void deleteColumn(final byte [] tableName,
+      final byte [] columnFamily) throws IOException {
+    List<HRegionInfo> metas = getMETARowsInROOT();
+    final Path tabledir = new Path(rootdir, Bytes.toString(tableName));
+    for (HRegionInfo hri: metas) {
+      final HRegion m = getMetaRegion(hri);
+      scanMetaRegion(m, new ScannerListener() {
+        private boolean inTable = true;
+
+        @SuppressWarnings("synthetic-access")
+        public boolean processRow(HRegionInfo info) throws IOException {
+          if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
+            this.inTable = false;
+            info.getTableDesc().removeFamily(columnFamily);
+            updateMETARegionInfo(m, info);
+            FSUtils.deleteColumnFamily(fs, tabledir, info.getEncodedName(),
+              HStoreKey.getFamily(columnFamily));
+            return false;
+          }
+          // If we got here and we have not yet encountered the table yet,
+          // inTable will be false.  Otherwise, we've passed out the table.
+          // Stop the scanner.
+          return this.inTable;
+        }});
+    }
+  }
+
+  private void updateMETARegionInfo(HRegion r, final HRegionInfo hri)
+  throws IOException {
+    if (LOG.isDebugEnabled()) {
+      HRegionInfo h = Writables.getHRegionInfoOrNull(
+        r.get(hri.getRegionName(), HConstants.COL_REGIONINFO).getValue());
+      LOG.debug("Old " + Bytes.toString(HConstants.COL_REGIONINFO) +
+        " for " + hri.toString() + " in " + r.toString() + " is: " +
+        h.toString());
+    }
+    BatchUpdate b = new BatchUpdate(hri.getRegionName());
+    b.put(HConstants.COL_REGIONINFO, Writables.getBytes(hri));
+    r.batchUpdate(b);
+    if (LOG.isDebugEnabled()) {
+      HRegionInfo h = Writables.getHRegionInfoOrNull(
+        r.get(hri.getRegionName(), HConstants.COL_REGIONINFO).getValue());
+      LOG.debug("New " + Bytes.toString(HConstants.COL_REGIONINFO) +
+        " for " + hri.toString() + " in " + r.toString() + " is: " +
+        h.toString());
+    }
+  }
+
+  /**
+   * @return List of <code>.META.</code> {@link HRegionInfo} found in the
+   * <code>-ROOT-</code> table.
+   * @throws IOException
+   * @see #getMetaRegion(HRegionInfo)
+   */
+  public List<HRegionInfo> getMETARowsInROOT() throws IOException {
+    if (!initialized) {
+      throw new IllegalStateException("Must call initialize method first.");
+    }
+    final List<HRegionInfo> result = new ArrayList<HRegionInfo>();
+    scanRootRegion(new ScannerListener() {
+      @SuppressWarnings("unused")
+      public boolean processRow(HRegionInfo info) throws IOException {
+        if (Bytes.equals(info.getTableDesc().getName(),
+            HConstants.META_TABLE_NAME)) {
+          result.add(info);
+          return false;
+        }
+        return true;
+      }});
+    return result;
+  }
 }
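Note: taken together, the new MetaUtils methods support an offline schema-edit session: initialize MetaUtils, rewrite the HRegionInfo rows for the target table via addColumn/deleteColumn, then call shutdown() so the edits persist (the updated javadoc above makes shutdown() mandatory for this). A minimal session sketch mirroring the TestMetaUtils flow below; the table and family names are placeholders:

// Offline add/delete of a column family with MetaUtils, as added in this
// commit; assumes the cluster is not running. Names are placeholders.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.MetaUtils;

public class OfflineColumnEdit {
  public static void main(String [] args) throws Exception {
    MetaUtils utils = new MetaUtils(new HBaseConfiguration());
    utils.initialize();
    try {
      byte [] table = Bytes.toBytes("mytable");
      utils.addColumn(table, new HColumnDescriptor("newcolumn:"));
      utils.deleteColumn(table, Bytes.toBytes("oldcolumn:"));
    } finally {
      // Required: persists the edits and closes ROOT/META and the HLog.
      utils.shutdown();
    }
  }
}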
org/apache/hadoop/hbase/AbstractMergeTestBase.java
@@ -68,9 +68,9 @@ public abstract class AbstractMergeTestBase extends HBaseClusterTestCase {
   }
 
   @Override
-  protected void HBaseClusterSetup() throws Exception {
+  protected void hBaseClusterSetup() throws Exception {
     if (startMiniHBase) {
-      super.HBaseClusterSetup();
+      super.hBaseClusterSetup();
     }
   }
 
org/apache/hadoop/hbase/HBaseClusterTestCase.java
@@ -86,7 +86,7 @@ public abstract class HBaseClusterTestCase extends HBaseTestCase {
   * Actually start the MiniHBase instance.
   */
  @SuppressWarnings("unused")
-  protected void HBaseClusterSetup() throws Exception {
+  protected void hBaseClusterSetup() throws Exception {
    // start the mini cluster
    this.cluster = new MiniHBaseCluster(conf, regionServers);
    // opening the META table ensures that cluster is running
@@ -125,7 +125,7 @@ public abstract class HBaseClusterTestCase extends HBaseTestCase {
    preHBaseClusterSetup();
 
    // start the instance
-    HBaseClusterSetup();
+    hBaseClusterSetup();
 
    // run post-cluster setup
    postHBaseClusterSetup();
org/apache/hadoop/hbase/util/TestMetaUtils.java (new file)
@@ -0,0 +1,58 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+
+
+public class TestMetaUtils extends HBaseClusterTestCase {
+  public void testColumnEdits() throws Exception {
+    HBaseAdmin admin = new HBaseAdmin(this.conf);
+    final String oldColumn = "oldcolumn:";
+    // Add three tables
+    for (int i = 0; i < 5; i++) {
+      HTableDescriptor htd = new HTableDescriptor(getName() + i);
+      htd.addFamily(new HColumnDescriptor(oldColumn));
+      admin.createTable(htd);
+    }
+    this.cluster.shutdown();
+    this.cluster = null;
+    MetaUtils utils = new MetaUtils(this.conf);
+    utils.initialize();
+    // Add a new column to the third table, getName() + '2', and remove the old.
+    final byte [] editTable = Bytes.toBytes(getName() + 2);
+    final byte [] newColumn = Bytes.toBytes("newcolumn:");
+    utils.addColumn(editTable, new HColumnDescriptor(newColumn));
+    utils.deleteColumn(editTable, Bytes.toBytes(oldColumn));
+    utils.shutdown();
+    // Now assert columns were added and deleted.
+    this.cluster = new MiniHBaseCluster(this.conf, 1);
+    HTable t = new HTable(conf, editTable);
+    HTableDescriptor htd = t.getMetadata();
+    HColumnDescriptor hcd = htd.getFamily(newColumn);
+    assertTrue(hcd != null);
+    assertNull(htd.getFamily(Bytes.toBytes(oldColumn)));
+  }
+}