HBASE-660 [Migration] addColumn/deleteColumn functionality in MetaUtils

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@662531 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2008-06-02 19:36:55 +00:00
parent a3a4fcce15
commit 3d1b788109
11 changed files with 238 additions and 48 deletions
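
In short: the new MetaUtils entry points let a migration run add or drop a column family while the cluster is down. Below is a minimal sketch of an edit session, following the pattern the new TestMetaUtils at the bottom of this diff exercises; the table and family names here are hypothetical.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.MetaUtils;

public class OfflineSchemaEdit {
  public static void main(String [] args) throws Exception {
    // The cluster must be shut down before .META. is edited offline.
    MetaUtils utils = new MetaUtils(new HBaseConfiguration());
    utils.initialize();
    try {
      // Hypothetical table and family names, for illustration only.
      utils.addColumn(Bytes.toBytes("mytable"),
          new HColumnDescriptor(Bytes.toBytes("newfamily:")));
      utils.deleteColumn(Bytes.toBytes("mytable"),
          Bytes.toBytes("oldfamily:"));
    } finally {
      // shutdown() persists the edits and closes ROOT, .META. and the HLog.
      utils.shutdown();
    }
  }
}

Note that shutdown() must be called, else edits made through MetaUtils are not persisted (see its javadoc in this diff).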

View File

@ -299,7 +299,8 @@ public class HStoreKey implements WritableComparable {
/**
* @param column
* @return New byte array that holds <code>column</code> family prefix.
* @return New byte array that holds <code>column</code> family prefix only
* (Does not include the colon DELIMITER).
* @throws ColumnNameParseException
* @see #parseColumn(byte[])
*/
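
A quick illustration of the clarified contract above, assuming this documents the static HStoreKey.getFamily(byte []) that this commit calls elsewhere:

import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyPrefixExample {
  public static void main(String [] args) throws Exception {
    // Yields the bytes of "info" only; the ':' delimiter is not
    // part of the returned array.
    byte [] family = HStoreKey.getFamily(Bytes.toBytes("info:regioninfo"));
    System.out.println(Bytes.toString(family)); // prints "info"
  }
}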

View File

@ -25,6 +25,8 @@ import java.util.HashSet;
import java.util.Map;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.io.BatchUpdate;
@ -33,6 +35,7 @@ import org.apache.hadoop.hbase.util.Writables;
/** Instantiated to enable or disable a table */
class ChangeTableState extends TableOperation {
private final Log LOG = LogFactory.getLog(this.getClass());
private boolean online;
protected final Map<String, HashSet<HRegionInfo>> servedRegions =

View File

@ -20,6 +20,9 @@
package org.apache.hadoop.hbase.master;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.io.BatchUpdate;
@ -27,6 +30,7 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.util.Writables;
abstract class ColumnOperation extends TableOperation {
private final Log LOG = LogFactory.getLog(this.getClass());
protected ColumnOperation(final HMaster master, final byte [] tableName)
throws IOException {

View File

@ -22,8 +22,10 @@ package org.apache.hadoop.hbase.master;
import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
/** Instantiated to remove a column family from a table */
@ -45,11 +47,8 @@ class DeleteColumn extends ColumnOperation {
i.getTableDesc().removeFamily(columnName);
updateRegionInfo(server, m.getRegionName(), i);
// Delete the directories used by the column
int encodedName = i.getEncodedName();
this.master.fs.delete(
HStoreFile.getMapDir(tabledir, encodedName, columnName), true);
this.master.fs.delete(
HStoreFile.getInfoDir(tabledir, encodedName, columnName), true);
FSUtils.deleteColumnFamily(this.master.fs, tabledir, i.getEncodedName(),
this.columnName);
}
}
}

View File

@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.master;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
@ -33,6 +35,7 @@ import org.apache.hadoop.hbase.ipc.HRegionInterface;
* Instantiated to delete a table. Table must be offline.
*/
class TableDelete extends TableOperation {
private final Log LOG = LogFactory.getLog(this.getClass());
TableDelete(final HMaster master, final byte [] tableName) throws IOException {
super(master, tableName);

View File

@ -25,59 +25,42 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.util.Sleeper;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
/**
* Abstract base class for operations that need to examine all HRegionInfo
* objects that make up a table. (For a table, operate on each of its rows
* in .META.) To gain the
* objects in a table. (For a table, operate on each of its rows
* in .META.).
*/
abstract class TableOperation implements HConstants {
static final Long ZERO_L = Long.valueOf(0L);
protected static final Log LOG = LogFactory.getLog(TableOperation.class);
protected Set<MetaRegion> metaRegions;
protected byte [] tableName;
protected Set<HRegionInfo> unservedRegions;
private final Set<MetaRegion> metaRegions;
protected final byte [] tableName;
protected final Set<HRegionInfo> unservedRegions = new HashSet<HRegionInfo>();
protected HMaster master;
protected final int numRetries;
protected final Sleeper sleeper;
protected TableOperation(final HMaster master, final byte [] tableName)
throws IOException {
this.sleeper = master.sleeper;
this.numRetries = master.numRetries;
this.master = master;
if (!this.master.isMasterRunning()) {
throw new MasterNotRunningException();
}
this.tableName = tableName;
this.unservedRegions = new HashSet<HRegionInfo>();
// We cannot access any meta region that has not already been
// assigned and scanned.
if (master.regionManager.metaScannerThread.waitForMetaRegionsOrClose()) {
// We're shutting down. Forget it.
throw new MasterNotRunningException();
}
this.metaRegions = master.regionManager.getMetaRegionsForTable(tableName);
}
@ -147,7 +130,7 @@ abstract class TableOperation implements HConstants {
postProcessMeta(m, server);
unservedRegions.clear();
return true;
return Boolean.TRUE;
}
}

View File

@ -36,6 +36,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.FileSystemVersionException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
/**
* Utility methods for interacting with the underlying file system.
@ -177,4 +178,19 @@ public class FSUtils {
public static String getPath(Path p) {
return p.toUri().getPath();
}
/**
* Delete the directories used by the column family under the passed region.
* @param fs Filesystem to use.
* @param tabledir The directory under hbase.rootdir for this table.
* @param encodedRegionName The encoded region name.
* @param family Family to delete.
* @throws IOException
*/
public static void deleteColumnFamily(final FileSystem fs,
final Path tabledir, final int encodedRegionName, final byte [] family)
throws IOException {
fs.delete(HStoreFile.getMapDir(tabledir, encodedRegionName, family), true);
fs.delete(HStoreFile.getInfoDir(tabledir, encodedRegionName, family), true);
}
}
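
A sketch of a call site for the new helper, mirroring how DeleteColumn above and MetaUtils.deleteColumn below use it; the wrapper method and family name are placeholders:

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;

public class DeleteFamilyDirsExample {
  // Removes both directories HStoreFile keeps for one family of one
  // region: the mapfiles directory and the info directory.
  static void dropFamilyDirs(final FileSystem fs, final Path tabledir,
      final int encodedRegionName) throws IOException {
    // "oldfamily" is a placeholder family name.
    FSUtils.deleteColumnFamily(fs, tabledir, encodedRegionName,
        Bytes.toBytes("oldfamily"));
  }
}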

View File

@ -22,28 +22,36 @@ package org.apache.hadoop.hbase.util;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.regionserver.HLog;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
/**
* Contains utility methods for manipulating HBase meta tables
* Contains utility methods for manipulating HBase meta tables.
* Be sure to call {@link #shutdown()} when done with this class so it closes
* resources opened during meta processing (ROOT, META, etc.).
*/
public class MetaUtils {
private static final Log LOG = LogFactory.getLog(MetaUtils.class);
@ -150,7 +158,11 @@ public class MetaUtils {
return meta;
}
/** Closes root region if open. Also closes and deletes the HLog. */
/**
* Closes catalog regions if open. Also closes and deletes the HLog. You
* must call this method if you want to persist changes made during a
* MetaUtils edit session.
*/
public void shutdown() {
if (this.rootRegion != null) {
try {
@ -209,7 +221,6 @@ public class MetaUtils {
}
// Open root region so we can scan it
if (this.rootRegion == null) {
openRootRegion();
}
@ -269,8 +280,7 @@ public class MetaUtils {
* @param listener
* @throws IOException
*/
public void scanMetaRegion(final HRegion m,
final ScannerListener listener)
public void scanMetaRegion(final HRegion m, final ScannerListener listener)
throws IOException {
InternalScanner metaScanner = m.getScanner(HConstants.COL_REGIONINFO_ARRAY,
HConstants.EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP, null);
@ -339,4 +349,117 @@ public class MetaUtils {
b.delete(HConstants.COL_STARTCODE);
t.commit(b);
}
/**
* Offline version of the online TableOperation,
* org.apache.hadoop.hbase.master.AddColumn.
* @param tableName
* @param hcd Add this column to <code>tableName</code>
* @throws IOException
*/
public void addColumn(final byte [] tableName,
final HColumnDescriptor hcd)
throws IOException {
List<HRegionInfo> metas = getMETARowsInROOT();
for (HRegionInfo hri: metas) {
final HRegion m = getMetaRegion(hri);
scanMetaRegion(m, new ScannerListener() {
private boolean inTable = true;
@SuppressWarnings("synthetic-access")
public boolean processRow(HRegionInfo info) throws IOException {
LOG.debug("Testing " + Bytes.toString(tableName) + " against " +
Bytes.toString(info.getTableDesc().getName()));
if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
this.inTable = false;
info.getTableDesc().addFamily(hcd);
updateMETARegionInfo(m, info);
return true;
}
// If we have not yet encountered the table, inTable is still true
// and the scan continues. Otherwise we have passed beyond the
// table's rows, so returning false stops the scanner.
return this.inTable;
}});
}
}
/**
* Offline version of the online TableOperation,
* org.apache.hadoop.hbase.master.DeleteColumn.
* @param tableName
* @param columnFamily Name of column family to remove.
* @throws IOException
*/
public void deleteColumn(final byte [] tableName,
final byte [] columnFamily) throws IOException {
List<HRegionInfo> metas = getMETARowsInROOT();
final Path tabledir = new Path(rootdir, Bytes.toString(tableName));
for (HRegionInfo hri: metas) {
final HRegion m = getMetaRegion(hri);
scanMetaRegion(m, new ScannerListener() {
private boolean inTable = true;
@SuppressWarnings("synthetic-access")
public boolean processRow(HRegionInfo info) throws IOException {
if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
this.inTable = false;
info.getTableDesc().removeFamily(columnFamily);
updateMETARegionInfo(m, info);
FSUtils.deleteColumnFamily(fs, tabledir, info.getEncodedName(),
HStoreKey.getFamily(columnFamily));
return true; // Keep scanning; the table may span more regions.
}
// If we have not yet encountered the table, inTable is still true
// and the scan continues. Otherwise we have passed beyond the
// table's rows, so returning false stops the scanner.
return this.inTable;
}});
}
}
private void updateMETARegionInfo(HRegion r, final HRegionInfo hri)
throws IOException {
if (LOG.isDebugEnabled()) {
HRegionInfo h = Writables.getHRegionInfoOrNull(
r.get(hri.getRegionName(), HConstants.COL_REGIONINFO).getValue());
LOG.debug("Old " + Bytes.toString(HConstants.COL_REGIONINFO) +
" for " + hri.toString() + " in " + r.toString() + " is: " +
h.toString());
}
BatchUpdate b = new BatchUpdate(hri.getRegionName());
b.put(HConstants.COL_REGIONINFO, Writables.getBytes(hri));
r.batchUpdate(b);
if (LOG.isDebugEnabled()) {
HRegionInfo h = Writables.getHRegionInfoOrNull(
r.get(hri.getRegionName(), HConstants.COL_REGIONINFO).getValue());
LOG.debug("New " + Bytes.toString(HConstants.COL_REGIONINFO) +
" for " + hri.toString() + " in " + r.toString() + " is: " +
h.toString());
}
}
/**
* @return List of <code>.META.</code> {@link HRegionInfo} found in the
* <code>-ROOT-</code> table.
* @throws IOException
* @see #getMetaRegion(HRegionInfo)
*/
public List<HRegionInfo> getMETARowsInROOT() throws IOException {
if (!initialized) {
throw new IllegalStateException("Must call initialize method first.");
}
final List<HRegionInfo> result = new ArrayList<HRegionInfo>();
scanRootRegion(new ScannerListener() {
@SuppressWarnings("unused")
public boolean processRow(HRegionInfo info) throws IOException {
if (Bytes.equals(info.getTableDesc().getName(),
HConstants.META_TABLE_NAME)) {
result.add(info);
return false;
}
return true;
}});
return result;
}
}
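
The addColumn/deleteColumn methods above are thin listeners over scanMetaRegion. Here is a hedged sketch of the same pattern used read-only, assuming ScannerListener is MetaUtils's nested callback interface and getMetaRegion is callable by clients, as the code above suggests; the helper and its names are illustrative.

import java.io.IOException;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.MetaUtils;

public class CountRegionsExample {
  // Counts the regions of one table by visiting every row of every
  // .META. region offline, via the scan plumbing added in this commit.
  static int countTableRegions(final MetaUtils utils,
      final byte [] tableName) throws IOException {
    final int [] count = {0};
    for (HRegionInfo meta: utils.getMETARowsInROOT()) {
      utils.scanMetaRegion(utils.getMetaRegion(meta),
        new MetaUtils.ScannerListener() {
          public boolean processRow(HRegionInfo info) throws IOException {
            if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
              count[0]++;
            }
            return true; // read-only scan, so visit every row
          }
        });
    }
    return count[0];
  }
}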

View File

@ -68,9 +68,9 @@ public abstract class AbstractMergeTestBase extends HBaseClusterTestCase {
}
@Override
protected void HBaseClusterSetup() throws Exception {
protected void hBaseClusterSetup() throws Exception {
if (startMiniHBase) {
super.HBaseClusterSetup();
super.hBaseClusterSetup();
}
}

View File

@ -86,7 +86,7 @@ public abstract class HBaseClusterTestCase extends HBaseTestCase {
* Actually start the MiniHBase instance.
*/
@SuppressWarnings("unused")
protected void HBaseClusterSetup() throws Exception {
protected void hBaseClusterSetup() throws Exception {
// start the mini cluster
this.cluster = new MiniHBaseCluster(conf, regionServers);
// opening the META table ensures that cluster is running
@ -125,7 +125,7 @@ public abstract class HBaseClusterTestCase extends HBaseTestCase {
preHBaseClusterSetup();
// start the instance
HBaseClusterSetup();
hBaseClusterSetup();
// run post-cluster setup
postHBaseClusterSetup();

View File

@ -0,0 +1,58 @@
/**
* Copyright 2008 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.util;
import org.apache.hadoop.hbase.HBaseClusterTestCase;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
public class TestMetaUtils extends HBaseClusterTestCase {
public void testColumnEdits() throws Exception {
HBaseAdmin admin = new HBaseAdmin(this.conf);
final String oldColumn = "oldcolumn:";
// Add five tables
for (int i = 0; i < 5; i++) {
HTableDescriptor htd = new HTableDescriptor(getName() + i);
htd.addFamily(new HColumnDescriptor(oldColumn));
admin.createTable(htd);
}
this.cluster.shutdown();
this.cluster = null;
MetaUtils utils = new MetaUtils(this.conf);
utils.initialize();
// Add a new column to the third table, getName() + '2', and remove the old.
final byte [] editTable = Bytes.toBytes(getName() + 2);
final byte [] newColumn = Bytes.toBytes("newcolumn:");
utils.addColumn(editTable, new HColumnDescriptor(newColumn));
utils.deleteColumn(editTable, Bytes.toBytes(oldColumn));
utils.shutdown();
// Now assert columns were added and deleted.
this.cluster = new MiniHBaseCluster(this.conf, 1);
HTable t = new HTable(conf, editTable);
HTableDescriptor htd = t.getMetadata();
HColumnDescriptor hcd = htd.getFamily(newColumn);
assertNotNull(hcd);
assertNull(htd.getFamily(Bytes.toBytes(oldColumn)));
}
}