HBASE-659 HLog#cacheFlushLock not cleared; hangs a region; revert code mistakenly committed

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@662147 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2008-06-01 05:06:52 +00:00
parent 359fdf6238
commit 7e0dd33fb7
10 changed files with 43 additions and 179 deletions
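For context on the bug class named in the subject: if the flush path takes HLog's cacheFlushLock and an exception escapes before the lock is released, every later cache flush on that log blocks forever, hanging the region. A minimal sketch of the hazard and the usual try/finally fix; only the field name cacheFlushLock comes from the issue title, the surrounding method is hypothetical.

import java.io.IOException;
import java.util.concurrent.locks.ReentrantLock;

class FlushSketch {
  private final ReentrantLock cacheFlushLock = new ReentrantLock();

  // Broken shape: if flushToDisk() throws, unlock() is skipped and the
  // lock is never cleared, so the next flush hangs on lock().
  void flushLeaky() throws IOException {
    cacheFlushLock.lock();
    flushToDisk();
    cacheFlushLock.unlock();
  }

  // Safe shape: finally guarantees the lock is cleared on every path.
  void flushSafe() throws IOException {
    cacheFlushLock.lock();
    try {
      flushToDisk();
    } finally {
      cacheFlushLock.unlock();
    }
  }

  private void flushToDisk() throws IOException {
    // stand-in for the real memcache flush
  }
}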

View File

@@ -11,9 +11,9 @@
# Run the java magic include and import basic HBase types.
include Java
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.HTable
import org.apache.hadoop.hbase.client.HBaseAdmin
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.HColumnDescriptor
import org.apache.hadoop.hbase.HConstants
import org.apache.hadoop.hbase.HTableDescriptor
@@ -30,17 +30,14 @@ def hbase
puts ' version Output HBase version'
end
def versionstr
"Version: #{org.apache.hadoop.hbase.util.VersionInfo.getVersion()},\
def version
puts "Version: #{org.apache.hadoop.hbase.util.VersionInfo.getVersion()},\
r#{org.apache.hadoop.hbase.util.VersionInfo.getRevision()},\
#{org.apache.hadoop.hbase.util.VersionInfo.getDate()}"
end
def version
puts versionstr()
end
# Output a banner message that tells users where to go for help
# TODO: Test that we're in irb context. For now presume it.
puts "HBase Shell; " + versionstr()
puts "+ Type 'hbase<RETURN>' for list of HBase commands"
# TODO: Test that we are in shell context.
puts "HBase Shell; type 'hbase<RETURN>' for the list of supported HBase commands"
version

View File

@@ -23,7 +23,6 @@
# The java implementation to use. Required.
# export JAVA_HOME=/usr/lib/j2sdk1.5-sun
export JAVA_HOME=/usr
# Extra Java CLASSPATH elements. Optional.
# export HBASE_CLASSPATH=

View File

@@ -22,29 +22,4 @@
*/
-->
<configuration>
<property>
<name>hbase.master</name>
<value>durruti.local:60000</value>
<description>The host and port that the HBase master runs at.
A value of 'local' runs the master and a regionserver in
a single process.
</description>
</property>
<property>
<name>hbase.rootdir</name>
<value>hdfs://durruti.local:10000/hbase</value>
<description>The directory shared by region servers.
Should be fully-qualified to include the filesystem to use.
E.g: hdfs://NAMENODE_SERVER:PORT/HBASE_ROOTDIR
</description>
</property>
<property>
<name>dfs.replication</name>
<value>1</value>
<description>Default block replication.
The actual number of replications can be specified when the file is created.
The default is used if replication is not specified in create time.
</description>
</property>
</configuration>
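For reference, client and daemon code picks these properties up through HBaseConfiguration, which layers hbase-site.xml over hbase-default.xml. A minimal sketch using that era's API; the key names come from the file above, the fallback value here is illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;

public class ConfSketch {
  public static void main(String[] args) {
    // Loads hbase-default.xml, then overlays hbase-site.xml.
    HBaseConfiguration conf = new HBaseConfiguration();
    String master = conf.get("hbase.master", "local");
    String rootdir = conf.get("hbase.rootdir");
    System.out.println("master=" + master + ", rootdir=" + rootdir);
  }
}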

View File

@@ -41,6 +41,3 @@ log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}:
# Custom Logging levels
#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
log4j.logger.org.apache.hadoop.fs=DEBUG
log4j.logger.org.apache.hadoop.dfs=DEBUG
log4j.logger.org.apache.hadoop.hbase=DEBUG
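These levels gate the Commons Logging calls in the classes touched below: a category at DEBUG here is what makes the corresponding LOG.debug(...) output appear. A minimal sketch of the consuming side, matching the LOG fields in this commit (the class itself is hypothetical).

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

class LoggingSketch {
  private static final Log LOG = LogFactory.getLog(LoggingSketch.class);

  void doWork() {
    // The guard avoids building the message string when the
    // org.apache.hadoop.hbase category is above DEBUG.
    if (LOG.isDebugEnabled()) {
      LOG.debug("doing work");
    }
  }
}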

View File

@@ -25,8 +25,6 @@ import java.util.HashSet;
import java.util.Map;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.io.BatchUpdate;
@@ -35,7 +33,6 @@ import org.apache.hadoop.hbase.util.Writables;
/** Instantiated to enable or disable a table */
class ChangeTableState extends TableOperation {
private final Log LOG = LogFactory.getLog(this.getClass());
private boolean online;
protected final Map<String, HashSet<HRegionInfo>> servedRegions =

View File

@@ -20,9 +20,6 @@
package org.apache.hadoop.hbase.master;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.io.BatchUpdate;
@@ -30,7 +27,6 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.util.Writables;
abstract class ColumnOperation extends TableOperation {
private final Log LOG = LogFactory.getLog(this.getClass());
protected ColumnOperation(final HMaster master, final byte [] tableName)
throws IOException {
@@ -57,4 +53,4 @@ abstract class ColumnOperation extends TableOperation {
LOG.debug("updated columns in row: " + i.getRegionName());
}
}
}
}

View File

@@ -21,8 +21,6 @@ package org.apache.hadoop.hbase.master;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
@@ -35,7 +33,6 @@ import org.apache.hadoop.hbase.ipc.HRegionInterface;
* Instantiated to delete a table. Table must be offline.
*/
class TableDelete extends TableOperation {
private final Log LOG = LogFactory.getLog(this.getClass());
TableDelete(final HMaster master, final byte [] tableName) throws IOException {
super(master, tableName);
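The three master operation classes in this commit differ only in where the logger lives: the reverted code gave each subclass its own instance logger, while the restored TableOperation below keeps one shared static logger on the base class. A sketch of the two shapes, with hypothetical class names:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

// Reverted shape: every subclass carries its own field, and log
// categories follow the concrete class.
abstract class PerClassLoggerOp {
  protected final Log LOG = LogFactory.getLog(this.getClass());
}

// Restored shape: one static logger on the base class, so all table
// operations log under a single category.
abstract class SharedLoggerOp {
  protected static final Log LOG = LogFactory.getLog(SharedLoggerOp.class);
}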

View File

@@ -25,42 +25,59 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.util.Sleeper;
/**
* Abstract base class for operations that need to examine all HRegionInfo
* objects in a table. (For a table, operate on each of its rows
* in .META.).
* objects that make up a table. (For a table, operate on each of its rows
* in .META.) To gain the
*/
abstract class TableOperation implements HConstants {
private final Set<MetaRegion> metaRegions;
protected final byte [] tableName;
protected final Set<HRegionInfo> unservedRegions = new HashSet<HRegionInfo>();
static final Long ZERO_L = Long.valueOf(0L);
protected static final Log LOG = LogFactory.getLog(TableOperation.class);
protected Set<MetaRegion> metaRegions;
protected byte [] tableName;
protected Set<HRegionInfo> unservedRegions;
protected HMaster master;
protected TableOperation(final HMaster master, final byte [] tableName)
protected final int numRetries;
protected final Sleeper sleeper;
protected TableOperation(final HMaster master, final byte [] tableName)
throws IOException {
this.sleeper = master.sleeper;
this.numRetries = master.numRetries;
this.master = master;
if (!this.master.isMasterRunning()) {
throw new MasterNotRunningException();
}
this.tableName = tableName;
this.unservedRegions = new HashSet<HRegionInfo>();
// We cannot access any meta regions if they have not already been
// assigned and scanned.
if (master.regionManager.metaScannerThread.waitForMetaRegionsOrClose()) {
// We're shutting down. Forget it.
throw new MasterNotRunningException();
}
this.metaRegions = master.regionManager.getMetaRegionsForTable(tableName);
}
@@ -130,7 +147,7 @@ abstract class TableOperation implements HConstants {
postProcessMeta(m, server);
unservedRegions.clear();
return Boolean.TRUE;
return true;
}
}
@@ -161,4 +178,4 @@ abstract class TableOperation implements HConstants {
protected abstract void postProcessMeta(MetaRegion m,
HRegionInterface server) throws IOException;
}
}
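TableOperation is a template method: the base class locates the table's .META. regions, drives the scan, gathers unservedRegions, and defers the per-table work to hooks like the abstract postProcessMeta above. A boiled-down sketch of that shape with simplified types; ChangeTableState, ColumnOperation, and TableDelete are the real subclasses.

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

abstract class OperationSketch {
  protected final Set<String> unservedRegions = new HashSet<String>();

  // The base class owns the scan loop over the table's meta rows.
  public final boolean process() throws IOException {
    for (String region : scanMetaRows()) {
      unservedRegions.add(region);
    }
    postProcessMeta();       // subclass hook, as in the diff above
    unservedRegions.clear(); // then report success, as the hunk above does
    return true;
  }

  protected abstract Iterable<String> scanMetaRows() throws IOException;
  protected abstract void postProcessMeta() throws IOException;
}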

View File

@@ -22,36 +22,28 @@ package org.apache.hadoop.hbase.util;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.commons.httpclient.methods.GetMethod;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.regionserver.HLog;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
/**
* Contains utility methods for manipulating HBase meta tables.
* Be sure to call {@link #shutdown()} when done with this class so it closes
* resources opened during meta processing (ROOT, META, etc.).
* Contains utility methods for manipulating HBase meta tables
*/
public class MetaUtils {
private static final Log LOG = LogFactory.getLog(MetaUtils.class);
@@ -217,6 +209,7 @@ public class MetaUtils {
}
// Open root region so we can scan it
if (this.rootRegion == null) {
openRootRegion();
}
@@ -268,7 +261,7 @@
HRegion metaRegion = openMetaRegion(metaRegionInfo);
scanMetaRegion(metaRegion, listener);
}
/**
* Scan the passed in metaregion <code>m</code> invoking the passed
* <code>listener</code> per row found.
@@ -276,7 +269,8 @@
* @param listener
* @throws IOException
*/
public void scanMetaRegion(final HRegion m, final ScannerListener listener)
public void scanMetaRegion(final HRegion m,
final ScannerListener listener)
throws IOException {
InternalScanner metaScanner = m.getScanner(HConstants.COL_REGIONINFO_ARRAY,
HConstants.EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP, null);
@@ -301,13 +295,13 @@
metaScanner.close();
}
}
private void openRootRegion() throws IOException {
this.rootRegion = HRegion.openHRegion(HRegionInfo.ROOT_REGIONINFO,
this.rootdir, this.log, this.conf);
this.rootRegion.compactStores();
}
private HRegion openMetaRegion(HRegionInfo metaInfo) throws IOException {
HRegion meta =
HRegion.openHRegion(metaInfo, this.rootdir, this.log, this.conf);
@@ -345,99 +339,4 @@
b.delete(HConstants.COL_STARTCODE);
t.commit(b);
}
/**
* @param tableName
* @param hcd Add this column to <code>tableName</code>
* @throws IOException
*/
public void addColumn(final byte [] tableName,
final HColumnDescriptor hcd)
throws IOException {
List<HRegionInfo> metas = getMETARowsInROOT();
for (HRegionInfo hri: metas) {
final HRegion m = getMetaRegion(hri);
scanMetaRegion(m, new ScannerListener() {
private boolean inTable = false;
@SuppressWarnings("synthetic-access")
public boolean processRow(HRegionInfo info) throws IOException {
if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
this.inTable = true;
info.getTableDesc().addFamily(hcd);
updateMETARegionInfo(m, info);
return false;
}
// If we get here and we have not yet encountered the table,
// inTable will be false. Otherwise, we have passed beyond the table.
// Stop the scanner.
return this.inTable;
}});
}
}
/**
* @param tableName
* @param columnFamily Name of the column family to remove.
* @throws IOException
*/
public void deleteColumn(final byte [] tableName,
final byte [] columnFamily) throws IOException {
List<HRegionInfo> metas = getMETARowsInROOT();
for (HRegionInfo hri: metas) {
final HRegion m = getMetaRegion(hri);
scanMetaRegion(m, new ScannerListener() {
private boolean inTable = false;
@SuppressWarnings("synthetic-access")
public boolean processRow(HRegionInfo info) throws IOException {
if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
this.inTable = true;
info.getTableDesc().removeFamily(columnFamily);
updateMETARegionInfo(m, info);
return false;
}
// If we get here and we have not yet encountered the table,
// inTable will be false. Otherwise, we have passed beyond the table.
// Stop the scanner.
return this.inTable;
}});
}
}
private void updateMETARegionInfo(HRegion r, final HRegionInfo hri)
throws IOException {
BatchUpdate b = new BatchUpdate(hri.getRegionName());
b.put(HConstants.COL_REGIONINFO, Writables.getBytes(hri));
r.batchUpdate(b);
if (LOG.isDebugEnabled()) {
LOG.debug("Updated " + Bytes.toString(HConstants.COL_REGIONINFO) +
" column in row " + hri.getRegionName() + " in " +
Bytes.toString(r.getRegionName()));
}
}
/**
* @return List of <code>.META.</code> {@link HRegionInfo} found in the
* <code>-ROOT-</code> table.
* @throws IOException
* @see #getMetaRegion(HRegionInfo)
*/
public List<HRegionInfo> getMETARowsInROOT() throws IOException {
if (!initialized) {
throw new IllegalStateException("Must call initialize method first.");
}
final List<HRegionInfo> result = new ArrayList<HRegionInfo>();
scanRootRegion(new ScannerListener() {
@SuppressWarnings("unused")
public boolean processRow(HRegionInfo info) throws IOException {
if (Bytes.equals(info.getTableDesc().getName(),
HConstants.META_TABLE_NAME)) {
result.add(info);
return false;
}
return true;
}});
return result;
}
}
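The removed addColumn/deleteColumn helpers and getMETARowsInROOT all ride on the same callback pattern: hand scanMetaRegion or scanRootRegion a ScannerListener, and processRow returns true to keep scanning or false to stop. A standalone sketch of that control flow; the interface shape comes from the code above, the types here are simplified stand-ins.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

// Stand-in for MetaUtils.ScannerListener with HRegionInfo reduced to a
// table name, purely to show the boolean continue/stop protocol.
interface ListenerSketch {
  boolean processRow(String tableName) throws IOException;
}

class CollectRegions implements ListenerSketch {
  private final String wanted;
  final List<String> hits = new ArrayList<String>();

  CollectRegions(final String wanted) {
    this.wanted = wanted;
  }

  public boolean processRow(String tableName) throws IOException {
    if (wanted.equals(tableName)) {
      hits.add(tableName);
    }
    // Returning true keeps the scan going. Because a table's rows are
    // contiguous in .META., a listener can instead return false to stop
    // once it has passed the table, as the inTable flag above does.
    return true;
  }
}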

View File

@@ -9,11 +9,6 @@ Automatically created by Tomcat JspC.
<web-app>
<servlet>
<servlet-name>org.apache.hadoop.hbase.generated.master.loader_jsp</servlet-name>
<servlet-class>org.apache.hadoop.hbase.generated.master.loader_jsp</servlet-class>
</servlet>
<servlet>
<servlet-name>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-name>
<servlet-class>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-class>
@@ -24,11 +19,6 @@ Automatically created by Tomcat JspC.
<servlet-class>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>org.apache.hadoop.hbase.generated.master.loader_jsp</servlet-name>
<url-pattern>/loader.jsp</url-pattern>
</servlet-mapping>
<servlet-mapping>
<servlet-name>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-name>
<url-pattern>/master.jsp</url-pattern>