HBASE-659 HLog#cacheFlushLock not cleared; hangs a region

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@662146 13f79535-47bb-0310-9956-ffa450edef68
Author: Michael Stack, 2008-06-01 04:42:11 +00:00
Commit 359fdf6238 (parent b5cd4f69a2)
13 changed files with 194 additions and 50 deletions

File: CHANGES.txt

@@ -34,6 +34,7 @@ Hbase Change Log
    HBASE-649 API polluted with default and protected access data members and methods
    HBASE-650 Add String versions of get, scanner, put in HTable
    HBASE-656 Do not retry exceptions such as unknown scanner or illegal argument
+   HBASE-659 HLog#cacheFlushLock not cleared; hangs a region
 
 IMPROVEMENTS
    HBASE-559 MR example job to count table rows

File: bin/hirb.rb (the HBase shell script)

@@ -11,9 +11,9 @@
 # Run the java magic include and import basic HBase types.
 include Java
+import org.apache.hadoop.hbase.HBaseConfiguration
 import org.apache.hadoop.hbase.client.HTable
 import org.apache.hadoop.hbase.client.HBaseAdmin
-import org.apache.hadoop.hbase.HBaseConfiguration
 import org.apache.hadoop.hbase.HColumnDescriptor
 import org.apache.hadoop.hbase.HConstants
 import org.apache.hadoop.hbase.HTableDescriptor

@@ -30,14 +30,17 @@ def hbase
   puts ' version  Output HBase version'
 end
 
-def version
-  puts "Version: #{org.apache.hadoop.hbase.util.VersionInfo.getVersion()},\
+def versionstr
+  "Version: #{org.apache.hadoop.hbase.util.VersionInfo.getVersion()},\
 r#{org.apache.hadoop.hbase.util.VersionInfo.getRevision()},\
 #{org.apache.hadoop.hbase.util.VersionInfo.getDate()}"
+end
+
+def version
+  puts versionstr()
 end
 
 # Output a banner message that tells users where to go for help
 # TODO: Test that we're in irb context. For now presume it.
-# TODO: Test that we are in shell context.
-puts "HBase Shell; type 'hbase<RETURN>' for the list of supported HBase commands"
-version
+puts "HBase Shell; " + versionstr()
+puts "+ Type 'hbase<RETURN>' for list of HBase commands"

File: conf/hbase-env.sh

@@ -23,6 +23,7 @@
 # The java implementation to use. Required.
 # export JAVA_HOME=/usr/lib/j2sdk1.5-sun
+export JAVA_HOME=/usr
 
 # Extra Java CLASSPATH elements. Optional.
 # export HBASE_CLASSPATH=

File: conf/hbase-site.xml

@@ -22,4 +22,29 @@
  */
 -->
 <configuration>
+  <property>
+    <name>hbase.master</name>
+    <value>durruti.local:60000</value>
+    <description>The host and port that the HBase master runs at.
+    A value of 'local' runs the master and a regionserver in
+    a single process.
+    </description>
+  </property>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://durruti.local:10000/hbase</value>
+    <description>The directory shared by region servers.
+    Should be fully-qualified to include the filesystem to use.
+    E.g: hdfs://NAMENODE_SERVER:PORT/HBASE_ROOTDIR
+    </description>
+  </property>
+  <property>
+    <name>dfs.replication</name>
+    <value>1</value>
+    <description>Default block replication.
+    The actual number of replications can be specified when the file is created.
+    The default is used if replication is not specified in create time.
+    </description>
+  </property>
 </configuration>
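
Aside: the properties added above are what client and daemon code reads through HBaseConfiguration, which layers hbase-site.xml over hbase-default.xml from the classpath. A rough sketch of reading them (illustrative only, not part of this commit; it assumes the period-appropriate no-arg HBaseConfiguration constructor):

    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ShowSiteConf {
      public static void main(String[] args) {
        // Loads hbase-default.xml, then hbase-site.xml from the classpath.
        HBaseConfiguration conf = new HBaseConfiguration();
        // A value of 'local' runs the master and a regionserver in one process.
        String master = conf.get("hbase.master", "local");
        // The filesystem directory shared by the region servers.
        String rootdir = conf.get("hbase.rootdir");
        System.out.println("hbase.master=" + master + ", hbase.rootdir=" + rootdir);
      }
    }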

File: conf/log4j.properties

@@ -41,3 +41,6 @@ log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}:
 
 # Custom Logging levels
 #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.fs=DEBUG
+log4j.logger.org.apache.hadoop.dfs=DEBUG
+log4j.logger.org.apache.hadoop.hbase=DEBUG

File: ChangeTableState.java (org.apache.hadoop.hbase.master)

@@ -25,6 +25,8 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.TreeMap;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;

@@ -33,6 +35,7 @@ import org.apache.hadoop.hbase.util.Writables;
 
 /** Instantiated to enable or disable a table */
 class ChangeTableState extends TableOperation {
+  private final Log LOG = LogFactory.getLog(this.getClass());
   private boolean online;
 
   protected final Map<String, HashSet<HRegionInfo>> servedRegions =

File: ColumnOperation.java (org.apache.hadoop.hbase.master)

@@ -20,6 +20,9 @@
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;

@@ -27,6 +30,7 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.util.Writables;
 
 abstract class ColumnOperation extends TableOperation {
+  private final Log LOG = LogFactory.getLog(this.getClass());
 
   protected ColumnOperation(final HMaster master, final byte [] tableName)
   throws IOException {

@@ -53,4 +57,4 @@ abstract class ColumnOperation extends TableOperation {
       LOG.debug("updated columns in row: " + i.getRegionName());
     }
   }
 }

File: TableDelete.java (org.apache.hadoop.hbase.master)

@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;

@@ -33,6 +35,7 @@ import org.apache.hadoop.hbase.ipc.HRegionInterface;
  * Instantiated to delete a table. Table must be offline.
  */
 class TableDelete extends TableOperation {
+  private final Log LOG = LogFactory.getLog(this.getClass());
 
   TableDelete(final HMaster master, final byte [] tableName) throws IOException {
     super(master, tableName);

File: TableOperation.java (org.apache.hadoop.hbase.master)

@@ -25,59 +25,42 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.io.RowResult;
-import org.apache.hadoop.hbase.util.Sleeper;
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
 
 /**
  * Abstract base class for operations that need to examine all HRegionInfo
- * objects that make up a table. (For a table, operate on each of its rows
- * in .META.) To gain the
+ * objects in a table. (For a table, operate on each of its rows
+ * in .META.).
  */
 abstract class TableOperation implements HConstants {
-  static final Long ZERO_L = Long.valueOf(0L);
-  protected static final Log LOG = LogFactory.getLog(TableOperation.class);
-  protected Set<MetaRegion> metaRegions;
-  protected byte [] tableName;
-  protected Set<HRegionInfo> unservedRegions;
+  private final Set<MetaRegion> metaRegions;
+  protected final byte [] tableName;
+  protected final Set<HRegionInfo> unservedRegions = new HashSet<HRegionInfo>();
   protected HMaster master;
-  protected final int numRetries;
-  protected final Sleeper sleeper;
 
   protected TableOperation(final HMaster master, final byte [] tableName)
   throws IOException {
-    this.sleeper = master.sleeper;
-    this.numRetries = master.numRetries;
     this.master = master;
     if (!this.master.isMasterRunning()) {
       throw new MasterNotRunningException();
     }
     this.tableName = tableName;
-    this.unservedRegions = new HashSet<HRegionInfo>();
 
     // We can not access any meta region if they have not already been
     // assigned and scanned.
     if (master.regionManager.metaScannerThread.waitForMetaRegionsOrClose()) {
       // We're shutting down. Forget it.
       throw new MasterNotRunningException();
     }
     this.metaRegions = master.regionManager.getMetaRegionsForTable(tableName);
   }

@@ -147,7 +130,7 @@ abstract class TableOperation implements HConstants {
       postProcessMeta(m, server);
       unservedRegions.clear();
-      return true;
+      return Boolean.TRUE;
     }
   }

@@ -178,4 +161,4 @@ abstract class TableOperation implements HConstants {
   protected abstract void postProcessMeta(MetaRegion m,
     HRegionInterface server) throws IOException;
 }
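
Aside: this refactor also moves logging out of the abstract base. TableOperation no longer declares a shared static LOG; each concrete operation (ChangeTableState, ColumnOperation, TableDelete above) creates its own commons-logging logger keyed to its runtime class, so debug output names the subclass that did the work. A minimal sketch of the pattern, with made-up class names:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    abstract class BaseOp {
      // No logger here any more; subclasses own theirs.
    }

    class EnableTableOp extends BaseOp {
      // getLog(this.getClass()) resolves to EnableTableOp at runtime.
      private final Log LOG = LogFactory.getLog(this.getClass());

      void run() {
        LOG.debug("logged under the concrete class's name");
      }
    }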

File: HRegion.java (org.apache.hadoop.hbase.regionserver)

@@ -1009,12 +1009,17 @@ public class HRegion implements HConstants {
           this.memcacheSize.set(0);
         }
       }
-    } catch (IOException e) {
+    } catch (Throwable t) {
       // An exception here means that the snapshot was not persisted.
       // The hlog needs to be replayed so its content is restored to memcache.
       // Currently, only a server restart will do this.
+      // We used to only catch IOEs but its possible that we'd get other
+      // exceptions -- e.g. HBASE-659 was about an NPE -- so now we catch
+      // all and sundry.
       this.log.abortCacheFlush();
-      throw new DroppedSnapshotException(e.getMessage());
+      DroppedSnapshotException dse = new DroppedSnapshotException();
+      dse.initCause(t);
+      throw dse;
     }
 
     // If we get to here, the HStores have been written. If we get an
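
This hunk is the heart of HBASE-659: the flush used to catch only IOException, so an unexpected RuntimeException (the JIRA reports an NPE) skipped abortCacheFlush(), left HLog#cacheFlushLock held, and hung the region. Catching Throwable guarantees the lock is released and keeps the original failure as the cause of the DroppedSnapshotException. A stripped-down sketch of the pattern (not the actual HRegion code; the helper name and Runnable stand-in are made up):

    import java.io.IOException;
    import org.apache.hadoop.hbase.DroppedSnapshotException;
    import org.apache.hadoop.hbase.regionserver.HLog;

    final class FlushErrorHandlingSketch {
      // writeStores stands in for the loop that persists each HStore snapshot.
      static void persistSnapshot(HLog log, Runnable writeStores) throws IOException {
        try {
          writeStores.run();
        } catch (Throwable t) {                 // was catch (IOException e): an NPE escaped it
          log.abortCacheFlush();                // always release HLog#cacheFlushLock
          DroppedSnapshotException dse = new DroppedSnapshotException();
          dse.initCause(t);                     // keep the root failure attached
          throw dse;
        }
      }
    }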

File: regionserver scanner implementing ChangedReadersObserver (org.apache.hadoop.hbase.regionserver)

@@ -74,7 +74,9 @@ implements ChangedReadersObserver {
   private void openReaders(final byte [] firstRow) throws IOException {
     if (this.readers != null) {
       for (int i = 0; i < this.readers.length; i++) {
-        this.readers[i].close();
+        if (this.readers[i] != null) {
+          this.readers[i].close();
+        }
       }
     }
     // Open our own copies of the Readers here inside in the scanner.

@@ -247,7 +249,7 @@ implements ChangedReadersObserver {
    * @param firstRow seek to this row
    * @return true if this is the first row or if the row was not found
    */
-  boolean findFirstRow(int i, final byte [] firstRow) throws IOException {
+  private boolean findFirstRow(int i, final byte [] firstRow) throws IOException {
     ImmutableBytesWritable ibw = new ImmutableBytesWritable();
     HStoreKey firstKey
       = (HStoreKey)readers[i].getClosest(new HStoreKey(firstRow), ibw);

@@ -276,7 +278,7 @@ implements ChangedReadersObserver {
    * @param i which reader to fetch next value from
    * @return true if there is more data available
    */
-  boolean getNext(int i) throws IOException {
+  private boolean getNext(int i) throws IOException {
     boolean result = false;
     ImmutableBytesWritable ibw = new ImmutableBytesWritable();
     long now = System.currentTimeMillis();

@@ -302,7 +304,7 @@ implements ChangedReadersObserver {
   }
 
   /** Close down the indicated reader. */
-  void closeSubScanner(int i) {
+  private void closeSubScanner(int i) {
     try {
       if(readers[i] != null) {
         try {

@@ -355,4 +357,4 @@ implements ChangedReadersObserver {
       this.lock.writeLock().unlock();
     }
   }
 }
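
Two small things happen above: the reader-close loop now tolerates null slots, and the row-positioning helpers become private. A tiny sketch of the null-guarded close (illustrative only; presumably a slot can be null once readers have been swapped out from under the scanner, which is what a ChangedReadersObserver exists to handle):

    import java.io.Closeable;
    import java.io.IOException;

    final class CloseSketch {
      static void closeAll(Closeable[] readers) throws IOException {
        if (readers == null) {
          return;
        }
        for (int i = 0; i < readers.length; i++) {
          if (readers[i] != null) {   // the new guard: skip empty slots
            readers[i].close();
          }
        }
      }
    }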

File: MetaUtils.java (org.apache.hadoop.hbase.util)

@@ -22,28 +22,36 @@ package org.apache.hadoop.hbase.util;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 import java.util.SortedMap;
 import java.util.TreeMap;
 
-import org.apache.commons.httpclient.methods.GetMethod;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.regionserver.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 
 /**
- * Contains utility methods for manipulating HBase meta tables
+ * Contains utility methods for manipulating HBase meta tables.
+ * Be sure to call {@link #shutdown()} when done with this class so it closes
+ * resources opened during meta processing (ROOT, META, etc.).
  */
 public class MetaUtils {
   private static final Log LOG = LogFactory.getLog(MetaUtils.class);

@@ -209,7 +217,6 @@ public class MetaUtils {
     }
 
     // Open root region so we can scan it
     if (this.rootRegion == null) {
       openRootRegion();
     }

@@ -261,7 +268,7 @@ public class MetaUtils {
     HRegion metaRegion = openMetaRegion(metaRegionInfo);
     scanMetaRegion(metaRegion, listener);
   }
 
   /**
    * Scan the passed in metaregion <code>m</code> invoking the passed
    * <code>listener</code> per row found.

@@ -269,8 +276,7 @@ public class MetaUtils {
    * @param listener
    * @throws IOException
    */
-  public void scanMetaRegion(final HRegion m,
-    final ScannerListener listener)
+  public void scanMetaRegion(final HRegion m, final ScannerListener listener)
   throws IOException {
     InternalScanner metaScanner = m.getScanner(HConstants.COL_REGIONINFO_ARRAY,
       HConstants.EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP, null);

@@ -295,13 +301,13 @@ public class MetaUtils {
       metaScanner.close();
     }
   }
 
   private void openRootRegion() throws IOException {
     this.rootRegion = HRegion.openHRegion(HRegionInfo.ROOT_REGIONINFO,
       this.rootdir, this.log, this.conf);
     this.rootRegion.compactStores();
   }
 
   private HRegion openMetaRegion(HRegionInfo metaInfo) throws IOException {
     HRegion meta =
       HRegion.openHRegion(metaInfo, this.rootdir, this.log, this.conf);

@@ -339,4 +345,99 @@ public class MetaUtils {
     b.delete(HConstants.COL_STARTCODE);
     t.commit(b);
   }
+
+  /**
+   * @param tableName
+   * @param hcd Add this column to <code>tableName</code>
+   * @throws IOException
+   */
+  public void addColumn(final byte [] tableName,
+      final HColumnDescriptor hcd)
+  throws IOException {
+    List<HRegionInfo> metas = getMETARowsInROOT();
+    for (HRegionInfo hri: metas) {
+      final HRegion m = getMetaRegion(hri);
+      scanMetaRegion(m, new ScannerListener() {
+        private boolean inTable = false;
+        @SuppressWarnings("synthetic-access")
+        public boolean processRow(HRegionInfo info) throws IOException {
+          if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
+            this.inTable = true;
+            info.getTableDesc().addFamily(hcd);
+            updateMETARegionInfo(m, info);
+            return false;
+          }
+          // If we got here and we have not yet encountered the table yet,
+          // inTable will be false. Otherwise, we've passed out the table.
+          // Stop the scanner.
+          return this.inTable;
+        }});
+    }
+  }
+
+  /**
+   * @param tableName
+   * @param columnFamily Name of column name to remove.
+   * @throws IOException
+   */
+  public void deleteColumn(final byte [] tableName,
+      final byte [] columnFamily) throws IOException {
+    List<HRegionInfo> metas = getMETARowsInROOT();
+    for (HRegionInfo hri: metas) {
+      final HRegion m = getMetaRegion(hri);
+      scanMetaRegion(m, new ScannerListener() {
+        private boolean inTable = false;
+        @SuppressWarnings("synthetic-access")
+        public boolean processRow(HRegionInfo info) throws IOException {
+          if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
+            this.inTable = true;
+            info.getTableDesc().removeFamily(columnFamily);
+            updateMETARegionInfo(m, info);
+            return false;
+          }
+          // If we got here and we have not yet encountered the table yet,
+          // inTable will be false. Otherwise, we've passed out the table.
+          // Stop the scanner.
+          return this.inTable;
+        }});
+    }
+  }
+
+  private void updateMETARegionInfo(HRegion r, final HRegionInfo hri)
+  throws IOException {
+    BatchUpdate b = new BatchUpdate(hri.getRegionName());
+    b.put(HConstants.COL_REGIONINFO, Writables.getBytes(hri));
+    r.batchUpdate(b);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Updated " + Bytes.toString(HConstants.COL_REGIONINFO) +
+        " column in row " + hri.getRegionName() + " in " +
+        Bytes.toString(r.getRegionName()));
+    }
+  }
+
+  /**
+   * @return List of <code>.META.<code> {@link HRegionInfo} found in the
+   * <code>-ROOT-</code> table.
+   * @throws IOException
+   * @see #getMetaRegion(HRegionInfo)
+   */
+  public List<HRegionInfo> getMETARowsInROOT() throws IOException {
+    if (!initialized) {
+      throw new IllegalStateException("Must call initialize method first.");
+    }
+    final List<HRegionInfo> result = new ArrayList<HRegionInfo>();
+    scanRootRegion(new ScannerListener() {
+      @SuppressWarnings("unused")
+      public boolean processRow(HRegionInfo info) throws IOException {
+        if (Bytes.equals(info.getTableDesc().getName(),
+            HConstants.META_TABLE_NAME)) {
+          result.add(info);
+          return false;
+        }
+        return true;
+      }});
+    return result;
+  }
 }
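
The new MetaUtils methods give offline tooling a way to edit a table's schema directly in .META.: addColumn and deleteColumn rewrite the HRegionInfo rows in every .META. region named in -ROOT-. A hedged usage sketch (table and family names are invented; it assumes a MetaUtils instance that has already been constructed and initialize()d, since getMETARowsInROOT() throws IllegalStateException otherwise):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.MetaUtils;

    public class AddFamilyOffline {
      static void addFamily(MetaUtils utils) throws IOException {
        // Write a new family into the table descriptor stored in .META.
        utils.addColumn(Bytes.toBytes("mytable"), new HColumnDescriptor("myfamily:"));
        // Close the ROOT/META regions the utility opened along the way.
        utils.shutdown();
      }
    }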

File: web.xml (master web application, generated by Tomcat JspC)

@@ -9,6 +9,11 @@ Automatically created by Tomcat JspC.
 
 <web-app>
 
+  <servlet>
+    <servlet-name>org.apache.hadoop.hbase.generated.master.loader_jsp</servlet-name>
+    <servlet-class>org.apache.hadoop.hbase.generated.master.loader_jsp</servlet-class>
+  </servlet>
+
   <servlet>
     <servlet-name>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-name>
     <servlet-class>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-class>

@@ -19,6 +24,11 @@ Automatically created by Tomcat JspC.
     <servlet-class>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-class>
   </servlet>
 
+  <servlet-mapping>
+    <servlet-name>org.apache.hadoop.hbase.generated.master.loader_jsp</servlet-name>
+    <url-pattern>/loader.jsp</url-pattern>
+  </servlet-mapping>
+
   <servlet-mapping>
     <servlet-name>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-name>
     <url-pattern>/master.jsp</url-pattern>