HBASE-417 Factor TableOperation and subclasses into separate files from HMaster
HBASE-439 Factor TableOperation and subclasses into separate files from HMaster; add hbase logs path to svn:ignore list

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@620706 13f79535-47bb-0310-9956-ffa450edef68
commit ef9a7c1017 (parent 39a74cafec)
@@ -42,6 +42,8 @@ Hbase Change Log
    aggressive for Hudson. TestLogRolling timed out even though it
    was operating properly. Change test timeout to 10 minutes.
    HBASE-436 website: http://hadoop.apache.org/hbase
+   HBASE-417 Factor TableOperation and subclasses into separate files from
+   HMaster (Bryan Duxbury via Stack)
 
 Branch 0.1
 
@@ -26,6 +26,8 @@ import org.apache.hadoop.io.Text;
  */
 public interface HConstants {
 
+  static final Long ZERO_L = Long.valueOf(0L);
+
   // For migration
 
   /** name of version file */
@@ -1809,7 +1809,6 @@ public class HRegion implements HConstants {
    * @param r HRegion to add to <code>meta</code>
-   *
    * @throws IOException
    * @see {@link #removeRegionFromMETA(HRegion, HRegion)}
    */
   public static void addRegionToMETA(HRegion meta, HRegion r)
   throws IOException {
@@ -1835,10 +1834,9 @@ public class HRegion implements HConstants {
    *
    * @param srvr META server to be updated
    * @param metaRegionName Meta region name
-   * @param regionNmae HRegion to remove from <code>meta</code>
-   *
+   * @param regionName HRegion to remove from <code>meta</code>
    * @throws IOException
    * @see {@link #addRegionToMETA(HRegion, HRegion)}
    */
   public static void removeRegionFromMETA(final HRegionInterface srvr,
       final Text metaRegionName, final Text regionName)
@@ -1853,7 +1851,6 @@ public class HRegion implements HConstants {
    * @param info HRegion to update in <code>meta</code>
-   *
    * @throws IOException
    * @see {@link #addRegionToMETA(HRegion, HRegion)}
    */
   public static void offlineRegionInMETA(final HRegionInterface srvr,
       final Text metaRegionName, final HRegionInfo info)
@@ -8,7 +8,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.master.HMaster.MetaRegion;
+import org.apache.hadoop.hbase.master.MetaRegion;
 import org.apache.hadoop.hbase.HBaseAdmin;
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.HServerAddress;
@@ -97,7 +97,7 @@ public final class master_jsp extends org.apache.jasper.runtime.HttpJspBase
       out.write('\n');
       out.write(' ');
       out.write(' ');
-      for (Map.Entry<Text, HMaster.MetaRegion> e: onlineRegions.entrySet()) {
+      for (Map.Entry<Text, MetaRegion> e: onlineRegions.entrySet()) {
         MetaRegion meta = e.getValue();
 
         out.write("\n <tr><td>");
@@ -0,0 +1,51 @@
/**
 * Copyright 2008 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionInterface;
import org.apache.hadoop.io.Text;

/** Instantiated to add a column family to a table */
class AddColumn extends ColumnOperation {
  private final HColumnDescriptor newColumn;

  AddColumn(final HMaster master, final Text tableName,
    final HColumnDescriptor newColumn)
  throws IOException {
    super(master, tableName);
    this.newColumn = newColumn;
  }

  @Override
  protected void postProcessMeta(MetaRegion m, HRegionInterface server)
  throws IOException {
    for (HRegionInfo i: unservedRegions) {
      // All we need to do to add a column is add it to the table descriptor.
      // When the region is brought on-line, it will find the column missing
      // and create it.
      i.getTableDesc().addFamily(newColumn);
      updateRegionInfo(server, m.getRegionName(), i);
    }
  }
}
@@ -0,0 +1,416 @@
/**
 * Copyright 2008 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.SortedMap;
import java.util.Map;
import java.util.HashMap;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.HRegion;
import org.apache.hadoop.hbase.HRegionInterface;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.HbaseMapWritable;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HStoreFile;
import org.apache.hadoop.hbase.HStore;
import org.apache.hadoop.hbase.HLog;
import org.apache.hadoop.hbase.io.BatchUpdate;

/**
 * Base HRegion scanner class. Holds utility common to <code>ROOT</code> and
 * <code>META</code> HRegion scanners.
 *
 * <p>How do we know if all regions are assigned? After the initial scan of
 * the <code>ROOT</code> and <code>META</code> regions, all regions known at
 * that time will have been or are in the process of being assigned.</p>
 *
 * <p>When a region is split the region server notifies the master of the
 * split and the new regions are assigned. But suppose the master loses the
 * split message? We need to periodically rescan the <code>ROOT</code> and
 * <code>META</code> regions.
 * <ul>
 * <li>If we rescan, any regions that are new but not assigned will have
 * no server info. Any regions that are not being served by the same
 * server will get re-assigned.</li>
 *
 * <li>Thus a periodic rescan of the root region will find any new
 * <code>META</code> regions where we missed the <code>META</code> split
 * message or we failed to detect a server death and consequently need to
 * assign the region to a new server.</li>
 *
 * <li>if we keep track of all the known <code>META</code> regions, then
 * we can rescan them periodically. If we do this then we can detect any
 * regions for which we missed a region split message.</li>
 * </ul>
 *
 * Thus just keeping track of all the <code>META</code> regions permits
 * periodic rescanning which will detect unassigned regions (new or
 * otherwise) without the need to keep track of every region.</p>
 *
 * <p>So the <code>ROOT</code> region scanner needs to wake up:
 * <ol>
 * <li>when the master receives notification that the <code>ROOT</code>
 * region has been opened.</li>
 * <li>periodically after the first scan</li>
 * </ol>
 *
 * The <code>META</code> scanner needs to wake up:
 * <ol>
 * <li>when a <code>META</code> region comes on line</li>
 * <li>periodically to rescan the online <code>META</code> regions</li>
 * </ol>
 *
 * <p>A <code>META</code> region is not 'online' until it has been scanned
 * once.
 */
abstract class BaseScanner extends Chore implements HConstants {
  static final Log LOG = LogFactory.getLog(BaseScanner.class.getName());

  protected final boolean rootRegion;
  protected final HMaster master;

  protected abstract boolean initialScan();
  protected abstract void maintenanceScan();

  BaseScanner(final HMaster master, final boolean rootRegion, final int period,
    final AtomicBoolean stop) {
    super(period, stop);
    this.rootRegion = rootRegion;
    this.master = master;
  }

  @Override
  protected boolean initialChore() {
    return initialScan();
  }

  @Override
  protected void chore() {
    maintenanceScan();
  }

  /**
   * @param region Region to scan
   * @throws IOException
   */
  protected void scanRegion(final MetaRegion region) throws IOException {
    HRegionInterface regionServer = null;
    long scannerId = -1L;
    LOG.info(Thread.currentThread().getName() + " scanning meta region " +
      region.toString());

    // Array to hold list of split parents found. Scan adds to list. After
    // scan we go check if parents can be removed.
    Map<HRegionInfo, SortedMap<Text, byte[]>> splitParents =
      new HashMap<HRegionInfo, SortedMap<Text, byte[]>>();
    try {
      regionServer = master.connection.getHRegionConnection(region.getServer());
      scannerId =
        regionServer.openScanner(region.getRegionName(), COLUMN_FAMILY_ARRAY,
          EMPTY_START_ROW, System.currentTimeMillis(), null);

      int numberOfRegionsFound = 0;
      while (true) {
        HbaseMapWritable values = regionServer.next(scannerId);
        if (values == null || values.size() == 0) {
          break;
        }

        // TODO: Why does this have to be a sorted map?
        SortedMap<Text, byte[]> results =
          RowMap.fromHbaseMapWritable(values).getMap();

        HRegionInfo info = master.getHRegionInfo(results);
        if (info == null) {
          continue;
        }

        String serverName = Writables.bytesToString(results.get(COL_SERVER));
        long startCode = Writables.bytesToLong(results.get(COL_STARTCODE));
        if (LOG.isDebugEnabled()) {
          LOG.debug(Thread.currentThread().getName() + " regioninfo: {" +
            info.toString() + "}, server: " + serverName + ", startCode: " +
            startCode);
        }

        // Note Region has been assigned.
        checkAssigned(info, serverName, startCode);
        if (isSplitParent(info)) {
          splitParents.put(info, results);
        }
        numberOfRegionsFound += 1;
      }
      if (this.rootRegion) {
        master.numberOfMetaRegions.set(numberOfRegionsFound);
      }
    } catch (IOException e) {
      if (e instanceof RemoteException) {
        e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
        if (e instanceof UnknownScannerException) {
          // Reset scannerId so we do not try closing a scanner the other side
          // has lost account of: prevents duplicated stack trace out of the
          // below close in the finally.
          scannerId = -1L;
        }
      }
      throw e;
    } finally {
      try {
        if (scannerId != -1L && regionServer != null) {
          regionServer.close(scannerId);
        }
      } catch (IOException e) {
        LOG.error("Closing scanner",
          RemoteExceptionHandler.checkIOException(e));
      }
    }

    // Scan is finished. Take a look at split parents to see if any we can
    // clean up.
    if (splitParents.size() > 0) {
      for (Map.Entry<HRegionInfo, SortedMap<Text, byte[]>> e:
          splitParents.entrySet()) {
        HRegionInfo hri = e.getKey();
        cleanupSplits(region.getRegionName(), regionServer, hri, e.getValue());
      }
    }
    LOG.info(Thread.currentThread().getName() + " scan of meta region " +
      region.toString() + " complete");
  }

  /*
   * @param info Region to check.
   * @return True if this is a split parent.
   */
  private boolean isSplitParent(final HRegionInfo info) {
    if (!info.isSplit()) {
      return false;
    }
    if (!info.isOffline()) {
      LOG.warn("Region is split but not offline: " + info.getRegionName());
    }
    return true;
  }

  /*
   * If daughters no longer hold reference to the parents, delete the parent.
   * @param metaRegionName Meta region name.
   * @param server HRegionInterface of meta server to talk to
   * @param parent HRegionInfo of split parent
   * @param rowContent Content of <code>parent</code> row in
   * <code>metaRegionName</code>
   * @return True if we removed <code>parent</code> from meta table and from
   * the filesystem.
   * @throws IOException
   */
  private boolean cleanupSplits(final Text metaRegionName,
    final HRegionInterface srvr, final HRegionInfo parent,
    SortedMap<Text, byte[]> rowContent)
  throws IOException {
    boolean result = false;

    boolean hasReferencesA = hasReferences(metaRegionName, srvr,
      parent.getRegionName(), rowContent, COL_SPLITA);
    boolean hasReferencesB = hasReferences(metaRegionName, srvr,
      parent.getRegionName(), rowContent, COL_SPLITB);

    if (!hasReferencesA && !hasReferencesB) {
      LOG.info("Deleting region " + parent.getRegionName() +
        " because daughter splits no longer hold references");
      if (!HRegion.deleteRegion(master.fs, master.rootdir, parent)) {
        LOG.warn("Deletion of " + parent.getRegionName() + " failed");
      }

      HRegion.removeRegionFromMETA(srvr, metaRegionName,
        parent.getRegionName());
      result = true;
    } else if (LOG.isDebugEnabled()) {
      // If debug, note we checked and current state of daughters.
      LOG.debug("Checked " + parent.getRegionName() +
        " for references: splitA: " + hasReferencesA + ", splitB: "+
        hasReferencesB);
    }
    return result;
  }

  /*
   * Checks if a daughter region -- either splitA or splitB -- still holds
   * references to parent. If not, removes reference to the split from
   * the parent meta region row.
   * @param metaRegionName Name of meta region to look in.
   * @param srvr Where region resides.
   * @param parent Parent region name.
   * @param rowContent Keyed content of the parent row in meta region.
   * @param splitColumn Column name of daughter split to examine
   * @return True if still has references to parent.
   * @throws IOException
   */
  protected boolean hasReferences(final Text metaRegionName,
    final HRegionInterface srvr, final Text parent,
    SortedMap<Text, byte[]> rowContent, final Text splitColumn)
  throws IOException {
    boolean result = false;
    HRegionInfo split =
      Writables.getHRegionInfoOrNull(rowContent.get(splitColumn));
    if (split == null) {
      return result;
    }
    Path tabledir =
      HTableDescriptor.getTableDir(master.rootdir, split.getTableDesc().getName());
    for (HColumnDescriptor family: split.getTableDesc().families().values()) {
      Path p = HStoreFile.getMapDir(tabledir, split.getEncodedName(),
        family.getFamilyName());

      // Look for reference files. Call listStatus with an anonymous
      // instance of PathFilter.

      FileStatus [] ps = master.fs.listStatus(p,
        new PathFilter () {
          public boolean accept(Path path) {
            return HStore.isReference(path);
          }
        }
      );

      if (ps != null && ps.length > 0) {
        result = true;
        break;
      }
    }

    if (result) {
      return result;
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug(split.getRegionName().toString()
        +" no longer has references to " + parent.toString());
    }

    BatchUpdate b = new BatchUpdate(parent);
    b.delete(splitColumn);
    srvr.batchUpdate(metaRegionName, b);

    return result;
  }

  protected void checkAssigned(final HRegionInfo info,
    final String serverName, final long startCode) throws IOException {

    // Skip region - if ...
    if(info.isOffline()                                  // offline
        || master.killedRegions.contains(info.getRegionName())  // queued for offline
        || master.regionsToDelete.contains(info.getRegionName())) { // queued for delete

      master.unassignedRegions.remove(info);
      return;
    }
    HServerInfo storedInfo = null;
    boolean deadServer = false;
    if (serverName.length() != 0) {
      synchronized (master.killList) {
        Map<Text, HRegionInfo> regionsToKill = master.killList.get(serverName);
        if (regionsToKill != null &&
            regionsToKill.containsKey(info.getRegionName())) {

          // Skip if region is on kill list
          if(LOG.isDebugEnabled()) {
            LOG.debug("not assigning region (on kill list): " +
              info.getRegionName());
          }
          return;
        }
      }
      storedInfo = master.serversToServerInfo.get(serverName);
      deadServer = master.deadServers.contains(serverName);
    }

    /*
     * If the server is not dead and either:
     *   the stored info is not null and the start code does not match
     * or:
     *   the stored info is null and the region is neither unassigned nor pending
     * then:
     */
    if (!deadServer &&
        ((storedInfo != null && storedInfo.getStartCode() != startCode) ||
            (storedInfo == null &&
                !master.unassignedRegions.containsKey(info) &&
                !master.pendingRegions.contains(info.getRegionName())
            )
        )
    ) {

      // The current assignment is invalid
      if (LOG.isDebugEnabled()) {
        LOG.debug("Current assignment of " + info.getRegionName() +
          " is not valid: storedInfo: " + storedInfo + ", startCode: " +
          startCode + ", storedInfo.startCode: " +
          ((storedInfo != null)? storedInfo.getStartCode(): -1) +
          ", unassignedRegions: " + master.unassignedRegions.containsKey(info) +
          ", pendingRegions: " +
          master.pendingRegions.contains(info.getRegionName()));
      }
      // Recover the region server's log if there is one.
      // This is only done from here if we are restarting and there is stale
      // data in the meta region. Once we are on-line, dead server log
      // recovery is handled by lease expiration and ProcessServerShutdown
      if (!master.initialMetaScanComplete && serverName.length() != 0) {
        StringBuilder dirName = new StringBuilder("log_");
        dirName.append(serverName.replace(":", "_"));
        Path logDir = new Path(master.rootdir, dirName.toString());
        try {
          if (master.fs.exists(logDir)) {
            master.splitLogLock.lock();
            try {
              HLog.splitLog(master.rootdir, logDir, master.fs, master.conf);
            } finally {
              master.splitLogLock.unlock();
            }
          }
          if (LOG.isDebugEnabled()) {
            LOG.debug("Split " + logDir.toString());
          }
        } catch (IOException e) {
          LOG.warn("unable to split region server log because: ", e);
          throw e;
        }
      }
      // Now get the region assigned
      master.unassignedRegions.put(info, ZERO_L);
    }
  }
}
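BaseScanner above leans on the template-method pattern provided by Chore: the superclass owns the run loop and the stop flag, while subclasses supply the one-time initialScan() and the periodic maintenanceScan(). A minimal, self-contained sketch of that pattern follows; it is illustrative plain Java, not the actual org.apache.hadoop.hbase.Chore API, and every name in it is an assumption for demonstration only.

import java.util.concurrent.atomic.AtomicBoolean;

// Sketch of a Chore-style worker: run initialChore() once, then invoke
// chore() every `period` milliseconds until `stop` is set.
abstract class ChoreSketch extends Thread {
  private final int period;          // sleep between chore() calls, in ms
  private final AtomicBoolean stop;  // shared shutdown flag

  ChoreSketch(int period, AtomicBoolean stop) {
    this.period = period;
    this.stop = stop;
  }

  protected abstract boolean initialChore(); // e.g. BaseScanner.initialScan()
  protected abstract void chore();           // e.g. BaseScanner.maintenanceScan()

  @Override
  public void run() {
    boolean initialDone = initialChore();    // first scan happens once
    while (initialDone && !stop.get()) {
      chore();                               // periodic rescan
      try {
        Thread.sleep(period);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();  // preserve interrupt and exit
        return;
      }
    }
  }
}

The design payoff is visible in the two subclasses of this commit: RootScanner and MetaScanner differ only in what they scan, not in how the loop, timing, or shutdown are handled.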
@@ -0,0 +1,152 @@
/**
 * Copyright 2008 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionInterface;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;

/** Instantiated to enable or disable a table */
class ChangeTableState extends TableOperation {
  private boolean online;

  protected final Map<String, HashSet<HRegionInfo>> servedRegions =
    new HashMap<String, HashSet<HRegionInfo>>();

  protected long lockid;

  ChangeTableState(final HMaster master, final Text tableName,
    final boolean onLine)
  throws IOException {
    super(master, tableName);
    this.online = onLine;
  }

  @Override
  protected void processScanItem(String serverName, long startCode,
      HRegionInfo info) {

    if (isBeingServed(serverName, startCode)) {
      HashSet<HRegionInfo> regions = servedRegions.get(serverName);
      if (regions == null) {
        regions = new HashSet<HRegionInfo>();
      }
      regions.add(info);
      servedRegions.put(serverName, regions);
    }
  }

  @Override
  protected void postProcessMeta(MetaRegion m, HRegionInterface server)
  throws IOException {
    // Process regions not being served

    if (LOG.isDebugEnabled()) {
      LOG.debug("processing unserved regions");
    }
    for (HRegionInfo i: unservedRegions) {
      if (i.isOffline() && i.isSplit()) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Skipping region " + i.toString() + " because it is " +
            "offline because it has been split");
        }
        continue;
      }

      // Update meta table

      if (LOG.isDebugEnabled()) {
        LOG.debug("updating columns in row: " + i.getRegionName());
      }

      BatchUpdate b = new BatchUpdate(i.getRegionName());
      updateRegionInfo(b, i);
      b.delete(COL_SERVER);
      b.delete(COL_STARTCODE);
      server.batchUpdate(m.getRegionName(), b);
      if (LOG.isDebugEnabled()) {
        LOG.debug("updated columns in row: " + i.getRegionName());
      }

      if (online) { // Bring offline regions on-line
        if (!this.master.unassignedRegions.containsKey(i)) {
          this.master.unassignedRegions.put(i, ZERO_L);
        }

      } else { // Prevent region from getting assigned.
        this.master.unassignedRegions.remove(i);
      }
    }

    // Process regions currently being served

    if (LOG.isDebugEnabled()) {
      LOG.debug("processing regions currently being served");
    }
    for (Map.Entry<String, HashSet<HRegionInfo>> e: servedRegions.entrySet()) {
      String serverName = e.getKey();
      if (online) {
        LOG.debug("Already online");
        continue; // Already being served
      }

      // Cause regions being served to be taken off-line and disabled

      HashMap<Text, HRegionInfo> localKillList =
        new HashMap<Text, HRegionInfo>();

      synchronized (this.master.killList) {
        HashMap<Text, HRegionInfo> killedRegions =
          this.master.killList.get(serverName);
        if (killedRegions != null) {
          localKillList.putAll(killedRegions);
        }
      }
      for (HRegionInfo i: e.getValue()) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("adding region " + i.getRegionName() +
            " to local kill list");
        }
        localKillList.put(i.getRegionName(), i);
      }
      if (localKillList.size() > 0) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("inserted local kill list into kill list for server " +
            serverName);
        }
        this.master.killList.put(serverName, localKillList);
      }
    }
    servedRegions.clear();
  }

  protected void updateRegionInfo(final BatchUpdate b, final HRegionInfo i)
  throws IOException {
    i.setOffline(!online);
    b.put(COL_REGIONINFO, Writables.getBytes(i));
  }
}
@@ -0,0 +1,57 @@
/**
 * Copyright 2008 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionInterface;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.util.Writables;

abstract class ColumnOperation extends TableOperation {

  protected ColumnOperation(final HMaster master, final Text tableName)
  throws IOException {
    super(master, tableName);
  }

  @Override
  protected void processScanItem(
    @SuppressWarnings("unused") String serverName,
    @SuppressWarnings("unused") long startCode, final HRegionInfo info)
  throws IOException {
    if (isEnabled(info)) {
      throw new TableNotDisabledException(tableName.toString());
    }
  }

  protected void updateRegionInfo(HRegionInterface server, Text regionName,
    HRegionInfo i)
  throws IOException {
    BatchUpdate b = new BatchUpdate(i.getRegionName());
    b.put(COL_REGIONINFO, Writables.getBytes(i));
    server.batchUpdate(regionName, b);
    if (LOG.isDebugEnabled()) {
      LOG.debug("updated columns in row: " + i.getRegionName());
    }
  }
}
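ColumnOperation captures the shared half of a template: every schema change first verifies the table is disabled (processScanItem) and ends by writing the updated descriptor back to META (updateRegionInfo); subclasses such as AddColumn, DeleteColumn, and ModifyColumn only decide how the descriptor changes. A self-contained sketch of that shape follows, using simplified stand-in types; none of these names are HBase API.

import java.util.Set;

// Shared validation plus write-back, mirroring processScanItem and
// updateRegionInfo; edit() is the only subclass-specific step.
abstract class SchemaEditSketch {
  final void apply(boolean tableEnabled, Set<String> families) {
    if (tableEnabled) {
      // Corresponds to throwing TableNotDisabledException.
      throw new IllegalStateException("table must be disabled first");
    }
    edit(families);
    // ...here the real code writes the updated descriptor back to META...
  }
  protected abstract void edit(Set<String> families);
}

class AddFamilySketch extends SchemaEditSketch {
  private final String family;
  AddFamilySketch(String family) { this.family = family; }
  @Override protected void edit(Set<String> families) { families.add(family); }
}

class DeleteFamilySketch extends SchemaEditSketch {
  private final String family;
  DeleteFamilySketch(String family) { this.family = family; }
  @Override protected void edit(Set<String> families) { families.remove(family); }
}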
@@ -0,0 +1,58 @@
/**
 * Copyright 2008 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionInterface;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HStoreFile;

/** Instantiated to remove a column family from a table */
class DeleteColumn extends ColumnOperation {
  private final Text columnName;

  DeleteColumn(final HMaster master, final Text tableName,
    final Text columnName)
  throws IOException {
    super(master, tableName);
    this.columnName = columnName;
  }

  @Override
  protected void postProcessMeta(MetaRegion m, HRegionInterface server)
  throws IOException {
    Path tabledir = new Path(this.master.rootdir, tableName.toString());
    for (HRegionInfo i: unservedRegions) {
      i.getTableDesc().families().remove(columnName);
      updateRegionInfo(server, m.getRegionName(), i);

      // Delete the directories used by the column

      String encodedName = i.getEncodedName();
      this.master.fs.delete(
        HStoreFile.getMapDir(tabledir, encodedName, columnName));
      this.master.fs.delete(
        HStoreFile.getInfoDir(tabledir, encodedName, columnName));
    }
  }
}
(File diff suppressed because it is too large.)
@@ -54,4 +54,4 @@ public interface HMasterRegionInterface extends VersionedProtocol {
    */
   public HMsg[] regionServerReport(HServerInfo info, HMsg msgs[])
   throws IOException;
-}
+}
@@ -0,0 +1,101 @@
/**
 * Copyright 2008 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.io.Text;


/** Describes a meta region and its server */
public class MetaRegion implements Comparable<MetaRegion> {
  private final HServerAddress server;
  private final Text regionName;
  private final Text startKey;

  MetaRegion(final HServerAddress server, final Text regionName,
    final Text startKey) {
    if (server == null) {
      throw new IllegalArgumentException("server cannot be null");
    }
    this.server = server;

    if (regionName == null) {
      throw new IllegalArgumentException("regionName cannot be null");
    }
    this.regionName = new Text(regionName);

    this.startKey = new Text();
    if (startKey != null) {
      this.startKey.set(startKey);
    }
  }

  /** {@inheritDoc} */
  @Override
  public String toString() {
    return "{regionname: " + this.regionName.toString() + ", startKey: <" +
      this.startKey.toString() + ">, server: " + this.server.toString() + "}";
  }

  /** @return the regionName */
  public Text getRegionName() {
    return regionName;
  }

  /** @return the server */
  public HServerAddress getServer() {
    return server;
  }

  /** @return the startKey */
  public Text getStartKey() {
    return startKey;
  }

  /** {@inheritDoc} */
  @Override
  public boolean equals(Object o) {
    return o instanceof MetaRegion && this.compareTo((MetaRegion)o) == 0;
  }

  /** {@inheritDoc} */
  @Override
  public int hashCode() {
    int result = this.regionName.hashCode();
    result ^= this.startKey.hashCode();
    return result;
  }

  // Comparable

  /** {@inheritDoc} */
  public int compareTo(MetaRegion other) {
    int result = this.regionName.compareTo(other.getRegionName());
    if(result == 0) {
      result = this.startKey.compareTo(other.getStartKey());
      if (result == 0) {
        // Might be on different host?
        result = this.server.compareTo(other.server);
      }
    }
    return result;
  }
}
@@ -0,0 +1,157 @@
/**
 * Copyright 2008 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hbase.RemoteExceptionHandler;

/**
 * MetaScanner scans the <code>META</code> table.
 *
 * When a <code>META</code> server comes on line, a MetaRegion object is
 * queued up by regionServerReport() and this thread wakes up.
 *
 * It's important to do this work in a separate thread, or else the blocking
 * action would prevent other work from getting done.
 */
class MetaScanner extends BaseScanner {
  private final List<MetaRegion> metaRegionsToRescan =
    new ArrayList<MetaRegion>();

  /** Constructor */
  public MetaScanner(HMaster master) {
    super(master, false, master.metaRescanInterval, master.closed);
  }

  private boolean scanOneMetaRegion(MetaRegion region) {
    // Don't retry if we get an error while scanning. Errors are most often
    // caused by the server going away. Wait until next rescan interval when
    // things should be back to normal
    boolean scanSuccessful = false;
    while (!master.closed.get() && !master.rootScanned &&
        master.rootRegionLocation.get() == null) {
      master.sleeper.sleep();
    }
    if (master.closed.get()) {
      return scanSuccessful;
    }

    try {
      // Don't interrupt us while we're working
      synchronized (master.metaScannerLock) {
        scanRegion(region);
        master.onlineMetaRegions.put(region.getStartKey(), region);
      }
      scanSuccessful = true;
    } catch (IOException e) {
      e = RemoteExceptionHandler.checkIOException(e);
      LOG.warn("Scan one META region: " + region.toString(), e);
      // The region may have moved (TestRegionServerAbort, etc.). If
      // so, either it won't be in the onlineMetaRegions list or its host
      // address has changed and the containsValue will fail. If not
      // found, best thing to do here is probably return.
      if (!master.onlineMetaRegions.containsValue(region.getStartKey())) {
        LOG.debug("Scanned region is no longer in map of online " +
          "regions or its value has changed");
        return scanSuccessful;
      }
      // Make sure the file system is still available
      master.checkFileSystem();
    } catch (Exception e) {
      // If for some reason we get some other kind of exception,
      // at least log it rather than go out silently.
      LOG.error("Unexpected exception", e);
    }
    return scanSuccessful;
  }

  @Override
  protected boolean initialScan() {
    MetaRegion region = null;
    while (!master.closed.get() && region == null && !metaRegionsScanned()) {
      try {
        region = master.metaRegionsToScan.poll(master.threadWakeFrequency,
          TimeUnit.MILLISECONDS);
      } catch (InterruptedException e) {
        // continue
      }
      if (region == null && metaRegionsToRescan.size() != 0) {
        region = metaRegionsToRescan.remove(0);
      }
      if (region != null) {
        if (!scanOneMetaRegion(region)) {
          metaRegionsToRescan.add(region);
        }
      }
    }
    master.initialMetaScanComplete = true;
    return true;
  }

  @Override
  protected void maintenanceScan() {
    ArrayList<MetaRegion> regions = new ArrayList<MetaRegion>();
    synchronized (master.onlineMetaRegions) {
      regions.addAll(master.onlineMetaRegions.values());
    }
    for (MetaRegion r: regions) {
      scanOneMetaRegion(r);
    }
    metaRegionsScanned();
  }

  /**
   * Called by the meta scanner when it has completed scanning all meta
   * regions. This wakes up any threads that were waiting for this to happen.
   */
  private synchronized boolean metaRegionsScanned() {
    if (!master.rootScanned ||
        master.numberOfMetaRegions.get() != master.onlineMetaRegions.size()) {
      return false;
    }
    LOG.info("all meta regions scanned");
    notifyAll();
    return true;
  }

  /**
   * Other threads call this method to wait until all the meta regions have
   * been scanned.
   */
  synchronized boolean waitForMetaRegionsOrClose() {
    while (!master.closed.get()) {
      if (master.rootScanned &&
          master.numberOfMetaRegions.get() == master.onlineMetaRegions.size()) {
        break;
      }

      try {
        wait(master.threadWakeFrequency);
      } catch (InterruptedException e) {
        // continue
      }
    }
    return master.closed.get();
  }
}
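The coordination between metaRegionsScanned() and waitForMetaRegionsOrClose() is a classic condition-wait: the scanner thread calls notifyAll() once every META region has been scanned, while other threads loop in wait(timeout) until the condition holds or shutdown is requested. A minimal, self-contained sketch of that handshake follows; it is illustrative plain Java, not HBase code, and all names in it are invented for the example.

// A gate that one thread opens (markAllScanned) and others wait on,
// mirroring the notifyAll()/wait(threadWakeFrequency) pair in MetaScanner.
public class ScanGate {
  private boolean allScanned = false;
  private volatile boolean closed = false;

  synchronized void markAllScanned() {
    allScanned = true;
    notifyAll();               // wake any waiters, like metaRegionsScanned()
  }

  synchronized boolean waitForScanOrClose(long wakeFrequencyMs)
      throws InterruptedException {
    while (!closed && !allScanned) {
      wait(wakeFrequencyMs);   // re-check periodically, like waitForMetaRegionsOrClose()
    }
    return closed;             // callers learn whether we exited due to shutdown
  }

  void close() { closed = true; }
}

Waiting in a loop with a timeout, rather than a single unbounded wait(), is what lets the master notice the closed flag even if a notification is missed.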
@@ -0,0 +1,61 @@
/**
 * Copyright 2008 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.util.Map;
import java.io.IOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInterface;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.io.Text;

/** Instantiated to modify an existing column family on a table */
class ModifyColumn extends ColumnOperation {
  private final HColumnDescriptor descriptor;
  private final Text columnName;

  ModifyColumn(final HMaster master, final Text tableName,
    final Text columnName, HColumnDescriptor descriptor)
  throws IOException {
    super(master, tableName);
    this.descriptor = descriptor;
    this.columnName = columnName;
  }

  @Override
  protected void postProcessMeta(MetaRegion m, HRegionInterface server)
  throws IOException {
    for (HRegionInfo i: unservedRegions) {
      // get the column families map from the table descriptor
      Map<Text, HColumnDescriptor> families = i.getTableDesc().families();

      // if the table already has this column, then put the new descriptor
      // version.
      if (families.get(columnName) != null) {
        families.put(columnName, descriptor);
        updateRegionInfo(server, m.getRegionName(), i);
      } else { // otherwise, we have an error.
        throw new IOException("Column family '" + columnName +
          "' doesn't exist, so cannot be modified.");
      }
    }
  }
}
@@ -0,0 +1,107 @@
/**
 * Copyright 2008 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;

import org.apache.hadoop.hbase.HRegion;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;

/**
 * ProcessRegionClose is instantiated when a region server reports that it
 * has closed a region.
 */
class ProcessRegionClose extends ProcessRegionStatusChange {
  private boolean reassignRegion;
  private boolean deleteRegion;

  /**
   * @param regionInfo
   * @param reassignRegion
   * @param deleteRegion
   */
  public ProcessRegionClose(HMaster master, HRegionInfo regionInfo,
    boolean reassignRegion, boolean deleteRegion) {

    super(master, regionInfo);
    this.reassignRegion = reassignRegion;
    this.deleteRegion = deleteRegion;
  }

  /** {@inheritDoc} */
  @Override
  public String toString() {
    return "ProcessRegionClose of " + this.regionInfo.getRegionName();
  }

  @Override
  protected boolean process() throws IOException {
    for (int tries = 0; tries < numRetries; tries++) {
      if (master.closed.get()) {
        return true;
      }
      LOG.info("region closed: " + regionInfo.getRegionName());

      // Mark the Region as unavailable in the appropriate meta table

      if (!metaRegionAvailable()) {
        // We can't proceed unless the meta region we are going to update
        // is online. metaRegionAvailable() has put this operation on the
        // delayedToDoQueue, so return true so the operation is not put
        // back on the toDoQueue
        return true;
      }

      try {
        if (deleteRegion) {
          HRegion.removeRegionFromMETA(getMetaServer(), metaRegionName,
            regionInfo.getRegionName());
        } else {
          HRegion.offlineRegionInMETA(getMetaServer(), metaRegionName,
            regionInfo);
        }
        break;

      } catch (IOException e) {
        if (tries == numRetries - 1) {
          throw RemoteExceptionHandler.checkIOException(e);
        }
        continue;
      }
    }

    if (reassignRegion) {
      LOG.info("reassign region: " + regionInfo.getRegionName());

      master.unassignedRegions.put(regionInfo, ZERO_L);

    } else if (deleteRegion) {
      try {
        HRegion.deleteRegion(master.fs, master.rootdir, regionInfo);
      } catch (IOException e) {
        e = RemoteExceptionHandler.checkIOException(e);
        LOG.error("failed delete region " + regionInfo.getRegionName(), e);
        throw e;
      }
    }
    return true;
  }
}
@@ -0,0 +1,119 @@
/**
 * Copyright 2008 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;

import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.HRegionInterface;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.RemoteExceptionHandler;

/**
 * ProcessRegionOpen is instantiated when a region server reports that it is
 * serving a region. This applies to all meta and user regions except the
 * root region which is handled specially.
 */
class ProcessRegionOpen extends ProcessRegionStatusChange {
  private final HServerAddress serverAddress;
  private final byte [] startCode;

  /**
   * @param info
   * @param regionInfo
   * @throws IOException
   */
  public ProcessRegionOpen(HMaster master, HServerInfo info,
    HRegionInfo regionInfo)
  throws IOException {
    super(master, regionInfo);
    this.serverAddress = info.getServerAddress();
    this.startCode = Writables.longToBytes(info.getStartCode());
  }

  /** {@inheritDoc} */
  @Override
  public String toString() {
    return "PendingOpenOperation from " + serverAddress.toString();
  }

  @Override
  protected boolean process() throws IOException {
    for (int tries = 0; tries < numRetries; tries++) {
      if (master.closed.get()) {
        return true;
      }
      LOG.info(regionInfo.toString() + " open on " +
        this.serverAddress.toString());

      if (!metaRegionAvailable()) {
        // We can't proceed unless the meta region we are going to update
        // is online. metaRegionAvailable() has put this operation on the
        // delayedToDoQueue, so return true so the operation is not put
        // back on the toDoQueue
        return true;
      }

      // Register the newly-available Region's location.

      HRegionInterface server = getMetaServer();
      LOG.info("updating row " + regionInfo.getRegionName() + " in table " +
        metaRegionName + " with startcode " +
        Writables.bytesToLong(this.startCode) + " and server "+
        serverAddress.toString());
      try {
        BatchUpdate b = new BatchUpdate(regionInfo.getRegionName());
        b.put(COL_SERVER, Writables.stringToBytes(serverAddress.toString()));
        b.put(COL_STARTCODE, startCode);
        server.batchUpdate(metaRegionName, b);
        if (isMetaTable) {
          // It's a meta region.
          MetaRegion m = new MetaRegion(this.serverAddress,
            this.regionInfo.getRegionName(), this.regionInfo.getStartKey());
          if (!master.initialMetaScanComplete) {
            // Put it on the queue to be scanned for the first time.
            try {
              LOG.debug("Adding " + m.toString() + " to regions to scan");
              master.metaRegionsToScan.put(m);
            } catch (InterruptedException e) {
              throw new RuntimeException(
                "Putting into metaRegionsToScan was interrupted.", e);
            }
          } else {
            // Add it to the online meta regions
            LOG.debug("Adding to onlineMetaRegions: " + m.toString());
            master.onlineMetaRegions.put(this.regionInfo.getStartKey(), m);
          }
        }
        // If updated successfully, remove from pending list.
        master.pendingRegions.remove(regionInfo.getRegionName());
        break;
      } catch (IOException e) {
        if (tries == numRetries - 1) {
          throw RemoteExceptionHandler.checkIOException(e);
        }
      }
    }
    return true;
  }
}
@@ -0,0 +1,96 @@
/**
 * Copyright 2008 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionInterface;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.io.Text;

/**
 * Abstract class that performs common operations for
 * @see #ProcessRegionClose and @see #ProcessRegionOpen
 */
abstract class ProcessRegionStatusChange extends RegionServerOperation {
  protected final boolean isMetaTable;
  protected final HRegionInfo regionInfo;
  private MetaRegion metaRegion;
  protected Text metaRegionName;

  /**
   * @param regionInfo
   */
  public ProcessRegionStatusChange(HMaster master, HRegionInfo regionInfo) {
    super(master);
    this.regionInfo = regionInfo;
    this.isMetaTable = regionInfo.isMetaTable();
    this.metaRegion = null;
    this.metaRegionName = null;
  }

  protected boolean metaRegionAvailable() {
    boolean available = true;
    if (isMetaTable) {
      // This operation is for the meta table
      if (!rootAvailable()) {
        // But we can't proceed unless the root region is available
        available = false;
      }
    } else {
      if (!master.rootScanned || !metaTableAvailable()) {
        // The root region has not been scanned or the meta table is not
        // available so we can't proceed.
        // Put the operation on the delayedToDoQueue
        requeue();
        available = false;
      }
    }
    return available;
  }

  protected HRegionInterface getMetaServer() throws IOException {
    if (this.isMetaTable) {
      this.metaRegionName = HRegionInfo.rootRegionInfo.getRegionName();
    } else {
      if (this.metaRegion == null) {
        synchronized (master.onlineMetaRegions) {
          metaRegion = master.onlineMetaRegions.size() == 1 ?
            master.onlineMetaRegions.get(master.onlineMetaRegions.firstKey()) :
            master.onlineMetaRegions.containsKey(regionInfo.getRegionName()) ?
              master.onlineMetaRegions.get(regionInfo.getRegionName()) :
              master.onlineMetaRegions.get(master.onlineMetaRegions.headMap(
                regionInfo.getRegionName()).lastKey());
        }
        this.metaRegionName = metaRegion.getRegionName();
      }
    }

    HServerAddress server = null;
    if (isMetaTable) {
      server = master.rootRegionLocation.get();

    } else {
      server = metaRegion.getServer();
    }
    return master.connection.getHRegionConnection(server);
  }
}
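The headMap(...).lastKey() lookup in getMetaServer() works because MetaRegion entries are kept in a map sorted by start key: the META region covering a user region is the one with the greatest start key less than or equal to that region's name. A stand-alone illustration of the same lookup follows, using plain strings in place of org.apache.hadoop.io.Text; the map contents and region names here are made up for the example.

import java.util.TreeMap;

// Demonstrates locating the covering META region in a sorted map,
// mirroring the containsKey/headMap(...).lastKey() chain above.
public class MetaLookupSketch {
  public static void main(String[] args) {
    TreeMap<String, String> onlineMetaRegions = new TreeMap<String, String>();
    onlineMetaRegions.put("", "meta-region-1");   // empty start key covers the lowest rows
    onlineMetaRegions.put("m", "meta-region-2");  // covers rows sorting at or after "m"

    String regionName = "tableA,rowQ,123456789";
    // Exact hit first; otherwise take the greatest start key strictly below
    // regionName. The "" entry guarantees headMap() is never empty here.
    String covering = onlineMetaRegions.containsKey(regionName)
        ? onlineMetaRegions.get(regionName)
        : onlineMetaRegions.get(onlineMetaRegions.headMap(regionName).lastKey());
    System.out.println(covering);  // prints meta-region-2, since "t" sorts after "m"
  }
}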
@ -0,0 +1,337 @@
|
|||
/**
 * Copyright 2008 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.SortedMap;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HLog;
import org.apache.hadoop.hbase.HRegion;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionInterface;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.io.HbaseMapWritable;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;

/**
 * Instantiated when a server's lease has expired, meaning it has crashed.
 * The region server's log file needs to be split up for each region it was
 * serving, and the regions need to get reassigned.
 */
class ProcessServerShutdown extends RegionServerOperation {
  private HServerAddress deadServer;
  private String deadServerName;
  private Path oldLogDir;
  private boolean logSplit;
  private boolean rootRescanned;

  private class ToDoEntry {
    boolean deleteRegion;
    boolean regionOffline;
    Text row;
    HRegionInfo info;

    ToDoEntry(Text row, HRegionInfo info) {
      this.deleteRegion = false;
      this.regionOffline = false;
      this.row = row;
      this.info = info;
    }
  }

  /**
   * @param serverInfo information about the server whose lease has expired
   */
  public ProcessServerShutdown(HMaster master, HServerInfo serverInfo) {
    super(master);
    this.deadServer = serverInfo.getServerAddress();
    this.deadServerName = this.deadServer.toString();
    this.logSplit = false;
    this.rootRescanned = false;
    StringBuilder dirName = new StringBuilder("log_");
    dirName.append(deadServer.getBindAddress());
    dirName.append("_");
    dirName.append(serverInfo.getStartCode());
    dirName.append("_");
    dirName.append(deadServer.getPort());
    this.oldLogDir = new Path(master.rootdir, dirName.toString());
  }

  /** {@inheritDoc} */
  @Override
  public String toString() {
    return "ProcessServerShutdown of " + this.deadServer.toString();
  }

  /** Finds regions that the dead region server was serving */
  private void scanMetaRegion(HRegionInterface server, long scannerId,
      Text regionName) throws IOException {

    ArrayList<ToDoEntry> toDoList = new ArrayList<ToDoEntry>();
    HashSet<HRegionInfo> regions = new HashSet<HRegionInfo>();

    try {
      while (true) {
        HbaseMapWritable values = null;
        try {
          values = server.next(scannerId);
        } catch (IOException e) {
          LOG.error("Shutdown scanning of meta region",
            RemoteExceptionHandler.checkIOException(e));
          break;
        }
        if (values == null || values.size() == 0) {
          break;
        }
        // TODO: Why does this have to be a sorted map?
        RowMap rm = RowMap.fromHbaseMapWritable(values);
        Text row = rm.getRow();
        SortedMap<Text, byte[]> map = rm.getMap();
        if (LOG.isDebugEnabled() && row != null) {
          LOG.debug("shutdown scanner looking at " + row.toString());
        }

        // Check server name. If null, be conservative and treat as though
        // region had been on shutdown server (could be null because we
        // missed edits in hlog because hdfs does not do write-append).
        String serverName;
        try {
          serverName = Writables.bytesToString(map.get(COL_SERVER));
        } catch (UnsupportedEncodingException e) {
          LOG.error("Server name", e);
          break;
        }
        if (serverName.length() > 0 &&
            deadServerName.compareTo(serverName) != 0) {
          // This isn't the server you're looking for - move along
          if (LOG.isDebugEnabled()) {
            LOG.debug("Server name " + serverName + " is not same as " +
              deadServerName + ": Passing");
          }
          continue;
        }

        // Bingo! Found it.
        HRegionInfo info = master.getHRegionInfo(map);
        if (info == null) {
          continue;
        }
        LOG.info(info.getRegionName() + " was on shutdown server <" +
          serverName + "> (or server is null). Marking unassigned in " +
          "meta and clearing pendingRegions");

        if (info.isMetaTable()) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("removing meta region " + info.getRegionName() +
              " from online meta regions");
          }
          master.onlineMetaRegions.remove(info.getStartKey());
        }

        ToDoEntry todo = new ToDoEntry(row, info);
        toDoList.add(todo);

        if (master.killList.containsKey(deadServerName)) {
          HashMap<Text, HRegionInfo> regionsToKill =
            new HashMap<Text, HRegionInfo>();
          synchronized (master.killList) {
            regionsToKill.putAll(master.killList.get(deadServerName));
          }

          if (regionsToKill.containsKey(info.getRegionName())) {
            regionsToKill.remove(info.getRegionName());
            master.killList.put(deadServerName, regionsToKill);
            master.unassignedRegions.remove(info);
            synchronized (master.regionsToDelete) {
              if (master.regionsToDelete.contains(info.getRegionName())) {
                // Delete this region
                master.regionsToDelete.remove(info.getRegionName());
                todo.deleteRegion = true;
              } else {
                // Mark region offline
                todo.regionOffline = true;
              }
            }
          }

        } else {
          // Get region reassigned
          regions.add(info);

          // If it was pending, remove it.
          // Otherwise it will obstruct the region's reassignment.
          master.pendingRegions.remove(info.getRegionName());
        }
      }
    } finally {
      if (scannerId != -1L) {
        try {
          server.close(scannerId);
        } catch (IOException e) {
          LOG.error("Closing scanner",
            RemoteExceptionHandler.checkIOException(e));
        }
      }
    }

    // Update server in root/meta entries
    for (ToDoEntry e: toDoList) {
      if (e.deleteRegion) {
        HRegion.removeRegionFromMETA(server, regionName, e.row);
      } else if (e.regionOffline) {
        HRegion.offlineRegionInMETA(server, regionName, e.info);
      }
    }

    // Get regions reassigned
    for (HRegionInfo info: regions) {
      master.unassignedRegions.put(info, ZERO_L);
    }
  }

  @Override
  protected boolean process() throws IOException {
    LOG.info("process shutdown of server " + deadServer + ": logSplit: " +
      this.logSplit + ", rootRescanned: " + this.rootRescanned +
      ", numberOfMetaRegions: " + master.numberOfMetaRegions.get() +
      ", onlineMetaRegions.size(): " + master.onlineMetaRegions.size());

    if (!logSplit) {
      // Process the old log file
      if (master.fs.exists(oldLogDir)) {
        if (!master.splitLogLock.tryLock()) {
          return false;
        }
        try {
          HLog.splitLog(master.rootdir, oldLogDir, master.fs, master.conf);
        } finally {
          master.splitLogLock.unlock();
        }
      }
      logSplit = true;
    }

    if (!rootAvailable()) {
      // Return true so that worker does not put this request back on the
      // toDoQueue.
      // rootAvailable() has already put it on the delayedToDoQueue
      return true;
    }

    if (!rootRescanned) {
      // Scan the ROOT region
      HRegionInterface server = null;
      long scannerId = -1L;
      for (int tries = 0; tries < numRetries; tries++) {
        if (master.closed.get()) {
          return true;
        }
        server = master.connection.getHRegionConnection(
          master.rootRegionLocation.get());
        scannerId = -1L;

        try {
          if (LOG.isDebugEnabled()) {
            LOG.debug("process server shutdown scanning root region on " +
              master.rootRegionLocation.get().getBindAddress());
          }
          scannerId =
            server.openScanner(HRegionInfo.rootRegionInfo.getRegionName(),
              COLUMN_FAMILY_ARRAY, EMPTY_START_ROW,
              System.currentTimeMillis(), null);

          scanMetaRegion(server, scannerId,
            HRegionInfo.rootRegionInfo.getRegionName());
          break;

        } catch (IOException e) {
          if (tries == numRetries - 1) {
            throw RemoteExceptionHandler.checkIOException(e);
          }
        }
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("process server shutdown scanning root region on " +
          master.rootRegionLocation.get().getBindAddress() + " finished " +
          Thread.currentThread().getName());
      }
      rootRescanned = true;
    }

    if (!metaTableAvailable()) {
      // We can't proceed because not all meta regions are online.
      // metaTableAvailable() has put this request on the delayedToDoQueue.
      // Return true so that worker does not put this on the toDoQueue.
      return true;
    }

    for (int tries = 0; tries < numRetries; tries++) {
      try {
        if (master.closed.get()) {
          return true;
        }
        List<MetaRegion> regions = new ArrayList<MetaRegion>();
        synchronized (master.onlineMetaRegions) {
          regions.addAll(master.onlineMetaRegions.values());
        }
        for (MetaRegion r: regions) {
          HRegionInterface server = null;
          long scannerId = -1L;

          if (LOG.isDebugEnabled()) {
            LOG.debug("process server shutdown scanning " +
              r.getRegionName() + " on " + r.getServer() + " " +
              Thread.currentThread().getName());
          }
          server = master.connection.getHRegionConnection(r.getServer());

          scannerId =
            server.openScanner(r.getRegionName(), COLUMN_FAMILY_ARRAY,
              EMPTY_START_ROW, System.currentTimeMillis(), null);

          scanMetaRegion(server, scannerId, r.getRegionName());

          if (LOG.isDebugEnabled()) {
            LOG.debug("process server shutdown finished scanning " +
              r.getRegionName() + " on " + r.getServer() + " " +
              Thread.currentThread().getName());
          }
        }
        master.deadServers.remove(deadServerName);
        break;

      } catch (IOException e) {
        if (tries == numRetries - 1) {
          throw RemoteExceptionHandler.checkIOException(e);
        }
      }
    }
    return true;
  }
}
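Editor's note: process() above leans twice on the same retry idiom: loop numRetries times, swallow the IOException until the last attempt, then rethrow. A hedged, generic sketch of that idiom follows; withRetries and everything else below are illustrative names, not HBase API.

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicInteger;

public class RetrySketch {
  // Attempt an operation up to numRetries times; rethrow only when the
  // final attempt fails, otherwise fall through and try again.
  static <T> T withRetries(Callable<T> op, int numRetries) throws Exception {
    for (int tries = 0; tries < numRetries; tries++) {
      try {
        return op.call();
      } catch (IOException e) {
        if (tries == numRetries - 1) {
          throw e; // no retries left
        }
      }
    }
    throw new IllegalStateException("unreachable");
  }

  public static void main(String[] args) throws Exception {
    final AtomicInteger attempts = new AtomicInteger();
    String result = withRetries(new Callable<String>() {
      public String call() throws IOException {
        if (attempts.incrementAndGet() < 3) {
          throw new IOException("transient failure");
        }
        return "ok after " + attempts.get() + " attempts";
      }
    }, 5);
    System.out.println(result);
  }
}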

@@ -0,0 +1,94 @@
/**
 * Copyright 2008 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;

abstract class RegionServerOperation implements Delayed, HConstants {
  protected static final Log LOG =
    LogFactory.getLog(RegionServerOperation.class.getName());

  private long expire;
  protected final HMaster master;
  protected final int numRetries;

  protected RegionServerOperation(HMaster master) {
    this.master = master;
    this.numRetries = master.numRetries;
    // Set the future time at which we expect to be released from the
    // DelayQueue we're inserted in on lease expiration.
    this.expire = System.currentTimeMillis() + this.master.leaseTimeout / 2;
  }

  /** {@inheritDoc} */
  public long getDelay(TimeUnit unit) {
    return unit.convert(this.expire - System.currentTimeMillis(),
      TimeUnit.MILLISECONDS);
  }

  /** {@inheritDoc} */
  public int compareTo(Delayed o) {
    // Compare by the sign of the difference in remaining delay, avoiding
    // the int truncation that Long.valueOf(...).intValue() would risk.
    long delta = getDelay(TimeUnit.MILLISECONDS) -
      o.getDelay(TimeUnit.MILLISECONDS);
    return delta < 0 ? -1 : (delta > 0 ? 1 : 0);
  }

  protected void requeue() {
    this.expire = System.currentTimeMillis() + this.master.leaseTimeout / 2;
    master.delayedToDoQueue.put(this);
  }

  protected boolean rootAvailable() {
    boolean available = true;
    if (master.rootRegionLocation.get() == null) {
      available = false;
      requeue();
    }
    return available;
  }

  protected boolean metaTableAvailable() {
    boolean available = true;
    if (LOG.isDebugEnabled()) {
      LOG.debug("numberOfMetaRegions: " + master.numberOfMetaRegions.get() +
        ", onlineMetaRegions.size(): " + master.onlineMetaRegions.size());
    }
    if (master.numberOfMetaRegions.get() != master.onlineMetaRegions.size()) {
      // We can't proceed because not all of the meta regions are online.
      // We can't block either because that would prevent the meta region
      // online message from being processed. In order to prevent spinning
      // in the run queue, put this request on the delay queue to give
      // other threads the opportunity to get the meta regions on-line.
      if (LOG.isDebugEnabled()) {
        LOG.debug("Requeuing because not all meta regions are online");
      }
      available = false;
      requeue();
    }
    return available;
  }

  protected abstract boolean process() throws IOException;
}
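Editor's note: getDelay()/compareTo() above implement java.util.concurrent.Delayed so that operations can sit on the master's DelayQueue until their expiry elapses. A standalone sketch of that contract follows (class and field names are illustrative); it compares via the sign of the delay difference, mirroring the fixed compareTo above.

import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

public class DelaySketch implements Delayed {
  private final long expire;
  private final String name;

  DelaySketch(String name, long delayMillis) {
    this.name = name;
    this.expire = System.currentTimeMillis() + delayMillis;
  }

  public long getDelay(TimeUnit unit) {
    return unit.convert(expire - System.currentTimeMillis(),
      TimeUnit.MILLISECONDS);
  }

  public int compareTo(Delayed o) {
    // Compare by remaining delay, by sign, to avoid int truncation.
    long delta = getDelay(TimeUnit.MILLISECONDS) -
      o.getDelay(TimeUnit.MILLISECONDS);
    return delta < 0 ? -1 : (delta > 0 ? 1 : 0);
  }

  public static void main(String[] args) throws InterruptedException {
    DelayQueue<DelaySketch> q = new DelayQueue<DelaySketch>();
    q.put(new DelaySketch("later", 200));
    q.put(new DelaySketch("sooner", 50));
    // take() blocks until an element's delay has elapsed.
    System.out.println(q.take().name); // sooner
    System.out.println(q.take().name); // later
  }
}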

@@ -0,0 +1,85 @@
/**
 * Copyright 2008 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;

/** Scanner for the <code>ROOT</code> HRegion. */
class RootScanner extends BaseScanner {
  /** Constructor */
  public RootScanner(HMaster master) {
    super(master, true, master.metaRescanInterval, master.closed);
  }

  private boolean scanRoot() {
    // Don't retry if we get an error while scanning. Errors are most often
    // caused by the server going away. Wait until next rescan interval when
    // things should be back to normal.
    boolean scanSuccessful = false;
    synchronized (master.rootRegionLocation) {
      while (!master.closed.get() && master.rootRegionLocation.get() == null) {
        // rootRegionLocation will be filled in when we get an 'open region'
        // regionServerReport message from the HRegionServer that has been
        // allocated the ROOT region below.
        try {
          master.rootRegionLocation.wait();
        } catch (InterruptedException e) {
          // continue
        }
      }
    }
    if (master.closed.get()) {
      return scanSuccessful;
    }

    try {
      // Don't interrupt us while we're working
      synchronized (master.rootScannerLock) {
        scanRegion(new MetaRegion(master.rootRegionLocation.get(),
          HRegionInfo.rootRegionInfo.getRegionName(), null));
      }
      scanSuccessful = true;
    } catch (IOException e) {
      e = RemoteExceptionHandler.checkIOException(e);
      LOG.warn("Scan ROOT region", e);
      // Make sure the file system is still available
      master.checkFileSystem();
    } catch (Exception e) {
      // If for some reason we get some other kind of exception,
      // at least log it rather than go out silently.
      LOG.error("Unexpected exception", e);
    }
    return scanSuccessful;
  }

  @Override
  protected boolean initialScan() {
    master.rootScanned = scanRoot();
    return master.rootScanned;
  }

  @Override
  protected void maintenanceScan() {
    scanRoot();
  }
}
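Editor's note: scanRoot() above blocks with wait() on rootRegionLocation until a region server reports the ROOT region open and some other thread calls notifyAll() on the same holder. A minimal sketch of that wait/notifyAll handshake follows; it is not patch code, and the hostname and sleep timing are made up.

import java.util.concurrent.atomic.AtomicReference;

public class WaitForLocationSketch {
  static final AtomicReference<String> rootLocation =
    new AtomicReference<String>();

  public static void main(String[] args) throws InterruptedException {
    Thread assigner = new Thread(new Runnable() {
      public void run() {
        try {
          Thread.sleep(100); // simulate the region server reporting in
        } catch (InterruptedException e) {
          // ignore
        }
        synchronized (rootLocation) {
          rootLocation.set("host:60020");
          rootLocation.notifyAll(); // wake the waiting scanner
        }
      }
    });
    assigner.start();

    synchronized (rootLocation) {
      while (rootLocation.get() == null) {
        rootLocation.wait(); // lock released while waiting
      }
    }
    System.out.println("root region at " + rootLocation.get());
    assigner.join();
  }
}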

@@ -0,0 +1,85 @@
/**
 * Copyright 2008 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.io.HbaseMapWritable;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

/*
 * Data structure used to return results out of the fromHbaseMapWritable
 * method: a row and the sorted map of its column values.
 */
class RowMap {
  static final Log LOG = LogFactory.getLog(RowMap.class.getName());

  final Text row;
  final SortedMap<Text, byte[]> map;

  RowMap(final Text r, final SortedMap<Text, byte[]> m) {
    this.row = r;
    this.map = m;
  }

  Text getRow() {
    return this.row;
  }

  SortedMap<Text, byte[]> getMap() {
    return this.map;
  }

  /*
   * Convert an HbaseMapWritable to a Map keyed by column.
   * Utility method used when scanning meta regions.
   * @param mw The MapWritable to convert. Cannot be null.
   * @return A RowMap wrapping the row and a SortedMap of its column
   * values. TODO: The inner map looks like it could be a plain Map.
   */
  public static RowMap fromHbaseMapWritable(HbaseMapWritable mw) {
    if (mw == null) {
      throw new IllegalArgumentException("Passed MapWritable cannot be null");
    }
    SortedMap<Text, byte[]> m = new TreeMap<Text, byte[]>();
    Text row = null;
    for (Map.Entry<Writable, Writable> e: mw.entrySet()) {
      HStoreKey key = (HStoreKey) e.getKey();
      Text thisRow = key.getRow();
      if (row == null) {
        row = thisRow;
      } else {
        if (!row.equals(thisRow)) {
          LOG.error("Multiple rows in same scanner result set. firstRow=" +
            row + ", currentRow=" + thisRow);
        }
      }
      m.put(key.getColumn(), ((ImmutableBytesWritable) e.getValue()).get());
    }
    return new RowMap(row, m);
  }
}
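Editor's note: fromHbaseMapWritable() above folds one scanner result into a (row, column -> value) pair and logs when cells from different rows show up. A plain-Java sketch of the same fold follows, with a simple Cell triple standing in for HStoreKey/ImmutableBytesWritable; all names and sample values are illustrative.

import java.util.ArrayList;
import java.util.List;
import java.util.SortedMap;
import java.util.TreeMap;

public class RowMapSketch {
  static class Cell {
    final String row;
    final String column;
    final byte[] value;
    Cell(String row, String column, byte[] value) {
      this.row = row;
      this.column = column;
      this.value = value;
    }
  }

  // Fold one scanner result into a column -> value map, complaining if
  // cells from more than one row appear in the same result.
  static SortedMap<String, byte[]> toRowMap(List<Cell> cells) {
    SortedMap<String, byte[]> m = new TreeMap<String, byte[]>();
    String row = null;
    for (Cell c : cells) {
      if (row == null) {
        row = c.row;
      } else if (!row.equals(c.row)) {
        System.err.println("Multiple rows in one result: " + row +
          " vs " + c.row);
      }
      m.put(c.column, c.value);
    }
    return m;
  }

  public static void main(String[] args) {
    List<Cell> cells = new ArrayList<Cell>();
    cells.add(new Cell("region-x", "info:server", "host:60020".getBytes()));
    cells.add(new Cell("region-x", "info:startcode", "12345".getBytes()));
    System.out.println(toRowMap(cells).keySet());
    // prints [info:server, info:startcode]
  }
}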

@@ -0,0 +1,75 @@
/**
 * Copyright 2008 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.util.HashSet;

import org.apache.hadoop.hbase.HRegion;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionInterface;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.io.Text;

/**
 * Instantiated to delete a table.
 * Note that it extends ChangeTableState, which takes care of disabling
 * the table.
 */
class TableDelete extends ChangeTableState {

  TableDelete(final HMaster master, final Text tableName) throws IOException {
    super(master, tableName, false);
  }

  @Override
  protected void postProcessMeta(MetaRegion m, HRegionInterface server)
      throws IOException {
    // For regions that are being served, mark them for deletion
    for (HashSet<HRegionInfo> s: servedRegions.values()) {
      for (HRegionInfo i: s) {
        this.master.regionsToDelete.add(i.getRegionName());
      }
    }

    // Unserved regions we can delete now
    for (HRegionInfo i: unservedRegions) {
      // Delete the region
      try {
        HRegion.deleteRegion(this.master.fs, this.master.rootdir, i);

      } catch (IOException e) {
        LOG.error("failed to delete region " + i.getRegionName(),
          RemoteExceptionHandler.checkIOException(e));
      }
    }
    super.postProcessMeta(m, server);
  }

  @Override
  protected void updateRegionInfo(BatchUpdate b,
      @SuppressWarnings("unused") HRegionInfo info) {
    for (int i = 0; i < ALL_META_COLUMNS.length; i++) {
      // Be sure to clean all cells
      b.delete(ALL_META_COLUMNS[i]);
    }
  }
}
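Editor's note: TableDelete relies on the template-method layering of ChangeTableState, doing its region cleanup first and then calling super so the parent's META update still runs. A toy sketch of that call ordering follows; the classes below are stand-ins, not the actual ChangeTableState API.

public class TemplateSketch {
  static class ChangeState {
    void postProcessMeta() {
      System.out.println("mark table rows disabled/enabled in META");
    }
  }

  static class Delete extends ChangeState {
    @Override
    void postProcessMeta() {
      System.out.println("delete unserved regions from the filesystem");
      super.postProcessMeta(); // the parent's META update still runs
    }
  }

  public static void main(String[] args) {
    new Delete().postProcessMeta();
  }
}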

@@ -0,0 +1,184 @@
/**
 * Copyright 2008 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import java.util.SortedMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionInterface;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.io.HbaseMapWritable;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;

abstract class TableOperation implements HConstants {
  static final Long ZERO_L = Long.valueOf(0L);

  protected static final Log LOG =
    LogFactory.getLog(TableOperation.class.getName());

  protected Set<MetaRegion> metaRegions;
  protected Text tableName;
  protected Set<HRegionInfo> unservedRegions;
  protected HMaster master;
  protected final int numRetries;

  protected TableOperation(final HMaster master, final Text tableName)
      throws IOException {
    this.numRetries = master.numRetries;

    this.master = master;

    if (!this.master.isMasterRunning()) {
      throw new MasterNotRunningException();
    }

    this.metaRegions = new HashSet<MetaRegion>();
    this.tableName = tableName;
    this.unservedRegions = new HashSet<HRegionInfo>();

    // We cannot access any meta region until it has been assigned and
    // scanned.
    if (this.master.metaScannerThread.waitForMetaRegionsOrClose()) {
      throw new MasterNotRunningException(); // We're shutting down. Forget it.
    }

    Text firstMetaRegion = null;
    synchronized (this.master.onlineMetaRegions) {
      if (this.master.onlineMetaRegions.size() == 1) {
        firstMetaRegion = this.master.onlineMetaRegions.firstKey();

      } else if (this.master.onlineMetaRegions.containsKey(tableName)) {
        firstMetaRegion = tableName;

      } else {
        firstMetaRegion =
          this.master.onlineMetaRegions.headMap(tableName).lastKey();
      }
      this.metaRegions.addAll(this.master.onlineMetaRegions.tailMap(
        firstMetaRegion).values());
    }
  }

  void process() throws IOException {
    for (int tries = 0; tries < numRetries; tries++) {
      boolean tableExists = false;
      try {
        // Prevent meta scanner from running
        synchronized (this.master.metaScannerLock) {
          for (MetaRegion m: metaRegions) {

            // Get a connection to a meta server
            HRegionInterface server =
              this.master.connection.getHRegionConnection(m.getServer());

            // Open a scanner on the meta region
            long scannerId =
              server.openScanner(m.getRegionName(), COLUMN_FAMILY_ARRAY,
                tableName, System.currentTimeMillis(), null);

            try {
              while (true) {
                HbaseMapWritable values = server.next(scannerId);
                if (values == null || values.size() == 0) {
                  break;
                }
                RowMap rm = RowMap.fromHbaseMapWritable(values);
                SortedMap<Text, byte[]> map = rm.getMap();
                HRegionInfo info = this.master.getHRegionInfo(map);
                if (info == null) {
                  throw new IOException(COL_REGIONINFO + " not found on " +
                    rm.getRow());
                }
                String serverName =
                  Writables.bytesToString(map.get(COL_SERVER));
                long startCode = Writables.bytesToLong(map.get(COL_STARTCODE));
                if (info.getTableDesc().getName().compareTo(tableName) > 0) {
                  break; // Beyond any more entries for this table
                }

                tableExists = true;
                if (!isBeingServed(serverName, startCode)) {
                  unservedRegions.add(info);
                }
                processScanItem(serverName, startCode, info);
              } // while(true)
            } finally {
              if (scannerId != -1L) {
                try {
                  server.close(scannerId);
                } catch (IOException e) {
                  e = RemoteExceptionHandler.checkIOException(e);
                  LOG.error("closing scanner", e);
                }
              }
              scannerId = -1L;
            }

            if (!tableExists) {
              throw new IOException(tableName + " does not exist");
            }

            postProcessMeta(m, server);
            unservedRegions.clear();

          } // for(MetaRegion m:)
        } // synchronized(metaScannerLock)

      } catch (IOException e) {
        if (tries == numRetries - 1) {
          // No retries left
          this.master.checkFileSystem();
          throw RemoteExceptionHandler.checkIOException(e);
        }
        continue;
      }
      break;
    } // for(tries...)
  }

  protected boolean isBeingServed(String serverName, long startCode) {
    boolean result = false;
    if (serverName != null && serverName.length() > 0 && startCode != -1L) {
      HServerInfo s = this.master.serversToServerInfo.get(serverName);
      result = s != null && s.getStartCode() == startCode;
    }
    return result;
  }

  protected boolean isEnabled(HRegionInfo info) {
    return !info.isOffline();
  }

  protected abstract void processScanItem(String serverName, long startCode,
    HRegionInfo info) throws IOException;

  protected abstract void postProcessMeta(MetaRegion m,
    HRegionInterface server) throws IOException;
}
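Editor's note: the TableOperation constructor above selects which meta regions to scan: the last meta region starting at or before the table name, plus every region after it. A runnable sketch of that firstKey/containsKey/headMap selection followed by tailMap is below; String keys stand in for Text, and the server names are made up.

import java.util.SortedMap;
import java.util.TreeMap;

public class MetaRangeSketch {
  public static void main(String[] args) {
    SortedMap<String, String> online = new TreeMap<String, String>();
    online.put("", "meta-1");       // meta region covering the low keys
    online.put("tableH", "meta-2");
    online.put("tableR", "meta-3");

    String tableName = "tableM";
    String first;
    if (online.size() == 1) {
      first = online.firstKey();
    } else if (online.containsKey(tableName)) {
      first = tableName;
    } else {
      // Closest meta region starting before the table name.
      first = online.headMap(tableName).lastKey();
    }
    // tailMap(first) is inclusive, so the covering region and all
    // later ones get scanned.
    System.out.println(online.tailMap(first).values());
    // prints [meta-2, meta-3]
  }
}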

@@ -4,11 +4,10 @@
import="org.apache.hadoop.util.VersionInfo"
|
||||
import="org.apache.hadoop.hbase.master.HMaster"
|
||||
import="org.apache.hadoop.hbase.HConstants"
|
||||
import="org.apache.hadoop.hbase.master.HMaster.MetaRegion"
|
||||
import="org.apache.hadoop.hbase.master.MetaRegion"
|
||||
import="org.apache.hadoop.hbase.HBaseAdmin"
|
||||
import="org.apache.hadoop.hbase.HServerInfo"
|
||||
import="org.apache.hadoop.hbase.HServerAddress"
|
||||
import="org.apache.hadoop.hbase.HRegionInfo"
|
||||
import="org.apache.hadoop.hbase.HBaseConfiguration"
|
||||
import="org.apache.hadoop.hbase.hql.ShowCommand"
|
||||
import="org.apache.hadoop.hbase.hql.TableFormatter"
|
||||
|

@@ -56,7 +55,7 @@
<tr><td><%= HConstants.ROOT_TABLE_NAME.toString() %></td><td><%= rootLocation.toString() %></td></tr>
<%
  if (onlineRegions != null && onlineRegions.size() > 0) { %>
<% for (Map.Entry<Text, HMaster.MetaRegion> e: onlineRegions.entrySet()) {
<% for (Map.Entry<Text, MetaRegion> e: onlineRegions.entrySet()) {
     MetaRegion meta = e.getValue();
%>
<tr><td><%= meta.getRegionName().toString() %></td><td><%= meta.getServer().toString() %></td></tr>
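Editor's note: the two JSP hunks above exist only because MetaRegion moved from a nested class of HMaster to a top-level class in org.apache.hadoop.hbase.master, which is the heart of HBASE-417. A tiny illustrative sketch of what such a move changes for callers follows; the names below are invented, not the HBase classes.

public class ImportRefactorSketch {
  static class Outer {
    static class Nested {} // before: callers wrote Outer.Nested
  }
  static class TopLevel {} // after: callers reference TopLevel directly

  public static void main(String[] args) {
    Outer.Nested before = new Outer.Nested();
    TopLevel after = new TopLevel();
    System.out.println(before.getClass().getName()); // ...Outer$Nested
    System.out.println(after.getClass().getName());  // ...TopLevel
  }
}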