HBASE-1854 Remove the Region Historian

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@816823 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Jean-Daniel Cryans 2009-09-19 00:13:39 +00:00
parent 4c6d3ff0d4
commit ce44c03988
10 changed files with 3 additions and 474 deletions

View File

@ -62,6 +62,7 @@ Release 0.21.0 - Unreleased
HBASE-1574 Client and server APIs to do batch deletes
HBASE-1833 hfile.main fixes
HBASE-1684 Backup (Export/Import) contrib tool for 0.20
HBASE-1854 Remove the Region Historian
OPTIMIZATIONS

View File

@ -1,337 +0,0 @@
/**
* Copyright 2008 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.GregorianCalendar;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
/**
* The Region Historian task is to keep track of every modification a region
* has to go through. Public methods are used to update the information in the
* <code>.META.</code> table and to retrieve it. This is a Singleton. By
* default, the Historian is offline; it will not log. Its enabled in the
* regionserver and master down in their guts after there's some certainty the
* .META. has been deployed.
*/
public class RegionHistorian implements HConstants {
  private static final Log LOG = LogFactory.getLog(RegionHistorian.class);

  /** Connection to the .META. table; null whenever the historian is offline. */
  private HTable metaTable;

  /** Singleton reference */
  private static RegionHistorian historian;

  /**
   * Date formatter for the timestamp in RegionHistoryInformation.
   * NOTE: SimpleDateFormat is not thread-safe; every use of this shared
   * instance must synchronize on it (see
   * RegionHistoryInformation#getTimestampAsString).
   */
  static SimpleDateFormat dateFormat = new SimpleDateFormat(
      "EEE, d MMM yyyy HH:mm:ss");

  /**
   * Event types tracked by the historian.  Each value carries the column
   * qualifier under which events of that type are stored in the
   * CATALOG_HISTORIAN_FAMILY of the .META. table.
   */
  private static enum HistorianQualifierKey {
    REGION_CREATION ( Bytes.toBytes("creation")),
    REGION_OPEN ( Bytes.toBytes("open")),
    REGION_SPLIT ( Bytes.toBytes("split")),
    REGION_COMPACTION ( Bytes.toBytes("compaction")),
    REGION_FLUSH ( Bytes.toBytes("flush")),
    REGION_ASSIGNMENT ( Bytes.toBytes("assignment"));

    /** Column qualifier bytes for this event type. */
    final byte[] key;

    HistorianQualifierKey(byte[] key) {
      this.key = key;
    }
  }

  /** Prefix of split-event descriptions; the parent region name follows it. */
  public static final String SPLIT_PREFIX = "Region split from: ";

  /**
   * Default constructor. Inaccessible.  Use {@link #getInstance()} to obtain
   * the Singleton instance of this class.
   */
  private RegionHistorian() {
    super();
  }

  /**
   * Get the RegionHistorian Singleton instance.
   * @return The region historian
   */
  public synchronized static RegionHistorian getInstance() {
    if (historian == null) {
      historian = new RegionHistorian();
    }
    return historian;
  }

  /**
   * Returns, for a given region name, an ordered list by timestamp of all
   * values in the historian column of the .META. table.
   * @param regionName Region name as a string
   * @return List of RegionHistoryInformation or null if we're offline.
   */
  public List<RegionHistoryInformation> getRegionHistory(byte [] regionName) {
    if (!isOnline()) {
      return null;
    }
    List<RegionHistoryInformation> informations =
      new ArrayList<RegionHistoryInformation>();
    try {
      /*
       * TODO REGION_HISTORIAN_KEYS is used because there is no other way for
       * the moment to retrieve all versions and to have the column key
       * information.  To be changed when HTable.getRow handles versions.
       */
      for (HistorianQualifierKey keyEnu : HistorianQualifierKey.values()) {
        byte[] columnKey = keyEnu.key;
        Get get = new Get(regionName);
        get.addColumn(CATALOG_HISTORIAN_FAMILY, columnKey);
        get.setMaxVersions(ALL_VERSIONS);
        Result result = this.metaTable.get(get);
        if (result != null) {
          for (KeyValue kv : result.raw()) {
            // Instantiate the inner class against this instance rather than
            // through the static 'historian' field (as before): the old form
            // would NPE if the singleton had not been initialized and is
            // misleading inside an instance method.
            informations.add(new RegionHistoryInformation(
                kv.getTimestamp(), columnKey, kv.getValue()));
          }
        }
      }
    } catch (IOException ioe) {
      LOG.warn("Unable to retrieve region history", ioe);
    }
    // Sorted most-recent-first; see RegionHistoryInformation#compareTo.
    Collections.sort(informations);
    return informations;
  }

  /**
   * Method to add an assignment event to the row in the .META table.
   * @param info region being assigned
   * @param serverName name of the server the region was assigned to
   */
  public void addRegionAssignment(HRegionInfo info, String serverName) {
    add(HistorianQualifierKey.REGION_ASSIGNMENT.key, "Region assigned to server "
        + serverName, info);
  }

  /**
   * Method to add a creation event to the row in the .META table.
   * @param info region that was created
   */
  public void addRegionCreation(HRegionInfo info) {
    add(HistorianQualifierKey.REGION_CREATION.key, "Region creation", info);
  }

  /**
   * Method to add an opening event to the row in the .META table.
   * @param info region that was opened
   * @param address address of the hosting server
   */
  public void addRegionOpen(HRegionInfo info, HServerAddress address) {
    add(HistorianQualifierKey.REGION_OPEN.key, "Region opened on server : "
        + address.getHostname(), info);
  }

  /**
   * Method to add a split event to the rows in the .META table with
   * information from oldInfo.
   * @param oldInfo the parent region that split
   * @param newInfo1 first daughter region
   * @param newInfo2 second daughter region
   */
  public void addRegionSplit(HRegionInfo oldInfo, HRegionInfo newInfo1,
      HRegionInfo newInfo2) {
    HRegionInfo[] infos = new HRegionInfo[] { newInfo1, newInfo2 };
    for (HRegionInfo info : infos) {
      add(HistorianQualifierKey.REGION_SPLIT.key, SPLIT_PREFIX +
          oldInfo.getRegionNameAsString(), info);
    }
  }

  /**
   * Method to add a compaction event to the row in the .META table.
   * Only logged when DEBUG is enabled.
   * @param info region that was compacted
   * @param timeTaken formatted duration of the compaction
   */
  public void addRegionCompaction(final HRegionInfo info,
      final String timeTaken) {
    // While historian can not log flushes because it could deadlock the
    // regionserver -- see the note in addRegionFlush -- there should be no
    // such danger compacting; compactions are not allowed when
    // Flusher#flushSomeRegions is run.
    if (LOG.isDebugEnabled()) {
      add(HistorianQualifierKey.REGION_COMPACTION.key,
          "Region compaction completed in " + timeTaken, info);
    }
  }

  /**
   * Method to add a flush event to the row in the .META table.
   * Intentionally a no-op; see comment within.
   * @param info region that was flushed
   * @param timeTaken formatted duration of the flush
   */
  public void addRegionFlush(HRegionInfo info, String timeTaken) {
    // Disabled. Noop. If this regionserver is hosting the .META. AND is
    // holding the reclaimMemcacheMemory global lock --
    // see Flusher#flushSomeRegions -- we deadlock. For now, just disable
    // logging of flushes.
  }

  /**
   * Method to add an event with LATEST_TIMESTAMP.
   * @param qualifier historian column qualifier for the event type
   * @param text human-readable event description
   * @param info region the event applies to
   */
  private void add(byte [] qualifier, String text, HRegionInfo info) {
    add(qualifier, text, info, LATEST_TIMESTAMP);
  }

  /**
   * Method to add an event with provided information.
   * @param qualifier historian column qualifier for the event type
   * @param text human-readable event description
   * @param info region the event applies to
   * @param timestamp timestamp to write the event at
   */
  private void add(byte [] qualifier, String text, HRegionInfo info,
      long timestamp) {
    if (!isOnline()) {
      // Its a noop
      return;
    }
    // Events are never recorded for meta regions; writing .META. rows about
    // .META. itself could recurse/deadlock.
    if (!info.isMetaRegion()) {
      Put put = new Put(info.getRegionName());
      put.setTimeStamp(timestamp);
      put.add(HConstants.CATALOG_HISTORIAN_FAMILY, qualifier,
          Bytes.toBytes(text));
      try {
        this.metaTable.put(put);
      } catch (IOException ioe) {
        LOG.warn("Unable to '" + text + "'", ioe);
      }
    }
  }

  /**
   * Inner class that only contains information about a single historian
   * event.  Natural ordering is most-recent timestamp first.
   */
  public class RegionHistoryInformation implements
      Comparable<RegionHistoryInformation> {

    // Scratch calendar for timestamp formatting; confined to this instance.
    private GregorianCalendar cal = new GregorianCalendar();

    private long timestamp;
    private byte [] event = null;
    private byte [] description = null;

    /**
     * @param timestamp event timestamp
     * @param event column qualifier naming the event type
     * @param description human-readable event description
     */
    public RegionHistoryInformation(long timestamp, byte [] event,
        byte [] description) {
      this.timestamp = timestamp;
      this.event = event;
      this.description = description;
    }

    /** Sorts in descending timestamp order: most recent event first. */
    public int compareTo(RegionHistoryInformation otherInfo) {
      return -1 * Long.valueOf(timestamp).compareTo(otherInfo.getTimestamp());
    }

    /** @return the event */
    public String getEvent() {
      return Bytes.toString(event);
    }

    /** @return the description */
    public String getDescription() {
      return Bytes.toString(description);
    }

    /** @return the timestamp */
    public long getTimestamp() {
      return timestamp;
    }

    /**
     * @return The value of the timestamp processed with the date formatter.
     */
    public String getTimestampAsString() {
      cal.setTimeInMillis(timestamp);
      // The shared static SimpleDateFormat is not thread-safe; concurrent
      // unsynchronized format() calls can corrupt its internal state and
      // produce garbled output.  Guard it.
      synchronized (dateFormat) {
        return dateFormat.format(cal.getTime());
      }
    }
  }

  /**
   * @return True if the historian is online. When offline, will not add
   * updates to the .META. table.
   */
  public boolean isOnline() {
    return this.metaTable != null;
  }

  /**
   * @param c Online the historian. Invoke after cluster has spun up.
   */
  public void online(final HBaseConfiguration c) {
    try {
      this.metaTable = new HTable(c, META_TABLE_NAME);
      if (LOG.isDebugEnabled()) {
        LOG.debug("Onlined");
      }
    } catch (IOException ioe) {
      LOG.error("Unable to create RegionHistorian", ioe);
    }
  }

  /**
   * Offlines the historian.
   * @see #online(HBaseConfiguration)
   */
  public void offline() {
    this.metaTable = null;
    if (LOG.isDebugEnabled()) {
      LOG.debug("Offlined");
    }
  }

  /**
   * Fixed heap overhead of this object (object header plus the single
   * metaTable reference); validated by TestHeapSize.
   */
  public static final long FIXED_OVERHEAD = ClassSize.align(
      ClassSize.OBJECT + ClassSize.REFERENCE);
}

View File

@ -55,7 +55,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.LocalHBaseCluster;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.RegionHistorian;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.client.Get;
@ -438,7 +437,6 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
/*
* Clean up and close up shop
*/
RegionHistorian.getInstance().offline();
if (this.infoServer != null) {
LOG.info("Stopping infoServer");
try {

View File

@ -24,7 +24,6 @@ import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.RegionHistorian;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.util.Bytes;
@ -66,8 +65,6 @@ class ProcessRegionOpen extends ProcessRegionStatusChange {
// back on the toDoQueue
return true;
}
final RegionHistorian historian = RegionHistorian.getInstance();
HRegionInterface server =
master.connection.getHRegionConnection(getMetaRegion().getServer());
LOG.info(regionInfo.getRegionNameAsString() + " open on " +
@ -83,13 +80,6 @@ class ProcessRegionOpen extends ProcessRegionStatusChange {
LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
" in region " + Bytes.toString(metaRegionName) + " with startcode=" +
serverInfo.getStartCode() + ", server=" + serverInfo.getServerAddress());
if (!historian.isOnline()) {
// This is safest place to do the onlining of the historian in
// the master. When we get to here, we know there is a .META.
// for the historian to go against.
historian.online(this.master.getConfiguration());
}
historian.addRegionOpen(regionInfo, serverInfo.getServerAddress());
synchronized (master.regionManager) {
if (isMetaTable) {
// It's a meta region.

View File

@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.RegionHistorian;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.regionserver.HRegion;
@ -105,7 +104,6 @@ class RegionManager implements HConstants {
private final int maxAssignInOneGo;
final HMaster master;
private final RegionHistorian historian;
private final LoadBalancer loadBalancer;
/** Set of regions to split. */
@ -137,7 +135,6 @@ class RegionManager implements HConstants {
HBaseConfiguration conf = master.getConfiguration();
this.master = master;
this.historian = RegionHistorian.getInstance();
this.maxAssignInOneGo = conf.getInt("hbase.regions.percheckin", 10);
this.loadBalancer = new LoadBalancer(conf);
@ -325,31 +322,6 @@ class RegionManager implements HConstants {
rs.setPendingOpen(sinfo.getServerName());
this.regionsInTransition.put(regionName, rs);
// Since the meta/root may not be available at this moment, we
try {
// TODO move this into an actual class, and use the RetryableMetaOperation
master.toDoQueue.put(
new RegionServerOperation(master) {
protected boolean process() throws IOException {
if (!rootAvailable() || !metaTableAvailable()) {
return true; // the two above us will put us on the delayed queue
}
// this call can cause problems if meta/root is offline!
historian.addRegionAssignment(rs.getRegionInfo(),
sinfo.getServerName());
return true;
}
public String toString() {
return "RegionAssignmentHistorian from " + sinfo.getServerName();
}
}
);
} catch (InterruptedException e) {
// ignore and don't write the region historian
LOG.info("doRegionAssignment: Couldn't queue the region historian due to exception: " + e);
}
returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_OPEN, rs.getRegionInfo()));
}

View File

@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RegionHistorian;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
@ -112,7 +111,6 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
* Once set, it is never cleared.
*/
final AtomicBoolean closing = new AtomicBoolean(false);
private final RegionHistorian historian;
//////////////////////////////////////////////////////////////////////////////
// Members
@ -220,7 +218,6 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
this.conf = null;
this.flushListener = null;
this.fs = null;
this.historian = null;
this.memstoreFlushSize = 0;
this.log = null;
this.regionCompactionDir = null;
@ -261,7 +258,6 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
this.threadWakeFrequency = conf.getLong(THREAD_WAKE_FREQUENCY, 10 * 1000);
String encodedNameStr = Integer.toString(this.regionInfo.getEncodedName());
this.regiondir = new Path(basedir, encodedNameStr);
this.historian = RegionHistorian.getInstance();
if (LOG.isDebugEnabled()) {
// Write out region name as string and its encoded name.
LOG.debug("Opening region " + this + ", encoded=" +
@ -668,8 +664,6 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
LOG.debug("Cleaned up " + FSUtils.getPath(splits) + " " + deleted);
}
HRegion regions[] = new HRegion [] {regionA, regionB};
this.historian.addRegionSplit(this.regionInfo,
regionA.getRegionInfo(), regionB.getRegionInfo());
return regions;
}
}
@ -785,7 +779,6 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
String timeTaken = StringUtils.formatTimeDiff(System.currentTimeMillis(),
startTime);
LOG.info("compaction completed on region " + this + " in " + timeTaken);
this.historian.addRegionCompaction(regionInfo, timeTaken);
} finally {
synchronized (writestate) {
writestate.compacting = false;
@ -970,14 +963,10 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
if (LOG.isDebugEnabled()) {
long now = System.currentTimeMillis();
String timeTaken = StringUtils.formatTimeDiff(now, startTime);
LOG.debug("Finished memstore flush of ~" +
StringUtils.humanReadableInt(currentMemStoreSize) + " for region " +
this + " in " + (now - startTime) + "ms, sequence id=" + sequenceId +
", compaction requested=" + compactionRequested);
if (!regionInfo.isMetaRegion()) {
this.historian.addRegionFlush(regionInfo, timeTaken);
}
}
return compactionRequested;
}
@ -1834,10 +1823,6 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
Path regionDir = HRegion.getRegionDir(tableDir, info.getEncodedName());
FileSystem fs = FileSystem.get(conf);
fs.mkdirs(regionDir);
// Note in historian the creation of new region.
if (!info.isMetaRegion()) {
RegionHistorian.getInstance().addRegionCreation(info);
}
HRegion region = new HRegion(tableDir,
new HLog(fs, new Path(regionDir, HREGION_LOGDIR_NAME), conf, null),
fs, conf, info, null);
@ -2381,7 +2366,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
public static final long FIXED_OVERHEAD = ClassSize.align(
(3 * Bytes.SIZEOF_LONG) + (2 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_BOOLEAN +
(20 * ClassSize.REFERENCE) + ClassSize.OBJECT);
(19 * ClassSize.REFERENCE) + ClassSize.OBJECT);
public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
ClassSize.OBJECT + (2 * ClassSize.ATOMIC_BOOLEAN) +
@ -2390,7 +2375,6 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
(16 * ClassSize.CONCURRENT_HASHMAP_ENTRY) +
(16 * ClassSize.CONCURRENT_HASHMAP_SEGMENT) +
ClassSize.CONCURRENT_SKIPLISTMAP + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY +
RegionHistorian.FIXED_OVERHEAD + HLog.FIXED_OVERHEAD +
ClassSize.align(ClassSize.OBJECT + (5 * Bytes.SIZEOF_BOOLEAN)) +
(3 * ClassSize.REENTRANT_LOCK));

View File

@ -71,7 +71,6 @@ import org.apache.hadoop.hbase.LeaseListener;
import org.apache.hadoop.hbase.Leases;
import org.apache.hadoop.hbase.LocalHBaseCluster;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RegionHistorian;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.UnknownRowLockException;
import org.apache.hadoop.hbase.UnknownScannerException;
@ -601,7 +600,6 @@ public class HRegionServer implements HConstants, HRegionInterface,
abort();
}
}
RegionHistorian.getInstance().offline();
this.leases.closeAfterLeasesExpire();
this.worker.stop();
this.server.stop();
@ -1558,11 +1556,6 @@ public class HRegionServer implements HConstants, HRegionInterface,
}
void openRegion(final HRegionInfo regionInfo) {
// If historian is not online and this is not a meta region, online it.
if (!regionInfo.isMetaRegion() &&
!RegionHistorian.getInstance().isOnline()) {
RegionHistorian.getInstance().online(this.conf);
}
Integer mapKey = Bytes.mapKey(regionInfo.getRegionName());
HRegion region = this.onlineRegions.get(mapKey);
if (region == null) {

View File

@ -18,7 +18,6 @@ import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.RegionHistorian;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.hfile.CachedBlock;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
@ -290,15 +289,6 @@ public class TestHeapSize extends TestCase {
assertEquals(expected, actual);
}
// RegionHistorian Overhead
cl = RegionHistorian.class;
actual = RegionHistorian.FIXED_OVERHEAD;
expected = ClassSize.estimateBase(cl, false);
if(expected != actual) {
ClassSize.estimateBase(cl, true);
assertEquals(expected, actual);
}
// Currently NOT testing Deep Overheads of many of these classes.
// Deep overheads cover a vast majority of stuff, but will not be 100%
// accurate because it's unclear when we're referencing stuff that's already

View File

@ -1,58 +0,0 @@
<%-- regionhistorian.jsp: renders the lifecycle events recorded for one
     region (identified by the "regionname" request parameter), as read from
     the .META. table through the RegionHistorian singleton. --%>
<%@ page contentType="text/html;charset=UTF-8"
import="java.util.List"
import="java.util.regex.*"
import="java.net.URLEncoder"
import="org.apache.hadoop.hbase.RegionHistorian"
import="org.apache.hadoop.hbase.master.HMaster"
import="org.apache.hadoop.hbase.RegionHistorian.RegionHistoryInformation"
import="org.apache.hadoop.hbase.HConstants"%>
<%@ page import="org.apache.hadoop.hbase.util.Bytes" %>
<%-- Fetch the event list; it is null when the historian is offline and
     empty when nothing was recorded for this region. --%>
<%
String regionName = request.getParameter("regionname");
HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
List<RegionHistoryInformation> informations = RegionHistorian.getInstance().getRegionHistory(Bytes.toBytesBinary(regionName));
// Pattern used so we can wrap a regionname in an href.
Pattern pattern = Pattern.compile(RegionHistorian.SPLIT_PREFIX + "(.*)$");
%><?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
<meta http-equiv="refresh" content="30"/>
<title>Region in <%= regionName %></title>
<link rel="stylesheet" type="text/css" href="/static/hbase.css" />
</head>
<body>
<a id="logo" href="http://wiki.apache.org/lucene-hadoop/Hbase"><img src="/static/hbase_logo_med.gif" alt="HBase Logo" title="HBase Logo" /></a>
<h1 id="page_title">Region <%= regionName %></h1>
<p id="links_menu"><a href="/master.jsp">Master</a>, <a href="/logs/">Local logs</a>, <a href="/stacks">Thread Dump</a>, <a href="/logLevel">Log Level</a></p>
<hr id="head_rule" />
<%-- One table row per recorded event, newest first (list is pre-sorted by
     RegionHistorian); split events link to the parent region's history. --%>
<%if(informations != null && informations.size() > 0) { %>
<table><tr><th>Timestamp</th><th>Event</th><th>Description</th></tr>
<% for( RegionHistoryInformation information : informations) {
String description = information.getDescription();
Matcher m = pattern.matcher(description);
if (m.matches()) {
// Wrap the region name in an href so user can click on it.
description = RegionHistorian.SPLIT_PREFIX +
"<a href=\"regionhistorian.jsp?regionname=" + URLEncoder.encode(m.group(1), "UTF-8") + "\">" +
m.group(1) + "</a>";
}
%>
<tr><td><%= information.getTimestampAsString() %></td><td><%= information.getEvent() %></td><td><%= description %></td></tr>
<% } %>
</table>
<p>
Master is the source of following events: creation, open, and assignment. Regions are the source of following events: split, compaction, and flush.
</p>
<%} else {%>
<p>
This region is no longer available. It may be due to a split, a merge or the name changed.
</p>
<%} %>
</body>
</html>

View File

@ -131,15 +131,11 @@
int infoPort = serverAddressToServerInfos.get(
hriEntry.getValue()).getInfoPort();
String urlRegionHistorian =
"/regionhistorian.jsp?regionname="+
Bytes.toStringBinary(hriEntry.getKey().getRegionName());
String urlRegionServer =
"http://" + hriEntry.getValue().getHostname().toString() + ":" + infoPort + "/";
%>
<tr>
<td><a href="<%= urlRegionHistorian %>"><%= Bytes.toStringBinary(hriEntry.getKey().getRegionName())%></a></td>
<td><%= Bytes.toStringBinary(hriEntry.getKey().getRegionName())%></td>
<td><a href="<%= urlRegionServer %>"><%= hriEntry.getValue().getHostname().toString() + ":" + infoPort %></a></td>
<td><%= hriEntry.getKey().getEncodedName()%></td> <td><%= Bytes.toStringBinary(hriEntry.getKey().getStartKey())%></td>
<td><%= Bytes.toStringBinary(hriEntry.getKey().getEndKey())%></td>