HBASE-4260 Expose a command to manually trigger an HLog roll

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1166524 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2011-09-08 04:22:42 +00:00
parent bc524bbe59
commit 76de714e2b
8 changed files with 195 additions and 0 deletions

View File

@@ -453,6 +453,8 @@ Release 0.91.0 - Unreleased
HBASE-4339 Improve eclipse documentation and project file generation
(Eric Charles)
HBASE-4342 Update Thrift to 0.7.0 (Moaz Reyad)
HBASE-4260 Expose a command to manually trigger an HLog roll
(ramkrishna.s.vasudevan)
TASKS
HBASE-3559 Move report of split to master OFF the heartbeat channel

View File

@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.ipc.HMasterInterface;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
@@ -1581,4 +1582,24 @@ public class HBaseAdmin implements Abortable, Closeable {
return this.connection.getHTableDescriptors(tableNames);
}
/**
* Roll the log writer. That is, start writing log messages to a new file.
*
* @param serverName
* The server name of the regionserver. A server name is made of host,
* port and startcode. This is mandatory. Here is an example:
* <code> host187.example.com,60020,1289493121758</code>
* @return The regions that should be flushed so that the oldest log files can
* be cleaned up on a subsequent roll; null if there is nothing to flush.
* Names are encoded region names as returned by
* {@link HRegionInfo#getEncodedName()}
* @throws IOException if a remote or network exception occurs
* @throws FailedLogCloseException if closing the current log file fails
*/
public synchronized byte[][] rollHLogWriter(String serverName)
throws IOException, FailedLogCloseException {
ServerName sn = new ServerName(serverName);
HRegionInterface rs = this.connection.getHRegionConnection(
sn.getHostname(), sn.getPort());
return rs.rollHLogWriter();
}
}
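For context, here is a minimal client-side sketch (not part of this change) of how the new admin call might be used. The class name, configuration setup, null check, and the Bytes.toString decoding of the returned encoded names are illustrative assumptions:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.HBaseAdmin;
  import org.apache.hadoop.hbase.util.Bytes;

  public class RollHLogExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      HBaseAdmin admin = new HBaseAdmin(conf);
      try {
        // Server name format: host,port,startcode (see the javadoc above).
        byte[][] regionsToFlush =
            admin.rollHLogWriter("host187.example.com,60020,1289493121758");
        if (regionsToFlush != null) {
          // Encoded region names of regions that still pin old log files.
          for (byte[] encodedName : regionsToFlush) {
            System.out.println("Consider flushing region "
                + Bytes.toString(encodedName));
          }
        }
      } finally {
        admin.close();
      }
    }
  }

The new shell command further down wires hlog_roll through to this same HBaseAdmin method.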

View File

@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.WritableByteArrayComparable;
import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.hbase.ipc.VersionedProtocol;
@@ -513,4 +514,14 @@ public interface HRegionInterface extends VersionedProtocol, Stoppable, Abortabl
* @throws IOException exception
*/
public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries() throws IOException;
/**
* Roll the log writer. That is, start writing log messages to a new file.
*
* @return The regions that should be flushed so that the oldest log files can
* be cleaned up on a subsequent roll; null if there is nothing to flush.
* Names are encoded region names as returned by
* {@link HRegionInfo#getEncodedName()}
* @throws IOException if a remote or network exception occurs
* @throws FailedLogCloseException if closing the current log file fails
*/
public byte[][] rollHLogWriter() throws IOException, FailedLogCloseException;
}

View File

@@ -119,6 +119,7 @@ import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
import org.apache.hadoop.hbase.regionserver.handler.OpenRootHandler;
import org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.replication.regionserver.Replication;
@@ -3088,6 +3089,12 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
return c.getBlockCacheColumnFamilySummaries(this.conf);
}
@Override
public byte[][] rollHLogWriter() throws IOException, FailedLogCloseException {
HLog wal = this.getWAL();
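// Passing true forces the roll: a new writer is started even if no edits
// have been written to the current log.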
return wal.rollWriter(true);
}
}

View File

@@ -60,6 +60,12 @@ module Hbase
@admin.majorCompact(table_or_region_name)
end
#----------------------------------------------------------------------------------------------
# Requests a regionserver's HLog roll
def hlog_roll(server_name)
@admin.rollHLogWriter(server_name)
end
#----------------------------------------------------------------------------------------------
# Requests a table or region split
def split(table_or_region_name, split_point)

View File

@@ -265,6 +265,7 @@ Shell.load_command_group(
split
unassign
zk_dump
hlog_roll
]
)

View File

@@ -0,0 +1,40 @@
#
# Copyright 2011 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Shell
module Commands
class HlogRoll < Command
def help
return <<-EOF
Roll the log writer. That is, start writing log messages to a new file.
The name of the regionserver should be given as the parameter. A
'server_name' is the host, port plus startcode of a regionserver, for
example: host187.example.com,60020,1289493121758 (you can find the
server name in the master UI or in the output of the shell's
status 'detailed' command). Here is an example:

  hbase> hlog_roll 'host187.example.com,60020,1289493121758'
EOF
end
def command(server_name)
format_simple_command do
admin.hlog_roll(server_name)
end
end
end
end
end

View File

@@ -55,7 +55,10 @@ import org.apache.hadoop.hbase.executor.EventHandler.EventType;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.TestHLogUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -1160,5 +1163,109 @@ public class TestAdmin {
expectedRegions, RegionInfos.size());
}
@Test
public void testHLogRollWriting() throws Exception {
setUpforLogRolling();
String className = this.getClass().getName();
StringBuilder v = new StringBuilder(className);
while (v.length() < 1000) {
v.append(className);
}
byte[] value = Bytes.toBytes(v.toString());
HRegionServer regionServer = startAndWriteData("TestLogRolling", value);
LOG.info("after writing there are "
+ TestHLogUtils.getNumLogFiles(regionServer.getWAL()) + " log files");
// flush all regions
List<HRegion> regions = new ArrayList<HRegion>(regionServer
.getOnlineRegionsLocalContext());
for (HRegion r : regions) {
r.flushcache();
}
admin.rollHLogWriter(regionServer.getServerName().getServerName());
int count = TestHLogUtils.getNumLogFiles(regionServer.getWAL());
LOG.info("after flushing all regions and rolling logs there are " +
count + " log files");
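// All regions were flushed before the roll, so the old logs should be
// cleanable, leaving at most the current log plus one straggler.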
assertTrue(("actual count: " + count), count <= 2);
}
private void setUpforLogRolling() {
// Force a region split after every 768KB
TEST_UTIL.getConfiguration().setLong("hbase.hregion.max.filesize",
768L * 1024L);
// We roll the log after every 32 writes
TEST_UTIL.getConfiguration().setInt("hbase.regionserver.maxlogentries", 32);
TEST_UTIL.getConfiguration().setInt(
"hbase.regionserver.logroll.errors.tolerated", 2);
TEST_UTIL.getConfiguration().setInt("ipc.ping.interval", 10 * 1000);
TEST_UTIL.getConfiguration().setInt("ipc.socket.timeout", 10 * 1000);
TEST_UTIL.getConfiguration().setInt("hbase.rpc.timeout", 10 * 1000);
// For less frequently updated regions flush after every 2 flushes
TEST_UTIL.getConfiguration().setInt(
"hbase.hregion.memstore.optionalflushcount", 2);
// We flush the cache after every 8192 bytes
TEST_UTIL.getConfiguration().setInt("hbase.hregion.memstore.flush.size",
8192);
// Increase the amount of time between client retries
TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 10 * 1000);
// Reduce thread wake frequency so that other threads can get
// a chance to run.
TEST_UTIL.getConfiguration().setInt(HConstants.THREAD_WAKE_FREQUENCY,
2 * 1000);
/**** configuration for testLogRollOnDatanodeDeath ****/
// make sure log.hflush() calls syncFs() to open a pipeline
TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
// lower the namenode & datanode heartbeat so the namenode
// quickly detects datanode failures
TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
// the namenode might still try to choose the recently-dead datanode
// for a pipeline, so try creating a new pipeline multiple times
TEST_UTIL.getConfiguration().setInt("dfs.client.block.write.retries", 30);
TEST_UTIL.getConfiguration().setInt(
"hbase.regionserver.hlog.tolerable.lowreplication", 2);
TEST_UTIL.getConfiguration().setInt(
"hbase.regionserver.hlog.lowreplication.rolllimit", 3);
}
private HRegionServer startAndWriteData(String tableName, byte[] value)
throws IOException {
// When the META table can be opened, the region servers are running
new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);
HRegionServer regionServer = TEST_UTIL.getHbaseCluster()
.getRegionServerThreads().get(0).getRegionServer();
// Create the test table and open it
HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
admin.createTable(desc);
HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
regionServer = TEST_UTIL.getRSForFirstRegionInTable(Bytes
.toBytes(tableName));
for (int i = 1; i <= 256; i++) { // 256 writes should cause 8 log rolls
Put put = new Put(Bytes.toBytes("row" + String.format("%1$04d", i)));
put.add(HConstants.CATALOG_FAMILY, null, value);
table.put(put);
if (i % 32 == 0) {
// After every 32 writes sleep to let the log roller run
try {
Thread.sleep(2000);
} catch (InterruptedException e) {
// continue
}
}
}
return regionServer;
}
}