HBASE-6876 Clean up WARNs and log messages around startup; REVERT OF OVERCOMMIT

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1390847 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2012-09-27 05:39:43 +00:00
parent 1acc426e7e
commit c8375a55e9
9 changed files with 24 additions and 119 deletions

File: HConnectionManager.java

@@ -2450,7 +2450,7 @@ public class HConnectionManager {
         c.getInt("hbase.client.serverside.retries.multiplier", 10);
       int retries = hcRetries * serversideMultiplier;
       c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
-      log.debug("HConnection retries=" + retries);
+      log.debug("Set serverside HConnection retries=" + retries);
     }
   }
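For orientation: the hunk above sits in code that derives the retry count for server-side HConnections by multiplying the client retry setting by hbase.client.serverside.retries.multiplier; only the debug-message wording changes in this commit. A minimal sketch of that arithmetic, assuming a base-retries default of 10 (an assumption; only the multiplier default of 10 is visible in the diff):

```java
// Sketch of the server-side retry derivation shown in the hunk above.
// The base-retries default of 10 is an assumption for illustration;
// the multiplier default of 10 is taken from the diff itself.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class ServersideRetriesSketch {
  public static void main(String[] args) {
    Configuration c = HBaseConfiguration.create();
    int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10);
    int serversideMultiplier =
        c.getInt("hbase.client.serverside.retries.multiplier", 10);
    int retries = hcRetries * serversideMultiplier;  // e.g. 10 * 10 = 100
    c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
    System.out.println("serverside HConnection retries=" + retries);
  }
}
```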

File: HFileBlockIndex.java

@@ -829,7 +829,7 @@ public class HFileBlockIndex {
      * @throws IOException
      */
     public long writeIndexBlocks(FSDataOutputStream out) throws IOException {
-      if (curInlineChunk != null && curInlineChunk.getNumEntries() != 0) {
+      if (curInlineChunk.getNumEntries() != 0) {
         throw new IOException("Trying to write a multi-level block index, " +
             "but are " + curInlineChunk.getNumEntries() + " entries in the " +
             "last inline chunk.");
@@ -840,11 +840,9 @@ public class HFileBlockIndex {
       byte[] midKeyMetadata = numLevels > 1 ? rootChunk.getMidKeyMetadata()
           : null;

-      if (curInlineChunk != null) {
-        while (rootChunk.getRootSize() > maxChunkSize) {
-          rootChunk = writeIntermediateLevel(out, rootChunk);
-          numLevels += 1;
-        }
+      while (rootChunk.getRootSize() > maxChunkSize) {
+        rootChunk = writeIntermediateLevel(out, rootChunk);
+        numLevels += 1;
       }

       // write the root level
@@ -1006,18 +1004,11 @@ public class HFileBlockIndex {
      */
     @Override
     public boolean shouldWriteBlock(boolean closing) {
-      if (singleLevelOnly) {
+      if (singleLevelOnly)
         throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED);
-      }

-      if (curInlineChunk == null) {
-        throw new IllegalStateException("curInlineChunk is null; has shouldWriteBlock been " +
-            "called with closing=true and then called again?");
-      }
-
-      if (curInlineChunk.getNumEntries() == 0) {
+      if (curInlineChunk.getNumEntries() == 0)
         return false;
-      }

       // We do have some entries in the current inline chunk.
       if (closing) {
@@ -1027,7 +1018,7 @@ public class HFileBlockIndex {
         expectNumLevels(1);
         rootChunk = curInlineChunk;
-        curInlineChunk = null; // Disallow adding any more index entries.
+        curInlineChunk = new BlockIndexChunk();
         return false;
       }
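Context for the hunks above: the removed (left-hand) lines appear to be the HBASE-6871 fix that rode in on the HBASE-6876 overcommit, which is also why the HBASE-6871 regression test is deleted at the end of this commit. In the removed code, shouldWriteBlock(closing=true) promotes the last inline chunk to the root chunk and nulls out curInlineChunk as a sentinel; writeIndexBlocks then uses that sentinel both to null-guard its sanity check and to avoid splitting an upgraded leaf chunk into intermediate levels. A condensed, hypothetical sketch of that protocol (simplified stand-in names, not the real HFileBlockIndex API):

```java
// Condensed, hypothetical sketch of the null-sentinel protocol that this
// revert removes. Chunk is a stand-in for BlockIndexChunk; real sizing
// and I/O are elided.
final class BlockIndexWriterSketch {
  static final class Chunk {
    int entries;   // number of index entries accumulated
    int rootSize;  // serialized size if written as a root-level block
  }

  private final int maxChunkSize = 1024;
  private Chunk rootChunk = new Chunk();
  private Chunk curInlineChunk = new Chunk();

  boolean shouldWriteBlock(boolean closing) {
    if (curInlineChunk == null) {
      throw new IllegalStateException(
          "shouldWriteBlock already called with closing=true");
    }
    if (curInlineChunk.entries == 0) {
      return false;
    }
    if (closing) {
      // Single-level index: the last inline (leaf) chunk becomes the root.
      rootChunk = curInlineChunk;
      curInlineChunk = null;  // sentinel: disallow further index entries
      return false;
    }
    return true;
  }

  void writeIndexBlocks() {
    if (curInlineChunk != null && curInlineChunk.entries != 0) {
      throw new IllegalStateException("entries left in last inline chunk");
    }
    if (curInlineChunk != null) {
      // Only a root chunk that was never a leaf may be split further; an
      // upgraded leaf chunk must stay a single root-level block.
      while (rootChunk.rootSize > maxChunkSize) {
        rootChunk = splitIntoIntermediateLevel(rootChunk);
      }
    }
    // ... write the root-level block here ...
  }

  private Chunk splitIntoIntermediateLevel(Chunk root) {
    Chunk smaller = new Chunk();           // stand-in for real splitting
    smaller.rootSize = root.rootSize / 2;  // pretend the root shrank
    return smaller;
  }
}
```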

File: HBaseRpcMetrics.java

@@ -62,8 +62,8 @@ public class HBaseRpcMetrics implements Updater {
     metricsRecord.setTag("port", port);

-    LOG.info("Initializing RPC Metrics for className="
-        + hostName + " on port=" + port);
+    LOG.info("Initializing RPC Metrics with hostName="
+        + hostName + ", port=" + port);

     context.registerUpdater(this);

File: HBaseServer.java

@@ -525,7 +525,6 @@ public abstract class HBaseServer implements RpcServer {
         readers[i] = reader;
         readPool.execute(reader);
       }
-      LOG.info("Started " + readThreads + " reader(s) in Listener.");

       // Register accepts on the server socket with the selector.
       acceptChannel.register(selector, SelectionKey.OP_ACCEPT);
@@ -542,6 +541,7 @@ public abstract class HBaseServer implements RpcServer {
         this.readSelector = Selector.open();
       }

       public void run() {
+        LOG.info("Starting " + getName());
         try {
           doRunLoop();
         } finally {

File: (RPC implementation in org.apache.hadoop.hbase.ipc; exact file name not shown)

@@ -20,12 +20,14 @@ package org.apache.hadoop.hbase.ipc;
 import java.io.IOException;
 import java.net.InetSocketAddress;

 import javax.net.SocketFactory;

+import org.apache.hadoop.hbase.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.security.User;
+
+import com.google.protobuf.ServiceException;

 /** An RPC implementation. */
 @InterfaceAudience.Private

File: ActiveMasterManager.java

@@ -145,11 +145,10 @@ class ActiveMasterManager extends ZooKeeperListener {
           this.watcher.getMasterAddressZNode(), this.sn)) {
         // If we were a backup master before, delete our ZNode from the backup
-        // master directory since we are the active now)
-        if (ZKUtil.checkExists(this.watcher, backupZNode) != -1) {
-          LOG.info("Deleting ZNode for " + backupZNode + " from backup master directory");
-          ZKUtil.deleteNodeFailSilent(this.watcher, backupZNode);
-        }
+        // master directory since we are the active now
+        LOG.info("Deleting ZNode for " + backupZNode + " from backup master directory");
+        ZKUtil.deleteNodeFailSilent(this.watcher, backupZNode);

         // Save the znode in a file, this will allow to check if we crash in the launch scripts
         ZNodeClearer.writeMyEphemeralNodeOnDisk(this.sn.toString());

File: HMaster.java

@@ -185,7 +185,6 @@ import org.apache.hadoop.net.DNS;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.Watcher;
 import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-import org.apache.hadoop.hbase.util.FSUtils;

 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
@@ -325,8 +324,6 @@ Server {
   public HMaster(final Configuration conf)
   throws IOException, KeeperException, InterruptedException {
     this.conf = new Configuration(conf);
-    LOG.info("hbase.rootdir=" + FSUtils.getRootDir(this.conf) +
-        ", hbase.cluster.distributed=" + this.conf.getBoolean("hbase.cluster.distributed", false));
     // Disable the block cache on the master
     this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
     // Set how many times to retry talking to another server over HConnection.
@@ -336,7 +333,7 @@ Server {
         conf.get("hbase.master.dns.interface", "default"),
         conf.get("hbase.master.dns.nameserver", "default")));
     int port = conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT);
-    // Creation of a ISA will force a resolve.
+    // Creation of a HSA will force a resolve.
     InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
     if (initialIsa.getAddress() == null) {
       throw new IllegalArgumentException("Failed resolve of " + initialIsa);
@@ -2294,7 +2291,7 @@ Server {
    * @see org.apache.hadoop.hbase.master.HMasterCommandLine
    */
   public static void main(String [] args) throws Exception {
     VersionInfo.logVersion();
     new HMasterCommandLine(HMaster.class).doMain(args);
   }

File: RecoverableZooKeeper.java

@@ -134,11 +134,12 @@ public class RecoverableZooKeeper {
       switch (e.code()) {
         case NONODE:
           if (isRetry) {
-            LOG.info("Node " + path + " already deleted. Assuming a " +
+            LOG.info("Node " + path + " already deleted. Assuming that a " +
                 "previous attempt succeeded.");
             return;
           }
-          LOG.warn("Node " + path + " already deleted, retry=" + isRetry);
+          LOG.warn("Node " + path + " already deleted, and this is not a " +
+              "retry");
           throw e;
         case CONNECTIONLOSS:
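The NONODE handling above encodes the usual idempotent-retry contract for ZooKeeper deletes: a delete that appeared to fail with CONNECTIONLOSS may in fact have been applied server-side, so NONODE on a retry is treated as success, while NONODE on a first attempt is a genuine error and is rethrown. A minimal standalone sketch of that pattern (hypothetical helper, not the actual RecoverableZooKeeper API):

```java
// Hypothetical sketch of the retry-idempotence pattern used above:
// NONODE on a retry means an earlier, seemingly failed delete actually
// succeeded server-side; NONODE on the first attempt is a real error.
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

public class IdempotentDeleteSketch {
  static void deleteWithRetries(ZooKeeper zk, String path, int maxRetries)
      throws KeeperException, InterruptedException {
    boolean isRetry = false;
    for (int attempt = 0; attempt <= maxRetries; ++attempt) {
      try {
        zk.delete(path, -1);  // version -1: delete regardless of version
        return;
      } catch (KeeperException.NoNodeException e) {
        if (isRetry) {
          return;  // assume a previous attempt succeeded
        }
        throw e;   // first attempt: the node really was missing
      } catch (KeeperException.ConnectionLossException e) {
        isRetry = true;  // the delete may or may not have been applied
      }
    }
    throw KeeperException.create(KeeperException.Code.CONNECTIONLOSS, path);
  }
}
```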

File: TestHFileInlineToRootChunkConversion.java (deleted)

@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hadoop.hbase.io.hfile;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.SmallTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
-/**
- * Test a case when an inline index chunk is converted to a root one. This reproduces the bug in
- * HBASE-6871. We write a carefully selected number of relatively large keys so that we accumulate
- * a leaf index chunk that only goes over the configured index chunk size after adding the last
- * key/value. The bug is in that when we close the file, we convert that inline (leaf-level) chunk
- * into a root chunk, but then look at the size of that root chunk, find that it is greater than
- * the configured chunk size, and split it into a number of intermediate index blocks that should
- * really be leaf-level blocks. If more keys were added, we would flush the leaf-level block, add
- * another entry to the root-level block, and that would prevent us from upgrading the leaf-level
- * chunk to the root chunk, thus not triggering the bug.
- */
-@Category(SmallTests.class)
-public class TestHFileInlineToRootChunkConversion {
-  private final HBaseTestingUtility testUtil = new HBaseTestingUtility();
-  private final Configuration conf = testUtil.getConfiguration();
-
-  @Test
-  public void testWriteHFile() throws Exception {
-    Path hfPath = new Path(testUtil.getDataTestDir(),
-        TestHFileInlineToRootChunkConversion.class.getSimpleName() + ".hfile");
-    int maxChunkSize = 1024;
-    FileSystem fs = FileSystem.get(conf);
-    CacheConfig cacheConf = new CacheConfig(conf);
-    conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize);
-    HFileWriterV2 hfw =
-        (HFileWriterV2) new HFileWriterV2.WriterFactoryV2(conf, cacheConf)
-            .withBlockSize(16)
-            .withPath(fs, hfPath).create();
-    List<byte[]> keys = new ArrayList<byte[]>();
-    StringBuilder sb = new StringBuilder();
-
-    for (int i = 0; i < 4; ++i) {
-      sb.append("key" + String.format("%05d", i));
-      sb.append("_");
-      for (int j = 0; j < 100; ++j) {
-        sb.append('0' + j);
-      }
-      String keyStr = sb.toString();
-      sb.setLength(0);
-
-      byte[] k = Bytes.toBytes(keyStr);
-      System.out.println("Key: " + Bytes.toString(k));
-      keys.add(k);
-      byte[] v = Bytes.toBytes("value" + i);
-      hfw.append(k, v);
-    }
-    hfw.close();
-
-    HFileReaderV2 reader = (HFileReaderV2) HFile.createReader(fs, hfPath, cacheConf);
-    HFileScanner scanner = reader.getScanner(true, true);
-    for (int i = 0; i < keys.size(); ++i) {
-      scanner.seekTo(keys.get(i));
-    }
-    reader.close();
-  }
-}
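One subtlety in the deleted test above: sb.append('0' + j) does not append a character. In Java, '0' + j promotes to int, so the decimal value of 48 + j is appended, making each key roughly 257 bytes ("keyNNNNN_" plus about 248 digit characters). That is plausibly how four keys were tuned to cross the 1024-byte chunk-size threshold only on the final append, as the class javadoc describes. A minimal, self-contained illustration of the overload distinction:

```java
// Java overload resolution: '0' + j is int arithmetic, so append('0' + j)
// picks StringBuilder.append(int) and appends decimal digits.
public class CharPromotionDemo {
  public static void main(String[] args) {
    StringBuilder sb = new StringBuilder();
    sb.append('0' + 1);          // int overload: appends "49"
    sb.append((char) ('0' + 1)); // char overload: appends "1"
    System.out.println(sb);      // prints "491"

    // Length of one key as generated by the deleted test above:
    StringBuilder key = new StringBuilder("key00000_");
    for (int j = 0; j < 100; ++j) {
      key.append('0' + j);       // values 48..147: 2- and 3-digit numbers
    }
    System.out.println(key.length()); // 257; four such keys exceed 1024
  }
}
```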