From 1acc426e7eb56b5a91044f77c082a9ecc1d5c093 Mon Sep 17 00:00:00 2001
From: Michael Stack
Date: Thu, 27 Sep 2012 05:38:35 +0000
Subject: [PATCH] HBASE-6876 Clean up WARNs and log messages around startup

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1390846 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hbase/client/HConnectionManager.java      |  2 +-
 .../hbase/io/hfile/HFileBlockIndex.java       | 23 +++--
 .../hadoop/hbase/ipc/HBaseRpcMetrics.java     |  4 +-
 .../apache/hadoop/hbase/ipc/HBaseServer.java  |  2 +-
 .../apache/hadoop/hbase/ipc/RpcEngine.java    |  6 +-
 .../hbase/master/ActiveMasterManager.java     |  9 +-
 .../apache/hadoop/hbase/master/HMaster.java   |  7 +-
 .../hbase/zookeeper/RecoverableZooKeeper.java |  5 +-
 .../TestHFileInlineToRootChunkConversion.java | 85 +++++++++++++++++++
 9 files changed, 119 insertions(+), 24 deletions(-)
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
index 1665e5db6bf..ab2ab19416c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
@@ -2450,7 +2450,7 @@ public class HConnectionManager {
         c.getInt("hbase.client.serverside.retries.multiplier", 10);
     int retries = hcRetries * serversideMultiplier;
     c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
-    log.debug("Set serverside HConnection retries=" + retries);
+    log.debug("HConnection retries=" + retries);
   }
 }

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index 503fe67f19a..7fbc06da71b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -829,7 +829,7 @@
      * @throws IOException
      */
     public long writeIndexBlocks(FSDataOutputStream out) throws IOException {
-      if (curInlineChunk.getNumEntries() != 0) {
+      if (curInlineChunk != null && curInlineChunk.getNumEntries() != 0) {
         throw new IOException("Trying to write a multi-level block index, " +
             "but are " + curInlineChunk.getNumEntries() + " entries in the " +
             "last inline chunk.");
@@ -840,9 +840,11 @@
       byte[] midKeyMetadata = numLevels > 1 ? rootChunk.getMidKeyMetadata()
           : null;

-      while (rootChunk.getRootSize() > maxChunkSize) {
-        rootChunk = writeIntermediateLevel(out, rootChunk);
-        numLevels += 1;
+      if (curInlineChunk != null) {
+        while (rootChunk.getRootSize() > maxChunkSize) {
+          rootChunk = writeIntermediateLevel(out, rootChunk);
+          numLevels += 1;
+        }
       }

       // write the root level
@@ -1004,11 +1006,18 @@
      */
     @Override
     public boolean shouldWriteBlock(boolean closing) {
-      if (singleLevelOnly)
+      if (singleLevelOnly) {
         throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED);
+      }

-      if (curInlineChunk.getNumEntries() == 0)
+      if (curInlineChunk == null) {
+        throw new IllegalStateException("curInlineChunk is null; has shouldWriteBlock been " +
+            "called with closing=true and then called again?");
+      }
+
+      if (curInlineChunk.getNumEntries() == 0) {
         return false;
+      }

       // We do have some entries in the current inline chunk.
       if (closing) {
@@ -1018,7 +1027,7 @@
           expectNumLevels(1);

           rootChunk = curInlineChunk;
-          curInlineChunk = new BlockIndexChunk();
+          curInlineChunk = null;  // Disallow adding any more index entries.

           return false;
         }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java
index b6c315fb09d..119907df375 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java
@@ -62,8 +62,8 @@
     metricsRecord.setTag("port", port);

-    LOG.info("Initializing RPC Metrics with hostName="
-        + hostName + ", port=" + port);
+    LOG.info("Initializing RPC Metrics for className="
+        + hostName + " on port=" + port);

     context.registerUpdater(this);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
index 0bb1f8af39a..8f0cef1d4c7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
@@ -525,6 +525,7 @@ public abstract class HBaseServer implements RpcServer {
         readers[i] = reader;
         readPool.execute(reader);
       }
+      LOG.info("Started " + readThreads + " reader(s) in Listener.");

       // Register accepts on the server socket with the selector.
       acceptChannel.register(selector, SelectionKey.OP_ACCEPT);
@@ -541,7 +542,6 @@
       this.readSelector = Selector.open();
     }
     public void run() {
-      LOG.info("Starting " + getName());
       try {
         doRunLoop();
       } finally {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcEngine.java
index 7c7cb25a7a2..68038556e43 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcEngine.java
@@ -20,14 +20,12 @@ package org.apache.hadoop.hbase.ipc;

 import java.io.IOException;
 import java.net.InetSocketAddress;
+
 import javax.net.SocketFactory;

-import org.apache.hadoop.hbase.ipc.VersionedProtocol;
-import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-
-import com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.security.User;

 /** An RPC implementation. */
 @InterfaceAudience.Private
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
index 025dd3fdbcc..062f7dfefd8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
@@ -145,10 +145,11 @@ class ActiveMasterManager extends ZooKeeperListener {
       this.watcher.getMasterAddressZNode(), this.sn)) {

       // If we were a backup master before, delete our ZNode from the backup
-      // master directory since we are the active now
-      LOG.info("Deleting ZNode for " + backupZNode + " from backup master directory");
-      ZKUtil.deleteNodeFailSilent(this.watcher, backupZNode);
-
+      // master directory since we are the active master now
+      if (ZKUtil.checkExists(this.watcher, backupZNode) != -1) {
+        LOG.info("Deleting ZNode for " + backupZNode + " from backup master directory");
+        ZKUtil.deleteNodeFailSilent(this.watcher, backupZNode);
+      }
       // Save the znode in a file, this will allow to check if we crash in the launch scripts
       ZNodeClearer.writeMyEphemeralNodeOnDisk(this.sn.toString());

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 1387f17a8a8..2d271117d2d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -185,6 +185,7 @@ import org.apache.hadoop.net.DNS;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.Watcher;
 import org.apache.hadoop.hbase.trace.SpanReceiverHost;
+import org.apache.hadoop.hbase.util.FSUtils;

 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
@@ -324,6 +325,8 @@ Server {
   public HMaster(final Configuration conf)
   throws IOException, KeeperException, InterruptedException {
     this.conf = new Configuration(conf);
+    LOG.info("hbase.rootdir=" + FSUtils.getRootDir(this.conf) +
+        ", hbase.cluster.distributed=" + this.conf.getBoolean("hbase.cluster.distributed", false));
     // Disable the block cache on the master
     this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
     // Set how many times to retry talking to another server over HConnection.
@@ -333,7 +336,7 @@ Server {
       conf.get("hbase.master.dns.interface", "default"),
       conf.get("hbase.master.dns.nameserver", "default")));
     int port = conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT);
-    // Creation of a HSA will force a resolve.
+    // Creation of an ISA will force a resolve.
     InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
     if (initialIsa.getAddress() == null) {
       throw new IllegalArgumentException("Failed resolve of " + initialIsa);
     }
@@ -2291,7 +2294,7 @@ Server {
    * @see org.apache.hadoop.hbase.master.HMasterCommandLine
    */
   public static void main(String [] args) throws Exception {
-    VersionInfo.logVersion();
+    VersionInfo.logVersion();
     new HMasterCommandLine(HMaster.class).doMain(args);
   }

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
index 2b71860e3d4..c740689015d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
@@ -134,12 +134,11 @@ public class RecoverableZooKeeper {
       switch (e.code()) {
         case NONODE:
           if (isRetry) {
-            LOG.info("Node " + path + " already deleted. Assuming that a " +
+            LOG.info("Node " + path + " already deleted. Assuming a " +
                 "previous attempt succeeded.");
             return;
           }
-          LOG.warn("Node " + path + " already deleted, and this is not a " +
-              "retry");
+          LOG.warn("Node " + path + " already deleted, retry=" + isRetry);
           throw e;

         case CONNECTIONLOSS:
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
new file mode 100644
index 00000000000..0d132de3b39
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.experimental.categories.Category;
+import org.junit.Test;
+
+/**
+ * Test a case when an inline index chunk is converted to a root one. This reproduces the bug in
+ * HBASE-6871. We write a carefully selected number of relatively large keys so that we accumulate
+ * a leaf index chunk that only goes over the configured index chunk size after adding the last
+ * key/value. The bug is that when we close the file, we convert that inline (leaf-level) chunk
+ * into a root chunk, but then look at the size of that root chunk, find that it is greater than
+ * the configured chunk size, and split it into a number of intermediate index blocks that should
+ * really be leaf-level blocks. If more keys were added, we would flush the leaf-level block, add
+ * another entry to the root-level block, and that would prevent us from upgrading the leaf-level
+ * chunk to the root chunk, thus not triggering the bug.
+ */
+@Category(SmallTests.class)
+public class TestHFileInlineToRootChunkConversion {
+  private final HBaseTestingUtility testUtil = new HBaseTestingUtility();
+  private final Configuration conf = testUtil.getConfiguration();
+
+  @Test
+  public void testWriteHFile() throws Exception {
+    Path hfPath = new Path(testUtil.getDataTestDir(),
+        TestHFileInlineToRootChunkConversion.class.getSimpleName() + ".hfile");
+    int maxChunkSize = 1024;
+    FileSystem fs = FileSystem.get(conf);
+    CacheConfig cacheConf = new CacheConfig(conf);
+    conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize);
+    HFileWriterV2 hfw =
+        (HFileWriterV2) new HFileWriterV2.WriterFactoryV2(conf, cacheConf)
+            .withBlockSize(16)
+            .withPath(fs, hfPath).create();
+    List<byte[]> keys = new ArrayList<byte[]>();
+    StringBuilder sb = new StringBuilder();
+
+    for (int i = 0; i < 4; ++i) {
+      sb.append("key" + String.format("%05d", i));
+      sb.append("_");
+      for (int j = 0; j < 100; ++j) {
+        sb.append('0' + j);  // int arithmetic: appends the values 48..147, padding out the key
+      }
+      String keyStr = sb.toString();
+      sb.setLength(0);
+
+      byte[] k = Bytes.toBytes(keyStr);
+      System.out.println("Key: " + Bytes.toString(k));
+      keys.add(k);
+      byte[] v = Bytes.toBytes("value" + i);
+      hfw.append(k, v);
+    }
+    hfw.close();
+
+    HFileReaderV2 reader = (HFileReaderV2) HFile.createReader(fs, hfPath, cacheConf);
+    HFileScanner scanner = reader.getScanner(true, true);
+    for (int i = 0; i < keys.size(); ++i) {
+      scanner.seekTo(keys.get(i));
+    }
+    reader.close();
+  }
+}
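The test's javadoc calls the number of keys "carefully selected"; a back-of-the-envelope check shows why four is the count that matters with maxChunkSize = 1024. Each key is a 9-character prefix plus 100 appended ints ('0' + j evaluates to the ints 48..147, not chars), i.e. 52 two-digit and 48 three-digit values, for 257 bytes per key; three keys stay under the limit and the fourth pushes past it. The sketch below is illustration only: the class name is invented here, and it counts raw key bytes while ignoring the index's per-entry overhead.

public class ChunkSizeArithmetic {
  public static void main(String[] args) {
    StringBuilder sb = new StringBuilder();
    sb.append("key").append(String.format("%05d", 0)).append("_");  // 9-char prefix
    for (int j = 0; j < 100; ++j) {
      sb.append('0' + j);  // same int arithmetic as the test: appends 48, 49, ..., 147
    }
    int keyLen = sb.length();  // 9 + 52 * 2 + 48 * 3 = 257
    System.out.println("bytes per key  = " + keyLen);
    System.out.println("3 keys of data = " + 3 * keyLen);  // 771, under 1024
    System.out.println("4 keys of data = " + 4 * keyLen);  // 1028, over 1024
  }
}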
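The HFileBlockIndex hunks above all revolve around one state change: on the closing call, curInlineChunk is handed over as the root chunk and the field is nulled, so writeIndexBlocks and any repeated closing call fail fast instead of silently writing a bogus multi-level index. A stripped-down model of that hand-off, for illustration only (ToyBlockIndexWriter and its List-based chunks are invented stand-ins, not HBase types):

import java.util.ArrayList;
import java.util.List;

class ToyBlockIndexWriter {
  private List<byte[]> curInlineChunk = new ArrayList<byte[]>();
  private List<byte[]> rootChunk = new ArrayList<byte[]>();

  void addEntry(byte[] firstKey) {
    if (curInlineChunk == null) {
      throw new IllegalStateException("writer already closed");
    }
    curInlineChunk.add(firstKey);
  }

  // Mirrors the patched shouldWriteBlock: a null curInlineChunk marks "closed",
  // and calling again with closing=true now fails fast.
  boolean shouldWriteBlock(boolean closing) {
    if (curInlineChunk == null) {
      throw new IllegalStateException("curInlineChunk is null; has shouldWriteBlock been "
          + "called with closing=true and then called again?");
    }
    if (curInlineChunk.isEmpty()) {
      return false;
    }
    if (closing) {
      rootChunk = curInlineChunk;  // promote the inline chunk to the root level
      curInlineChunk = null;       // disallow adding any more index entries
      return false;
    }
    return true;  // mid-write: the accumulated inline chunk should be flushed
  }
}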