diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java
index 08533b42316..78964d6c9c9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java
@@ -91,21 +91,22 @@ class NettyRpcDuplexHandler extends ChannelDuplexHandler {
         : sizeWithoutCellBlock;
     ByteBuf buf = ctx.alloc().buffer(sizeWithoutCellBlock + 4);
     buf.writeInt(totalSize);
-    ByteBufOutputStream bbos = new ByteBufOutputStream(buf);
-    requestHeader.writeDelimitedTo(bbos);
-    if (call.param != null) {
-      call.param.writeDelimitedTo(bbos);
-    }
-    if (cellBlock != null) {
-      ChannelPromise withoutCellBlockPromise = ctx.newPromise();
-      ctx.write(buf, withoutCellBlockPromise);
-      ChannelPromise cellBlockPromise = ctx.newPromise();
-      ctx.write(cellBlock, cellBlockPromise);
-      PromiseCombiner combiner = new PromiseCombiner();
-      combiner.addAll(withoutCellBlockPromise, cellBlockPromise);
-      combiner.finish(promise);
-    } else {
-      ctx.write(buf, promise);
+    try (ByteBufOutputStream bbos = new ByteBufOutputStream(buf)) {
+      requestHeader.writeDelimitedTo(bbos);
+      if (call.param != null) {
+        call.param.writeDelimitedTo(bbos);
+      }
+      if (cellBlock != null) {
+        ChannelPromise withoutCellBlockPromise = ctx.newPromise();
+        ctx.write(buf, withoutCellBlockPromise);
+        ChannelPromise cellBlockPromise = ctx.newPromise();
+        ctx.write(cellBlock, cellBlockPromise);
+        PromiseCombiner combiner = new PromiseCombiner();
+        combiner.addAll(withoutCellBlockPromise, cellBlockPromise);
+        combiner.finish(promise);
+      } else {
+        ctx.write(buf, promise);
+      }
     }
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
index a31cab95f43..dafe421bcd8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
@@ -1930,21 +1930,21 @@ public class ZKUtil {
       socket.connect(sockAddr, timeout);
       socket.setSoTimeout(timeout);
-      PrintWriter out = new PrintWriter(socket.getOutputStream(), true);
-      BufferedReader in = new BufferedReader(new InputStreamReader(
-          socket.getInputStream()));
-      out.println("stat");
-      out.flush();
-      ArrayList<String> res = new ArrayList<>();
-      while (true) {
-        String line = in.readLine();
-        if (line != null) {
-          res.add(line);
-        } else {
-          break;
+      try (PrintWriter out = new PrintWriter(socket.getOutputStream(), true);
+          BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream()))) {
+        out.println("stat");
+        out.flush();
+        ArrayList<String> res = new ArrayList<>();
+        while (true) {
+          String line = in.readLine();
+          if (line != null) {
+            res.add(line);
+          } else {
+            break;
+          }
         }
+        return res.toArray(new String[res.size()]);
       }
-      return res.toArray(new String[res.size()]);
     }
   }
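Note: both hunks above fix the same leak pattern. A stream wrapper (`ByteBufOutputStream`, `PrintWriter`, `BufferedReader`) was created and then abandoned if an exception fired before the method returned; try-with-resources closes every declared resource, in reverse declaration order, on all exit paths. A standalone sketch of the idiom as applied in the `ZKUtil` hunk (class and method names here are illustrative, not part of the patch):

```java
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.ArrayList;
import java.util.List;

public class FourLetterWordClient {
  /** Sends a ZooKeeper four-letter command (e.g. "stat") and returns the reply lines. */
  static String[] send(String host, int port, String cmd, int timeoutMs) throws IOException {
    try (Socket socket = new Socket()) {
      socket.connect(new InetSocketAddress(host, port), timeoutMs);
      socket.setSoTimeout(timeoutMs);
      // Resources close in reverse declaration order, even if readLine() throws.
      try (PrintWriter out = new PrintWriter(socket.getOutputStream(), true);
          BufferedReader in =
              new BufferedReader(new InputStreamReader(socket.getInputStream()))) {
        out.println(cmd);
        List<String> res = new ArrayList<>();
        for (String line; (line = in.readLine()) != null;) {
          res.add(line);
        }
        return res.toArray(new String[0]);
      }
    }
  }
}
```

The Netty hunk is the same idiom; closing a `ByteBufOutputStream` does not release the underlying `ByteBuf`, so the `buf` handed to `ctx.write()` is unaffected by the new `try` block.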
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
index 5c8fa1ba563..d1b8bb72cf1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
@@ -315,7 +315,7 @@ public final class BucketAllocator {
     this.bucketSizes = bucketSizes == null ? DEFAULT_BUCKET_SIZES : bucketSizes;
     Arrays.sort(this.bucketSizes);
     this.bigItemSize = Ints.max(this.bucketSizes);
-    this.bucketCapacity = FEWEST_ITEMS_IN_BUCKET * bigItemSize;
+    this.bucketCapacity = FEWEST_ITEMS_IN_BUCKET * (long) bigItemSize;
     buckets = new Bucket[(int) (availableSpace / bucketCapacity)];
     if (buckets.length < this.bucketSizes.length)
       throw new BucketAllocatorException("Bucket allocator size too small (" + buckets.length +
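The cast is the whole fix here: `FEWEST_ITEMS_IN_BUCKET` and `bigItemSize` are both `int`s, so the old product was evaluated in 32-bit arithmetic and only the already-wrapped result was widened into the `long` field. With a 1 GiB bucket size, `4 * 2^30` is exactly `2^32` and wraps to zero. A minimal self-contained demonstration (the constant's value, 4, matches `BucketAllocator` but is restated here as an assumption so the sketch compiles on its own):

```java
public class IntOverflowDemo {
  // BucketAllocator declares FEWEST_ITEMS_IN_BUCKET as an int constant (assumed 4 here).
  static final int FEWEST_ITEMS_IN_BUCKET = 4;

  public static void main(String[] args) {
    int bigItemSize = 1024 * 1024 * 1024; // 1 GiB, the largest size in the new test below

    long wrapped = FEWEST_ITEMS_IN_BUCKET * bigItemSize;        // int * int: 4 * 2^30 wraps to 0
    long widened = FEWEST_ITEMS_IN_BUCKET * (long) bigItemSize; // one long operand: 64-bit multiply

    System.out.println(wrapped); // 0
    System.out.println(widened); // 4294967296
  }
}
```

A zero `bucketCapacity` then makes `availableSpace / bucketCapacity` throw `ArithmeticException`; the new `testBucketAllocatorLargeBuckets` at the end of this patch exercises exactly that configuration.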
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java
index a48444c9f63..86dc0141d2e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java
@@ -54,60 +54,61 @@ public class MasterDumpServlet extends StateDumpServlet {
     response.setContentType("text/plain");
     OutputStream os = response.getOutputStream();
-    PrintWriter out = new PrintWriter(os);
+    try (PrintWriter out = new PrintWriter(os)) {
 
-    out.println("Master status for " + master.getServerName()
+      out.println("Master status for " + master.getServerName()
         + " as of " + new Date());
 
-    out.println("\n\nVersion Info:");
-    out.println(LINE);
-    dumpVersionInfo(out);
+      out.println("\n\nVersion Info:");
+      out.println(LINE);
+      dumpVersionInfo(out);
 
-    out.println("\n\nTasks:");
-    out.println(LINE);
-    TaskMonitor.get().dumpAsText(out);
+      out.println("\n\nTasks:");
+      out.println(LINE);
+      TaskMonitor.get().dumpAsText(out);
 
-    out.println("\n\nServers:");
-    out.println(LINE);
-    dumpServers(master, out);
+      out.println("\n\nServers:");
+      out.println(LINE);
+      dumpServers(master, out);
 
-    out.println("\n\nRegions-in-transition:");
-    out.println(LINE);
-    dumpRIT(master, out);
+      out.println("\n\nRegions-in-transition:");
+      out.println(LINE);
+      dumpRIT(master, out);
 
-    out.println("\n\nExecutors:");
-    out.println(LINE);
-    dumpExecutors(master.getExecutorService(), out);
+      out.println("\n\nExecutors:");
+      out.println(LINE);
+      dumpExecutors(master.getExecutorService(), out);
 
-    out.println("\n\nStacks:");
-    out.println(LINE);
-    out.flush();
-    PrintStream ps = new PrintStream(response.getOutputStream(), false, "UTF-8");
-    Threads.printThreadInfo(ps, "");
-    ps.flush();
+      out.println("\n\nStacks:");
+      out.println(LINE);
+      out.flush();
+      PrintStream ps = new PrintStream(response.getOutputStream(), false, "UTF-8");
+      Threads.printThreadInfo(ps, "");
+      ps.flush();
 
-    out.println("\n\nMaster configuration:");
-    out.println(LINE);
-    Configuration conf = master.getConfiguration();
-    out.flush();
-    conf.writeXml(os);
-    os.flush();
+      out.println("\n\nMaster configuration:");
+      out.println(LINE);
+      Configuration conf = master.getConfiguration();
+      out.flush();
+      conf.writeXml(os);
+      os.flush();
 
-    out.println("\n\nRecent regionserver aborts:");
-    out.println(LINE);
-    master.getRegionServerFatalLogBuffer().dumpTo(out);
+      out.println("\n\nRecent regionserver aborts:");
+      out.println(LINE);
+      master.getRegionServerFatalLogBuffer().dumpTo(out);
 
-    out.println("\n\nLogs");
-    out.println(LINE);
-    long tailKb = getTailKbParam(request);
-    LogMonitoring.dumpTailOfLogs(out, tailKb);
+      out.println("\n\nLogs");
+      out.println(LINE);
+      long tailKb = getTailKbParam(request);
+      LogMonitoring.dumpTailOfLogs(out, tailKb);
 
-    out.println("\n\nRS Queue:");
-    out.println(LINE);
-    if(isShowQueueDump(conf)) {
-      RSDumpServlet.dumpQueue(master, out);
+      out.println("\n\nRS Queue:");
+      out.println(LINE);
+      if (isShowQueueDump(conf)) {
+        RSDumpServlet.dumpQueue(master, out);
+      }
+      out.flush();
     }
-    out.flush();
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 8b9266c845b..aa64caa5047 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1291,7 +1291,7 @@ public class MasterRpcServices extends RSRpcServices
     throws ServiceException {
     try {
       master.checkInitialized();
-      Boolean result = master.getHFileCleaner().runCleaner() && master.getLogCleaner().runCleaner();
+      boolean result = master.getHFileCleaner().runCleaner() && master.getLogCleaner().runCleaner();
       return ResponseConverter.buildRunCleanerChoreResponse(result);
     } catch (IOException ioe) {
       throw new ServiceException(ioe);
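In the `MasterRpcServices` hunk, `runCleaner()` returns a primitive `boolean`, and so does the `&&` expression, so typing the local as `Boolean` only forced a box that was immediately unboxed again. Beyond the wasted allocation, boxed `Boolean` locals invite null-unboxing NPEs and accidental `==` identity comparisons. A small self-contained illustration of both hazards (not HBase code):

```java
public class BoxedBooleanDemo {
  public static void main(String[] args) {
    Boolean boxed = null;
    try {
      if (boxed) { // implicit boxed.booleanValue()
        System.out.println("never reached");
      }
    } catch (NullPointerException e) {
      System.out.println("unboxing null throws NPE");
    }

    // == on boxed values compares references, not truth values.
    Boolean a = new Boolean(true); // deliberately bypasses the Boolean.TRUE cache
    Boolean b = new Boolean(true);
    System.out.println(a == b);      // false
    System.out.println(a.equals(b)); // true
  }
}
```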
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
index 9499a79ac7b..997ffcc9701 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
@@ -55,52 +55,53 @@ public class RSDumpServlet extends StateDumpServlet {
     }
     OutputStream os = response.getOutputStream();
-    PrintWriter out = new PrintWriter(os);
+    try (PrintWriter out = new PrintWriter(os)) {
 
-    out.println("RegionServer status for " + hrs.getServerName()
+      out.println("RegionServer status for " + hrs.getServerName()
         + " as of " + new Date());
 
-    out.println("\n\nVersion Info:");
-    out.println(LINE);
-    dumpVersionInfo(out);
+      out.println("\n\nVersion Info:");
+      out.println(LINE);
+      dumpVersionInfo(out);
 
-    out.println("\n\nTasks:");
-    out.println(LINE);
-    TaskMonitor.get().dumpAsText(out);
+      out.println("\n\nTasks:");
+      out.println(LINE);
+      TaskMonitor.get().dumpAsText(out);
 
-    out.println("\n\nRowLocks:");
-    out.println(LINE);
-    dumpRowLock(hrs, out);
+      out.println("\n\nRowLocks:");
+      out.println(LINE);
+      dumpRowLock(hrs, out);
 
-    out.println("\n\nExecutors:");
-    out.println(LINE);
-    dumpExecutors(hrs.getExecutorService(), out);
+      out.println("\n\nExecutors:");
+      out.println(LINE);
+      dumpExecutors(hrs.getExecutorService(), out);
 
-    out.println("\n\nStacks:");
-    out.println(LINE);
-    PrintStream ps = new PrintStream(response.getOutputStream(), false, "UTF-8");
-    Threads.printThreadInfo(ps, "");
-    ps.flush();
+      out.println("\n\nStacks:");
+      out.println(LINE);
+      PrintStream ps = new PrintStream(response.getOutputStream(), false, "UTF-8");
+      Threads.printThreadInfo(ps, "");
+      ps.flush();
 
-    out.println("\n\nRS Configuration:");
-    out.println(LINE);
-    Configuration conf = hrs.getConfiguration();
-    out.flush();
-    conf.writeXml(os);
-    os.flush();
+      out.println("\n\nRS Configuration:");
+      out.println(LINE);
+      Configuration conf = hrs.getConfiguration();
+      out.flush();
+      conf.writeXml(os);
+      os.flush();
 
-    out.println("\n\nLogs");
-    out.println(LINE);
-    long tailKb = getTailKbParam(request);
-    LogMonitoring.dumpTailOfLogs(out, tailKb);
+      out.println("\n\nLogs");
+      out.println(LINE);
+      long tailKb = getTailKbParam(request);
+      LogMonitoring.dumpTailOfLogs(out, tailKb);
 
-    out.println("\n\nRS Queue:");
-    out.println(LINE);
-    if(isShowQueueDump(conf)) {
-      dumpQueue(hrs, out);
+      out.println("\n\nRS Queue:");
+      out.println(LINE);
+      if (isShowQueueDump(conf)) {
+        dumpQueue(hrs, out);
+      }
+
+      out.flush();
     }
-
-    out.flush();
   }
 
   public static void dumpRowLock(HRegionServer hrs, PrintWriter out) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java
index 97b83875313..64ca7716bc1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java
@@ -101,10 +101,9 @@ public class FIFOCompactionPolicy extends ExploringCompactionPolicy {
     long currentTime = EnvironmentEdgeManager.currentTime();
     for(StoreFile sf: files){
       // Check MIN_VERSIONS is in HStore removeUnneededFiles
-      Long maxTs = sf.getReader().getMaxTimestamp();
+      long maxTs = sf.getReader().getMaxTimestamp();
       long maxTtl = storeConfigInfo.getStoreFileTtl();
-      if(maxTs == null
-        || maxTtl == Long.MAX_VALUE
+      if (maxTtl == Long.MAX_VALUE
         || (currentTime - maxTtl < maxTs)){
         continue;
       } else{
@@ -120,10 +119,9 @@ public class FIFOCompactionPolicy extends ExploringCompactionPolicy {
     Collection<StoreFile> expiredStores = new ArrayList<>();
     for(StoreFile sf: files){
       // Check MIN_VERSIONS is in HStore removeUnneededFiles
-      Long maxTs = sf.getReader().getMaxTimestamp();
+      long maxTs = sf.getReader().getMaxTimestamp();
       long maxTtl = storeConfigInfo.getStoreFileTtl();
-      if(maxTs == null
-        || maxTtl == Long.MAX_VALUE
+      if (maxTtl == Long.MAX_VALUE
         || (currentTime - maxTtl < maxTs)){
         continue;
       } else if(filesCompacting == null || filesCompacting.contains(sf) == false){
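In both `FIFOCompactionPolicy` hunks the `maxTs == null` branch was dead code: `StoreFile.Reader.getMaxTimestamp()` returns a primitive `long`, so the boxed `Long` could never be null and every loop iteration paid for a pointless autobox. With the box gone, the expiry test is plain primitive arithmetic. Extracted as a standalone predicate for clarity (names here are hypothetical; in HBase the values come from `StoreFile.Reader` and `StoreConfigInformation`):

```java
public final class FifoExpiryCheck {
  /** True when the newest cell in a store file has outlived the store's TTL. */
  static boolean isExpired(long currentTime, long maxTs, long maxTtl) {
    // Mirrors the patched condition: a file is kept when TTL is disabled
    // (Long.MAX_VALUE) or its newest timestamp is still inside the TTL window.
    return maxTtl != Long.MAX_VALUE && currentTime - maxTtl >= maxTs;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    System.out.println(isExpired(now, now - 10_000, 5_000)); // true: 10s old, 5s TTL
    System.out.println(isExpired(now, now - 1_000, 5_000));  // false: still fresh
    System.out.println(isExpired(now, now, Long.MAX_VALUE)); // false: TTL disabled
  }
}
```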
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
index ce018da360f..a9a53b83336 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
@@ -427,7 +427,7 @@ public class RegionMover extends AbstractHBaseTool {
     moveRegionsPool.shutdown();
     long timeoutInSeconds = regionsToMove.size()
-        * admin.getConfiguration().getInt(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX);
+        * admin.getConfiguration().getLong(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX);
     try {
       if (!moveRegionsPool.awaitTermination(timeoutInSeconds, TimeUnit.SECONDS)) {
         moveRegionsPool.shutdownNow();
@@ -501,7 +501,7 @@ public class RegionMover extends AbstractHBaseTool {
     moveRegionsPool.shutdown();
     long timeoutInSeconds = regionsToMove.size()
-        * admin.getConfiguration().getInt(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX);
+        * admin.getConfiguration().getLong(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX);
     try {
       if (!moveRegionsPool.awaitTermination(timeoutInSeconds, TimeUnit.SECONDS)) {
         moveRegionsPool.shutdownNow();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index da828ded1e0..df4c1bd48d4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -711,7 +711,8 @@ public class WALSplitter {
     for (FileStatus status : files) {
       String fileName = status.getPath().getName();
       try {
-        Long tmpSeqId = Long.parseLong(fileName.substring(0, fileName.length()
+        long tmpSeqId =
+            Long.parseLong(fileName.substring(0, fileName.length()
             - SEQUENCE_ID_FILE_SUFFIX_LENGTH));
         maxSeqId = Math.max(tmpSeqId, maxSeqId);
       } catch (NumberFormatException ex) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
index 0f16bfa5e94..b0db13a49db 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
@@ -268,4 +268,12 @@ public class TestBucketCache {
     TEST_UTIL.cleanupTestDir();
   }
+
+  @Test
+  public void testBucketAllocatorLargeBuckets() throws BucketAllocatorException {
+    long availableSpace = 20 * 1024L * 1024 * 1024;
+    int[] bucketSizes = new int[] { 1024, 1024 * 1024, 1024 * 1024 * 1024 };
+    BucketAllocator allocator = new BucketAllocator(availableSpace, bucketSizes);
+    assertTrue(allocator.getBuckets().length > 0);
+  }
 }
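The `RegionMover` hunks apply the same widening rule as `BucketAllocator`: `size()` and `getInt()` are both `int`s, so the timeout multiplication ran in 32-bit arithmetic, and `getInt()` additionally cannot read a configured value beyond `Integer.MAX_VALUE`. Reading the key with `getLong()` addresses both; the `WALSplitter` hunk is the same de-boxing as in `FIFOCompactionPolicy`. A sketch of the configuration behavior (the key string mirrors `RegionMover`'s `MOVE_WAIT_MAX_KEY` but is hardcoded here as an assumption):

```java
import org.apache.hadoop.conf.Configuration;

public class MoveWaitDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("hbase.move.wait.max", "3000000000"); // > Integer.MAX_VALUE

    // getLong parses the full 64-bit range; getInt would throw
    // NumberFormatException on this same value.
    long waitMax = conf.getLong("hbase.move.wait.max", 60L);

    int regionsToMove = 5; // hypothetical region count
    long timeoutInSeconds = regionsToMove * waitMax; // long operand: no 32-bit wrap
    System.out.println(timeoutInSeconds); // 15000000000
  }
}
```

The new `testBucketAllocatorLargeBuckets` closes the loop on the `BucketAllocator` fix: 20 GiB of space with a 1 GiB bucket size is exactly the configuration that made the old int multiplication wrap to zero.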