From 9f39d356c404d07b5826a147d29c700da8b7489a Mon Sep 17 00:00:00 2001 From: Mikhail Antonov Date: Tue, 8 Dec 2015 13:53:21 -0800 Subject: [PATCH 01/72] HBASE-7171 Initial web UI for region/memstore/storefiles details --- .../tmpl/regionserver/RegionListTmpl.jamon | 20 ++- .../hbase-webapps/regionserver/region.jsp | 129 ++++++++++++++++++ .../hbase-webapps/regionserver/storeFile.jsp | 115 ++++++++++++++++ 3 files changed, 259 insertions(+), 5 deletions(-) create mode 100644 hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp create mode 100644 hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon index bf143b97c79..8e341f0ed49 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon @@ -95,7 +95,9 @@ <%for HRegionInfo r: onlineRegions %> - <% r.getRegionNameAsString() %> + + <% r.getRegionNameAsString() %> + <% Bytes.toStringBinary(r.getStartKey()) %> <% Bytes.toStringBinary(r.getEndKey()) %> <% r.getReplicaId() %> @@ -121,7 +123,9 @@ <%java> RegionLoad load = regionServer.createRegionLoad(r.getEncodedName()); - <% r.getRegionNameAsString() %> + + <% r.getRegionNameAsString() %> + <%if load != null %> <% load.getReadRequestsCount() %> <% load.getWriteRequestsCount() %> @@ -154,7 +158,9 @@ <%java> RegionLoad load = regionServer.createRegionLoad(r.getEncodedName()); - <% r.getRegionNameAsString() %> + + <% r.getRegionNameAsString() %> + <%if load != null %> <% load.getStores() %> <% load.getStorefiles() %> @@ -193,7 +199,9 @@ ((float) load.getCurrentCompactedKVs() / load.getTotalCompactingKVs())) + "%"; } - <% r.getRegionNameAsString() %> + + <% r.getRegionNameAsString() %> + <%if load != null %> <% load.getTotalCompactingKVs() %> <% load.getCurrentCompactedKVs() %> @@ -220,7 +228,9 @@ <%java> RegionLoad load = regionServer.createRegionLoad(r.getEncodedName()); - <% r.getRegionNameAsString() %> + + <% r.getRegionNameAsString() %> + <%if load != null %> <% load.getMemstoreSizeMB() %>m diff --git a/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp b/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp new file mode 100644 index 00000000000..cd35ad1fcfa --- /dev/null +++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp @@ -0,0 +1,129 @@ +<%-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +--%> +<%@ page contentType="text/html;charset=UTF-8" + import="java.util.Collection" + import="java.util.Date" + import="java.util.List" + import="static org.apache.commons.lang.StringEscapeUtils.escapeXml" + import="org.apache.hadoop.conf.Configuration" + import="org.apache.hadoop.hbase.HTableDescriptor" + import="org.apache.hadoop.hbase.HColumnDescriptor" + import="org.apache.hadoop.hbase.HBaseConfiguration" + import="org.apache.hadoop.hbase.HRegionInfo" + import="org.apache.hadoop.hbase.regionserver.HRegionServer" + import="org.apache.hadoop.hbase.regionserver.Region" + import="org.apache.hadoop.hbase.regionserver.Store" + import="org.apache.hadoop.hbase.regionserver.StoreFile"%> +<% + String regionName = request.getParameter("name"); + HRegionServer rs = (HRegionServer) getServletContext().getAttribute(HRegionServer.REGIONSERVER); + Configuration conf = rs.getConfiguration(); + + Region region = rs.getFromOnlineRegions(regionName); + String displayName = region.getRegionInfo().getRegionNameAsString(); +%> + + + + + + HBase RegionServer: <%= rs.getServerName() %> + + + + + + + + + + + + + + +
+
+ +
+
+<% if(region != null) { //
+     List<Store> stores = region.getStores();
+     for (Store store : stores) {
+       String cf = store.getColumnFamilyName();
+       Collection<StoreFile> storeFiles = store.getStorefiles(); %>
+
+       <h3>Column Family: <%= cf %></h3>
+
+       <h4>Memstore size (MB): <%= (int) (store.getMemStoreSize() / 1024 / 1024) %></h4>
+
+       <h4>Store Files</h4>
+
+       <table class="table table-striped">
+         <tr>
+           <th>Store File</th>
+           <th>Size (MB)</th>
+           <th>Modification time</th>
+         </tr>
+         <% for(StoreFile sf : storeFiles) { %>
+         <tr>
+           <td><%= sf.getPath() %></td>
+           <td><%= (int) (rs.getFileSystem().getLength(sf.getPath()) / 1024 / 1024) %></td>
+           <td><%= new Date(sf.getModificationTimeStamp()) %></td>
+         </tr>
+         <% } %>
+       </table>
+
+       <p><%= storeFiles.size() %> StoreFile(s) in set.</p>
+
+  <% }
+   }%>
+
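The page body above leans entirely on the region server's in-process API (Region, Store, StoreFile). A rough standalone sketch of the same traversal, handy when inspecting a server from a utility class or debugger; the helper class, its name, the null handling, and the use of FileSystem.getFileStatus() are illustrative assumptions rather than part of this patch:

import java.io.IOException;
import java.util.Date;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;

// Hypothetical debugging helper: walks the same Region -> Store -> StoreFile chain the JSP renders.
public final class RegionDetailDumper {
  private RegionDetailDumper() {
  }

  public static void dump(HRegionServer rs, String encodedRegionName) throws IOException {
    Region region = rs.getFromOnlineRegions(encodedRegionName);
    if (region == null) {
      System.out.println("Region " + encodedRegionName + " is not online on this server");
      return;
    }
    FileSystem fs = rs.getFileSystem();
    for (Store store : region.getStores()) {
      System.out.println("Column family: " + store.getColumnFamilyName());
      System.out.println("  memstore size (MB): " + (store.getMemStoreSize() / 1024 / 1024));
      for (StoreFile sf : store.getStorefiles()) {
        long sizeMb = fs.getFileStatus(sf.getPath()).getLen() / 1024 / 1024;
        System.out.println("  " + sf.getPath() + "  " + sizeMb + " MB  "
            + new Date(sf.getModificationTimeStamp()));
      }
    }
  }
}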
+ + + + + diff --git a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp new file mode 100644 index 00000000000..cbbb61fa9d1 --- /dev/null +++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp @@ -0,0 +1,115 @@ +<%-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--%> +<%@ page contentType="text/html;charset=UTF-8" + import="java.util.Collection" + import="java.util.Date" + import="java.util.List" + import="java.io.ByteArrayOutputStream" + import="java.io.PrintStream" + import="java.io.BufferedReader" + import="java.io.InputStreamReader" + import="org.apache.hadoop.conf.Configuration" + import="org.apache.hadoop.hbase.HBaseConfiguration" + import="org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter" + import="org.apache.hadoop.hbase.regionserver.HRegionServer" + import="org.apache.hadoop.hbase.regionserver.Region" + import="org.apache.hadoop.hbase.regionserver.Store" + import="org.apache.hadoop.hbase.regionserver.StoreFile"%> +<% + String storeFile = request.getParameter("name"); + HRegionServer rs = (HRegionServer) getServletContext().getAttribute(HRegionServer.REGIONSERVER); + Configuration conf = rs.getConfiguration(); +%> + + + + + + HBase RegionServer: <%= rs.getServerName() %> + + + + + + + + + + + + + + +
+
+ +
+
+<%
+   try {
+     // Shell out to the HFile tool ("hbase hfile -s -f <path>") and stream its
+     // summary output line by line into the page.
+     ProcessBuilder pb = new ProcessBuilder("hbase", "hfile", "-s", "-f", storeFile);
+     pb.redirectErrorStream(true);
+     Process pr = pb.start();
+     BufferedReader in = new BufferedReader(new InputStreamReader(pr.getInputStream()));
+     String line;
+     while ((line = in.readLine()) != null) {%>
+       <%= line %>
+     <%}
+     pr.waitFor();
+     in.close();
+   }
+   catch (Exception e) {%>
+     <%= e %>
+   <%}
+%>
+  
+
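The page gets its body by forking the external "hbase hfile" command with the -s (stats) and -f (file) flags. Roughly the same summary could be produced in-process through the HFilePrettyPrinter class the JSP already imports, assuming it is runnable as a Hadoop Tool the way the CLI drives it; the wrapper class and sample path below are illustrative only, not part of the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical helper: prints the same "-s -f <path>" summary without forking a child process.
public class StoreFileSummary {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Sample path for illustration; in the JSP this value comes from the "name" request parameter.
    String storeFile = args.length > 0 ? args[0] : "/hbase/data/default/t1/1588230740/f/0a1b2c3d";
    // Assumes HFilePrettyPrinter can be driven as a Hadoop Tool, as the "hbase hfile" command does.
    int exitCode = ToolRunner.run(conf, new HFilePrettyPrinter(),
        new String[] { "-s", "-f", storeFile });
    System.exit(exitCode);
  }
}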
+ + + + + From 07e2496ad1735981ba910a873eeb6a50b1461f0d Mon Sep 17 00:00:00 2001 From: tedyu Date: Wed, 9 Dec 2015 07:20:55 -0800 Subject: [PATCH 02/72] HBASE-14954 IllegalArgumentException was thrown when doing online configuration change in CompactSplitThread (Victor Xu) --- .../regionserver/CompactSplitThread.java | 46 ++++++-- .../regionserver/TestCompactSplitThread.java | 104 ++++++++++++++++++ 2 files changed, 141 insertions(+), 9 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index be9180cc5d1..93a686fa579 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -617,8 +617,13 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi LOG.info("Changing the value of " + LARGE_COMPACTION_THREADS + " from " + this.longCompactions.getCorePoolSize() + " to " + largeThreads); - this.longCompactions.setMaximumPoolSize(largeThreads); - this.longCompactions.setCorePoolSize(largeThreads); + if(this.longCompactions.getCorePoolSize() < largeThreads) { + this.longCompactions.setMaximumPoolSize(largeThreads); + this.longCompactions.setCorePoolSize(largeThreads); + } else { + this.longCompactions.setCorePoolSize(largeThreads); + this.longCompactions.setMaximumPoolSize(largeThreads); + } } int smallThreads = newConf.getInt(SMALL_COMPACTION_THREADS, @@ -627,8 +632,13 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi LOG.info("Changing the value of " + SMALL_COMPACTION_THREADS + " from " + this.shortCompactions.getCorePoolSize() + " to " + smallThreads); - this.shortCompactions.setMaximumPoolSize(smallThreads); - this.shortCompactions.setCorePoolSize(smallThreads); + if(this.shortCompactions.getCorePoolSize() < smallThreads) { + this.shortCompactions.setMaximumPoolSize(smallThreads); + this.shortCompactions.setCorePoolSize(smallThreads); + } else { + this.shortCompactions.setCorePoolSize(smallThreads); + this.shortCompactions.setMaximumPoolSize(smallThreads); + } } int splitThreads = newConf.getInt(SPLIT_THREADS, @@ -637,8 +647,13 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi LOG.info("Changing the value of " + SPLIT_THREADS + " from " + this.splits.getCorePoolSize() + " to " + splitThreads); - this.splits.setMaximumPoolSize(smallThreads); - this.splits.setCorePoolSize(smallThreads); + if(this.splits.getCorePoolSize() < splitThreads) { + this.splits.setMaximumPoolSize(splitThreads); + this.splits.setCorePoolSize(splitThreads); + } else { + this.splits.setCorePoolSize(splitThreads); + this.splits.setMaximumPoolSize(splitThreads); + } } int mergeThreads = newConf.getInt(MERGE_THREADS, @@ -647,8 +662,13 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi LOG.info("Changing the value of " + MERGE_THREADS + " from " + this.mergePool.getCorePoolSize() + " to " + mergeThreads); - this.mergePool.setMaximumPoolSize(smallThreads); - this.mergePool.setCorePoolSize(smallThreads); + if(this.mergePool.getCorePoolSize() < mergeThreads) { + this.mergePool.setMaximumPoolSize(mergeThreads); + this.mergePool.setCorePoolSize(mergeThreads); + } else { + 
this.mergePool.setCorePoolSize(mergeThreads); + this.mergePool.setMaximumPoolSize(mergeThreads); + } } CompactionThroughputController old = this.compactionThroughputController; @@ -667,10 +687,18 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi return this.shortCompactions.getCorePoolSize(); } - public int getLargeCompactionThreadNum() { + protected int getLargeCompactionThreadNum() { return this.longCompactions.getCorePoolSize(); } + protected int getSplitThreadNum() { + return this.splits.getCorePoolSize(); + } + + protected int getMergeThreadNum() { + return this.mergePool.getCorePoolSize(); + } + /** * {@inheritDoc} */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java new file mode 100644 index 00000000000..022279a7a21 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java @@ -0,0 +1,104 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.client.*; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputControllerFactory; +import org.apache.hadoop.hbase.regionserver.compactions.NoLimitCompactionThroughputController; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +@Category(MediumTests.class) +public class TestCompactSplitThread { + private static final Log LOG = LogFactory.getLog(TestCompactSplitThread.class); + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private final TableName tableName = TableName.valueOf(getClass().getSimpleName()); + private final byte[] family = Bytes.toBytes("f"); + + @Test + public void testThreadPoolSizeTuning() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + conf.setInt(CompactSplitThread.LARGE_COMPACTION_THREADS, 3); + conf.setInt(CompactSplitThread.SMALL_COMPACTION_THREADS, 4); + conf.setInt(CompactSplitThread.SPLIT_THREADS, 5); + conf.setInt(CompactSplitThread.MERGE_THREADS, 6); + TEST_UTIL.startMiniCluster(1); + Connection conn = ConnectionFactory.createConnection(conf); + try { + HTableDescriptor htd = new HTableDescriptor(tableName); + htd.addFamily(new HColumnDescriptor(family)); + htd.setCompactionEnabled(false); + TEST_UTIL.getHBaseAdmin().createTable(htd); + TEST_UTIL.waitTableAvailable(tableName); + HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(tableName); + + // check initial configuration of thread pool sizes + assertEquals(3, regionServer.compactSplitThread.getLargeCompactionThreadNum()); + assertEquals(4, regionServer.compactSplitThread.getSmallCompactionThreadNum()); + assertEquals(5, regionServer.compactSplitThread.getSplitThreadNum()); + assertEquals(6, regionServer.compactSplitThread.getMergeThreadNum()); + + // change bigger configurations and do online update + conf.setInt(CompactSplitThread.LARGE_COMPACTION_THREADS, 4); + conf.setInt(CompactSplitThread.SMALL_COMPACTION_THREADS, 5); + conf.setInt(CompactSplitThread.SPLIT_THREADS, 6); + conf.setInt(CompactSplitThread.MERGE_THREADS, 7); + try { + regionServer.compactSplitThread.onConfigurationChange(conf); + } catch (IllegalArgumentException iae) { + Assert.fail("Update bigger configuration failed!"); + } + + // check again after online update + assertEquals(4, regionServer.compactSplitThread.getLargeCompactionThreadNum()); + assertEquals(5, regionServer.compactSplitThread.getSmallCompactionThreadNum()); + assertEquals(6, regionServer.compactSplitThread.getSplitThreadNum()); + assertEquals(7, regionServer.compactSplitThread.getMergeThreadNum()); + + // change smaller configurations and do online update + conf.setInt(CompactSplitThread.LARGE_COMPACTION_THREADS, 2); + conf.setInt(CompactSplitThread.SMALL_COMPACTION_THREADS, 3); + conf.setInt(CompactSplitThread.SPLIT_THREADS, 4); + conf.setInt(CompactSplitThread.MERGE_THREADS, 5); + try { + regionServer.compactSplitThread.onConfigurationChange(conf); + } catch (IllegalArgumentException iae) { + Assert.fail("Update smaller configuration failed!"); + } + + // check again after 
online update + assertEquals(2, regionServer.compactSplitThread.getLargeCompactionThreadNum()); + assertEquals(3, regionServer.compactSplitThread.getSmallCompactionThreadNum()); + assertEquals(4, regionServer.compactSplitThread.getSplitThreadNum()); + assertEquals(5, regionServer.compactSplitThread.getMergeThreadNum()); + } finally { + conn.close(); + TEST_UTIL.shutdownMiniCluster(); + } + } +} From c6e73f80c513fea98c4e71be31cd2ca0717b8e47 Mon Sep 17 00:00:00 2001 From: Elliott Clark Date: Mon, 7 Dec 2015 11:01:51 -0800 Subject: [PATCH 03/72] HBASE-14942 Allow turning off BoundedByteBufferPool --- .../apache/hadoop/hbase/ipc/RpcServer.java | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index 13846532c76..575503f6719 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -1988,15 +1988,20 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { final InetSocketAddress bindAddress, Configuration conf, RpcScheduler scheduler) throws IOException { - this.reservoir = new BoundedByteBufferPool( - conf.getInt("hbase.ipc.server.reservoir.max.buffer.size", 1024 * 1024), - conf.getInt("hbase.ipc.server.reservoir.initial.buffer.size", 16 * 1024), - // Make the max twice the number of handlers to be safe. - conf.getInt("hbase.ipc.server.reservoir.initial.max", - conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, - HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT) * 2), - // By default make direct byte buffers from the buffer pool. - conf.getBoolean("hbase.ipc.server.reservoir.direct.buffer", true)); + + if (conf.getBoolean("hbase.ipc.server.reservoir.enabled", true)) { + this.reservoir = new BoundedByteBufferPool( + conf.getInt("hbase.ipc.server.reservoir.max.buffer.size", 1024 * 1024), + conf.getInt("hbase.ipc.server.reservoir.initial.buffer.size", 16 * 1024), + // Make the max twice the number of handlers to be safe. + conf.getInt("hbase.ipc.server.reservoir.initial.max", + conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, + HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT) * 2), + // By default make direct byte buffers from the buffer pool. 
+ conf.getBoolean("hbase.ipc.server.reservoir.direct.buffer", true)); + } else { + reservoir = null; + } this.server = server; this.services = services; this.bindAddress = bindAddress; From 967873b5783cc682c0bcd2c2a25aad7baa49f3a3 Mon Sep 17 00:00:00 2001 From: Elliott Clark Date: Thu, 19 Nov 2015 14:19:00 -0800 Subject: [PATCH 04/72] HBASE-14851 Add test showing how to use per put TTL from thrift --- .../TestThriftHBaseServiceHandler.java | 50 +++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java index 65abb0b6857..1575429d86a 100644 --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java @@ -684,6 +684,56 @@ public class TestThriftHBaseServiceHandler { } } + @Test + public void testPutTTL() throws Exception { + ThriftHBaseServiceHandler handler = createHandler(); + byte[] rowName = "testPutTTL".getBytes(); + ByteBuffer table = wrap(tableAname); + List columnValues = new ArrayList(); + + // Add some dummy data + columnValues.add( + new TColumnValue( + wrap(familyAname), + wrap(qualifierAname), + wrap(Bytes.toBytes(1L)))); + + + TPut put = new TPut(wrap(rowName), columnValues); + put.setColumnValues(columnValues); + + Map attributes = new HashMap<>(); + + // Time in ms for the kv's to live. + long ttlTimeMs = 2000L; + + // the _ttl attribute is a number of ms ttl for key values in this put. + attributes.put(wrap(Bytes.toBytes("_ttl")), wrap(Bytes.toBytes(ttlTimeMs))); + // Attach the attributes + put.setAttributes(attributes); + // Send it. + handler.put(table, put); + + // Now get the data back + TGet getOne = new TGet(wrap(rowName)); + TResult resultOne = handler.get(table, getOne); + + // It's there. + assertArrayEquals(rowName, resultOne.getRow()); + assertEquals(1, resultOne.getColumnValuesSize()); + + // Sleep 30 seconds just to make 100% sure that the key value should be expired. + Thread.sleep(ttlTimeMs * 15); + + TGet getTwo = new TGet(wrap(rowName)); + TResult resultTwo = handler.get(table, getTwo); + + + // Nothing should be there since it's ttl'd out. 
+ assertNull(resultTwo.getRow()); + assertEquals(0, resultTwo.getColumnValuesSize()); + } + /** * Padding numbers to make comparison of sort order easier in a for loop * From 5dec5ad250322ba3dab8ff9800e82c039e4dce2e Mon Sep 17 00:00:00 2001 From: Gary Helmling Date: Wed, 9 Dec 2015 16:47:25 -0800 Subject: [PATCH 05/72] HBASE-14866 VerifyReplication and ReplicationAdmin should use full peer configuration for peer connection --- .../client/replication/ReplicationAdmin.java | 14 +- .../replication/ReplicationPeersZKImpl.java | 7 +- .../replication/ReplicationStateZKBase.java | 3 +- .../apache/hadoop/hbase/zookeeper/ZKUtil.java | 124 ----------- .../hadoop/hbase/zookeeper/TestZKUtil.java | 11 - .../hadoop/hbase/HBaseConfiguration.java | 83 +++++++- .../hadoop/hbase/zookeeper/ZKConfig.java | 201 +++++++++++++++--- .../hadoop/hbase/TestHBaseConfiguration.java | 10 +- .../hadoop/hbase/zookeeper/TestZKConfig.java | 126 +++++++++++ .../hadoop/hbase/mapreduce/SyncTable.java | 15 +- .../hbase/mapreduce/TableMapReduceUtil.java | 35 +-- .../hbase/mapreduce/TableOutputFormat.java | 22 +- .../replication/VerifyReplication.java | 25 ++- .../hbase/util/ServerRegionReplicaUtil.java | 4 +- .../apache/hadoop/hbase/TestZooKeeper.java | 65 ------ .../replication/TestReplicationAdmin.java | 36 +++- .../replication/TestReplicationEndpoint.java | 10 +- .../TestReplicationStateBasic.java | 4 +- .../TestReplicationStateZKImpl.java | 5 +- .../TestRegionReplicaReplicationEndpoint.java | 8 +- .../hadoop/hbase/zookeeper/TestZKConfig.java | 56 ----- 21 files changed, 496 insertions(+), 368 deletions(-) rename {hbase-client => hbase-common}/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java (68%) create mode 100644 hbase-common/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java index 3a83d13804c..24a3dcb0831 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java @@ -56,7 +56,6 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl; import org.apache.hadoop.hbase.replication.ReplicationPeers; import org.apache.hadoop.hbase.replication.ReplicationQueuesClient; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.data.Stat; @@ -627,7 +626,8 @@ public class ReplicationAdmin implements Closeable { } } - private List listValidReplicationPeers() { + @VisibleForTesting + List listValidReplicationPeers() { Map peers = listPeerConfigs(); if (peers == null || peers.size() <= 0) { return null; @@ -635,18 +635,16 @@ public class ReplicationAdmin implements Closeable { List validPeers = new ArrayList(peers.size()); for (Entry peerEntry : peers.entrySet()) { String peerId = peerEntry.getKey(); - String clusterKey = peerEntry.getValue().getClusterKey(); - Configuration peerConf = new Configuration(this.connection.getConfiguration()); Stat s = null; try { - ZKUtil.applyClusterKeyToConf(peerConf, clusterKey); Pair pair = this.replicationPeers.getPeerConf(peerId); + Configuration peerConf = 
pair.getSecond(); ReplicationPeer peer = new ReplicationPeerZKImpl(peerConf, peerId, pair.getFirst()); s = zkw.getRecoverableZooKeeper().exists(peerConf.get(HConstants.ZOOKEEPER_ZNODE_PARENT), null); if (null == s) { - LOG.info(peerId + ' ' + clusterKey + " is invalid now."); + LOG.info(peerId + ' ' + pair.getFirst().getClusterKey() + " is invalid now."); continue; } validPeers.add(peer); @@ -664,10 +662,6 @@ public class ReplicationAdmin implements Closeable { LOG.warn("Failed to get valid replication peers due to InterruptedException."); LOG.debug("Failure details to get valid replication peers.", e); continue; - } catch (IOException e) { - LOG.warn("Failed to get valid replication peers due to IOException."); - LOG.debug("Failure details to get valid replication peers.", e); - continue; } } return validPeers; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java index a7d7dda0990..7099bfc61ed 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java @@ -29,6 +29,7 @@ import java.util.concurrent.ConcurrentMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; @@ -318,11 +319,9 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re return null; } - Configuration otherConf = new Configuration(this.conf); + Configuration otherConf; try { - if (peerConfig.getClusterKey() != null && !peerConfig.getClusterKey().isEmpty()) { - ZKUtil.applyClusterKeyToConf(otherConf, peerConfig.getClusterKey()); - } + otherConf = HBaseConfiguration.createClusterConf(this.conf, peerConfig.getClusterKey()); } catch (IOException e) { LOG.error("Can't get peer configuration for peerId=" + peerId + " because:", e); return null; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java index 1691b3f3840..4fbac0f623e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java @@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; @@ -69,7 +70,7 @@ public abstract class ReplicationStateZKBase { String peersZNodeName = conf.get("zookeeper.znode.replication.peers", "peers"); String queuesZNodeName = conf.get("zookeeper.znode.replication.rs", "rs"); this.peerStateNodeName = conf.get("zookeeper.znode.replication.peers.state", "peer-state"); - this.ourClusterKey = ZKUtil.getZooKeeperClusterKey(this.conf); + this.ourClusterKey = ZKConfig.getZooKeeperClusterKey(this.conf); this.replicationZNode = ZKUtil.joinZNode(this.zookeeper.baseZNode, 
replicationZNodeName); this.peersZNode = ZKUtil.joinZNode(replicationZNode, peersZNodeName); this.queuesZNode = ZKUtil.joinZNode(replicationZNode, queuesZNodeName); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java index ffbe2db3ad5..bf803bea90b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java @@ -76,7 +76,6 @@ import org.apache.zookeeper.proto.DeleteRequest; import org.apache.zookeeper.proto.SetDataRequest; import org.apache.zookeeper.server.ZooKeeperSaslServer; -import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.InvalidProtocolBufferException; /** @@ -96,25 +95,6 @@ public class ZKUtil { public static final char ZNODE_PATH_SEPARATOR = '/'; private static int zkDumpConnectionTimeOut; - // The Quorum for the ZK cluster can have one the following format (see examples below): - // (1). s1,s2,s3 (no client port in the list, the client port could be obtained from clientPort) - // (2). s1:p1,s2:p2,s3:p3 (with client port, which could be same or different for each server, - // in this case, the clientPort would be ignored) - // (3). s1:p1,s2,s3:p3 (mix of (1) and (2) - if port is not specified in a server, it would use - // the clientPort; otherwise, it would use the specified port) - @VisibleForTesting - public static class ZKClusterKey { - public String quorumString; - public int clientPort; - public String znodeParent; - - ZKClusterKey(String quorumString, int clientPort, String znodeParent) { - this.quorumString = quorumString; - this.clientPort = clientPort; - this.znodeParent = znodeParent; - } - } - /** * Creates a new connection to ZooKeeper, pulling settings and ensemble config * from the specified configuration object using methods from {@link ZKConfig}. 
@@ -365,110 +345,6 @@ public class ZKUtil { return path.substring(path.lastIndexOf("/")+1); } - /** - * Get the key to the ZK ensemble for this configuration without - * adding a name at the end - * @param conf Configuration to use to build the key - * @return ensemble key without a name - */ - public static String getZooKeeperClusterKey(Configuration conf) { - return getZooKeeperClusterKey(conf, null); - } - - /** - * Get the key to the ZK ensemble for this configuration and append - * a name at the end - * @param conf Configuration to use to build the key - * @param name Name that should be appended at the end if not empty or null - * @return ensemble key with a name (if any) - */ - public static String getZooKeeperClusterKey(Configuration conf, String name) { - String ensemble = conf.get(HConstants.ZOOKEEPER_QUORUM).replaceAll( - "[\\t\\n\\x0B\\f\\r]", ""); - StringBuilder builder = new StringBuilder(ensemble); - builder.append(":"); - builder.append(conf.get(HConstants.ZOOKEEPER_CLIENT_PORT)); - builder.append(":"); - builder.append(conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT)); - if (name != null && !name.isEmpty()) { - builder.append(","); - builder.append(name); - } - return builder.toString(); - } - - /** - * Apply the settings in the given key to the given configuration, this is - * used to communicate with distant clusters - * @param conf configuration object to configure - * @param key string that contains the 3 required configuratins - * @throws IOException - */ - public static void applyClusterKeyToConf(Configuration conf, String key) - throws IOException{ - ZKClusterKey zkClusterKey = transformClusterKey(key); - conf.set(HConstants.ZOOKEEPER_QUORUM, zkClusterKey.quorumString); - conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClusterKey.clientPort); - conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, zkClusterKey.znodeParent); - } - - /** - * Separate the given key into the three configurations it should contain: - * hbase.zookeeper.quorum, hbase.zookeeper.client.port - * and zookeeper.znode.parent - * @param key - * @return the three configuration in the described order - * @throws IOException - */ - public static ZKClusterKey transformClusterKey(String key) throws IOException { - String[] parts = key.split(":"); - - if (parts.length == 3) { - return new ZKClusterKey(parts [0], Integer.parseInt(parts [1]), parts [2]); - } - - if (parts.length > 3) { - // The quorum could contain client port in server:clientport format, try to transform more. - String zNodeParent = parts [parts.length - 1]; - String clientPort = parts [parts.length - 2]; - - // The first part length is the total length minus the lengths of other parts and minus 2 ":" - int endQuorumIndex = key.length() - zNodeParent.length() - clientPort.length() - 2; - String quorumStringInput = key.substring(0, endQuorumIndex); - String[] serverHosts = quorumStringInput.split(","); - - // The common case is that every server has its own client port specified - this means - // that (total parts - the ZNodeParent part - the ClientPort part) is equal to - // (the number of "," + 1) - "+ 1" because the last server has no ",". - if ((parts.length - 2) == (serverHosts.length + 1)) { - return new ZKClusterKey(quorumStringInput, Integer.parseInt(clientPort), zNodeParent); - } - - // For the uncommon case that some servers has no port specified, we need to build the - // server:clientport list using default client port for servers without specified port. 
- return new ZKClusterKey( - ZKConfig.buildQuorumServerString(serverHosts, clientPort), - Integer.parseInt(clientPort), - zNodeParent); - } - - throw new IOException("Cluster key passed " + key + " is invalid, the format should be:" + - HConstants.ZOOKEEPER_QUORUM + ":" + HConstants.ZOOKEEPER_CLIENT_PORT + ":" - + HConstants.ZOOKEEPER_ZNODE_PARENT); - } - - /** - * Standardize the ZK quorum string: make it a "server:clientport" list, separated by ',' - * @param quorumStringInput a string contains a list of servers for ZK quorum - * @param clientPort the default client port - * @return the string for a list of "server:port" separated by "," - */ - @VisibleForTesting - public static String standardizeQuorumServerString(String quorumStringInput, String clientPort) { - String[] serverHosts = quorumStringInput.split(","); - return ZKConfig.buildQuorumServerString(serverHosts, clientPort); - } - // // Existence checks and watches // diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java index 72de935e876..eb629f2023b 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java @@ -40,17 +40,6 @@ import org.junit.experimental.categories.Category; @Category({SmallTests.class}) public class TestZKUtil { - @Test - public void testGetZooKeeperClusterKey() { - Configuration conf = HBaseConfiguration.create(); - conf.set(HConstants.ZOOKEEPER_QUORUM, "\tlocalhost\n"); - conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, "3333"); - conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "hbase"); - String clusterKey = ZKUtil.getZooKeeperClusterKey(conf, "test"); - Assert.assertTrue(!clusterKey.contains("\t") && !clusterKey.contains("\n")); - Assert.assertEquals("localhost:3333:hbase,test", clusterKey); - } - @Test public void testCreateACL() throws ZooKeeperConnectionException, IOException { Configuration conf = HBaseConfiguration.create(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java index 505912ee6dd..94d44830a23 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java @@ -20,15 +20,16 @@ package org.apache.hadoop.hbase; import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; -import java.util.Map.Entry; +import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.util.HeapMemorySizeUtil; import org.apache.hadoop.hbase.util.VersionInfo; +import org.apache.hadoop.hbase.zookeeper.ZKConfig; /** * Adds HBase configuration files to a Configuration @@ -113,7 +114,7 @@ public class HBaseConfiguration extends Configuration { * @param srcConf the source configuration **/ public static void merge(Configuration destConf, Configuration srcConf) { - for (Entry e : srcConf) { + for (Map.Entry e : srcConf) { destConf.set(e.getKey(), e.getValue()); } } @@ -127,7 +128,7 @@ public class HBaseConfiguration extends Configuration { */ 
public static Configuration subset(Configuration srcConf, String prefix) { Configuration newConf = new Configuration(false); - for (Entry entry : srcConf) { + for (Map.Entry entry : srcConf) { if (entry.getKey().startsWith(prefix)) { String newKey = entry.getKey().substring(prefix.length()); // avoid entries that would produce an empty key @@ -139,6 +140,18 @@ public class HBaseConfiguration extends Configuration { return newConf; } + /** + * Sets all the entries in the provided {@code Map} as properties in the + * given {@code Configuration}. Each property will have the specified prefix prepended, + * so that the configuration entries are keyed by {@code prefix + entry.getKey()}. + */ + public static void setWithPrefix(Configuration conf, String prefix, + Iterable> properties) { + for (Map.Entry entry : properties) { + conf.set(prefix + entry.getKey(), entry.getValue()); + } + } + /** * @return whether to show HBase Configuration in servlet */ @@ -233,7 +246,67 @@ public class HBaseConfiguration extends Configuration { return passwd; } - /** For debugging. Dump configurations to system output as xml format. + /** + * Generates a {@link Configuration} instance by applying the ZooKeeper cluster key + * to the base Configuration. Note that additional configuration properties may be needed + * for a remote cluster, so it is preferable to use + * {@link #createClusterConf(Configuration, String, String)}. + * + * @param baseConf the base configuration to use, containing prefixed override properties + * @param clusterKey the ZooKeeper quorum cluster key to apply, or {@code null} if none + * + * @return the merged configuration with override properties and cluster key applied + * + * @see #createClusterConf(Configuration, String, String) + */ + public static Configuration createClusterConf(Configuration baseConf, String clusterKey) + throws IOException { + return createClusterConf(baseConf, clusterKey, null); + } + + /** + * Generates a {@link Configuration} instance by applying property overrides prefixed by + * a cluster profile key to the base Configuration. Override properties are extracted by + * the {@link #subset(Configuration, String)} method, then the merged on top of the base + * Configuration and returned. 
+ * + * @param baseConf the base configuration to use, containing prefixed override properties + * @param clusterKey the ZooKeeper quorum cluster key to apply, or {@code null} if none + * @param overridePrefix the property key prefix to match for override properties, + * or {@code null} if none + * @return the merged configuration with override properties and cluster key applied + */ + public static Configuration createClusterConf(Configuration baseConf, String clusterKey, + String overridePrefix) throws IOException { + Configuration clusterConf = HBaseConfiguration.create(baseConf); + if (clusterKey != null && !clusterKey.isEmpty()) { + applyClusterKeyToConf(clusterConf, clusterKey); + } + + if (overridePrefix != null && !overridePrefix.isEmpty()) { + Configuration clusterSubset = HBaseConfiguration.subset(clusterConf, overridePrefix); + HBaseConfiguration.merge(clusterConf, clusterSubset); + } + return clusterConf; + } + + /** + * Apply the settings in the given key to the given configuration, this is + * used to communicate with distant clusters + * @param conf configuration object to configure + * @param key string that contains the 3 required configuratins + * @throws IOException + */ + private static void applyClusterKeyToConf(Configuration conf, String key) + throws IOException{ + ZKConfig.ZKClusterKey zkClusterKey = ZKConfig.transformClusterKey(key); + conf.set(HConstants.ZOOKEEPER_QUORUM, zkClusterKey.getQuorumString()); + conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClusterKey.getClientPort()); + conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, zkClusterKey.getZnodeParent()); + } + + /** + * For debugging. Dump configurations to system output as xml format. * Master and RS configurations can also be dumped using * http services. e.g. "curl http://master:16010/dump" */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java similarity index 68% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java rename to hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java index 15752c29d3d..787b5cc792c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java @@ -25,11 +25,12 @@ import java.util.List; import java.util.Map.Entry; import java.util.Properties; +import com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.classification.InterfaceAudience; /** * Utility methods for reading, and building the ZooKeeper configuration. @@ -40,7 +41,7 @@ import org.apache.hadoop.hbase.HConstants; * (3). other zookeeper related properties in HBASE XML */ @InterfaceAudience.Private -public class ZKConfig { +public final class ZKConfig { private static final Log LOG = LogFactory.getLog(ZKConfig.class); private static final String VARIABLE_START = "${"; @@ -48,6 +49,9 @@ public class ZKConfig { private static final String VARIABLE_END = "}"; private static final int VARIABLE_END_LENGTH = VARIABLE_END.length(); + private ZKConfig() { + } + /** * Make a Properties object holding ZooKeeper config. 
* Parses the corresponding config options from the HBase XML configs @@ -85,7 +89,7 @@ public class ZKConfig { "' to false"); // First check if there is a zoo.cfg in the CLASSPATH. If so, simply read // it and grab its configuration properties. - ClassLoader cl = HQuorumPeer.class.getClassLoader(); + ClassLoader cl = ZKConfig.class.getClassLoader(); final InputStream inputStream = cl.getResourceAsStream(HConstants.ZOOKEEPER_CONFIG_NAME); if (inputStream != null) { @@ -305,31 +309,7 @@ public class ZKConfig { // Build the ZK quorum server string with "server:clientport" list, separated by ',' final String[] serverHosts = conf.getStrings(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST); - return buildQuorumServerString(serverHosts, defaultClientPort); - } - - /** - * Build the ZK quorum server string with "server:clientport" list, separated by ',' - * - * @param serverHosts a list of servers for ZK quorum - * @param clientPort the default client port - * @return the string for a list of "server:port" separated by "," - */ - public static String buildQuorumServerString(String[] serverHosts, String clientPort) { - StringBuilder quorumStringBuilder = new StringBuilder(); - String serverHost; - for (int i = 0; i < serverHosts.length; ++i) { - if (serverHosts[i].contains(":")) { - serverHost = serverHosts[i]; // just use the port specified from the input - } else { - serverHost = serverHosts[i] + ":" + clientPort; - } - if (i > 0) { - quorumStringBuilder.append(','); - } - quorumStringBuilder.append(serverHost); - } - return quorumStringBuilder.toString(); + return buildZKQuorumServerString(serverHosts, defaultClientPort); } /** @@ -347,4 +327,169 @@ public class ZKConfig { return getZKQuorumServersStringFromHbaseConfig(conf); } + + /** + * Build the ZK quorum server string with "server:clientport" list, separated by ',' + * + * @param serverHosts a list of servers for ZK quorum + * @param clientPort the default client port + * @return the string for a list of "server:port" separated by "," + */ + public static String buildZKQuorumServerString(String[] serverHosts, String clientPort) { + StringBuilder quorumStringBuilder = new StringBuilder(); + String serverHost; + for (int i = 0; i < serverHosts.length; ++i) { + if (serverHosts[i].contains(":")) { + serverHost = serverHosts[i]; // just use the port specified from the input + } else { + serverHost = serverHosts[i] + ":" + clientPort; + } + if (i > 0) { + quorumStringBuilder.append(','); + } + quorumStringBuilder.append(serverHost); + } + return quorumStringBuilder.toString(); + } + + /** + * Verifies that the given key matches the expected format for a ZooKeeper cluster key. + * The Quorum for the ZK cluster can have one the following formats (see examples below): + * + *
+   * <ol>
+   *   <li>s1,s2,s3 (no client port in the list, the client port could be obtained from
+   *       clientPort)</li>
+   *   <li>s1:p1,s2:p2,s3:p3 (with client port, which could be same or different for each server,
+   *       in this case, the clientPort would be ignored)</li>
+   *   <li>s1:p1,s2,s3:p3 (mix of (1) and (2) - if port is not specified in a server, it would use
+   *       the clientPort; otherwise, it would use the specified port)</li>
+   * </ol>
+ * + * @param key the cluster key to validate + * @throws IOException if the key could not be parsed + */ + public static void validateClusterKey(String key) throws IOException { + transformClusterKey(key); + } + + /** + * Separate the given key into the three configurations it should contain: + * hbase.zookeeper.quorum, hbase.zookeeper.client.port + * and zookeeper.znode.parent + * @param key + * @return the three configuration in the described order + * @throws IOException + */ + public static ZKClusterKey transformClusterKey(String key) throws IOException { + String[] parts = key.split(":"); + + if (parts.length == 3) { + return new ZKClusterKey(parts [0], Integer.parseInt(parts [1]), parts [2]); + } + + if (parts.length > 3) { + // The quorum could contain client port in server:clientport format, try to transform more. + String zNodeParent = parts [parts.length - 1]; + String clientPort = parts [parts.length - 2]; + + // The first part length is the total length minus the lengths of other parts and minus 2 ":" + int endQuorumIndex = key.length() - zNodeParent.length() - clientPort.length() - 2; + String quorumStringInput = key.substring(0, endQuorumIndex); + String[] serverHosts = quorumStringInput.split(","); + + // The common case is that every server has its own client port specified - this means + // that (total parts - the ZNodeParent part - the ClientPort part) is equal to + // (the number of "," + 1) - "+ 1" because the last server has no ",". + if ((parts.length - 2) == (serverHosts.length + 1)) { + return new ZKClusterKey(quorumStringInput, Integer.parseInt(clientPort), zNodeParent); + } + + // For the uncommon case that some servers has no port specified, we need to build the + // server:clientport list using default client port for servers without specified port. 
+ return new ZKClusterKey( + buildZKQuorumServerString(serverHosts, clientPort), + Integer.parseInt(clientPort), + zNodeParent); + } + + throw new IOException("Cluster key passed " + key + " is invalid, the format should be:" + + HConstants.ZOOKEEPER_QUORUM + ":" + HConstants.ZOOKEEPER_CLIENT_PORT + ":" + + HConstants.ZOOKEEPER_ZNODE_PARENT); + } + + /** + * Get the key to the ZK ensemble for this configuration without + * adding a name at the end + * @param conf Configuration to use to build the key + * @return ensemble key without a name + */ + public static String getZooKeeperClusterKey(Configuration conf) { + return getZooKeeperClusterKey(conf, null); + } + + /** + * Get the key to the ZK ensemble for this configuration and append + * a name at the end + * @param conf Configuration to use to build the key + * @param name Name that should be appended at the end if not empty or null + * @return ensemble key with a name (if any) + */ + public static String getZooKeeperClusterKey(Configuration conf, String name) { + String ensemble = conf.get(HConstants.ZOOKEEPER_QUORUM).replaceAll( + "[\\t\\n\\x0B\\f\\r]", ""); + StringBuilder builder = new StringBuilder(ensemble); + builder.append(":"); + builder.append(conf.get(HConstants.ZOOKEEPER_CLIENT_PORT)); + builder.append(":"); + builder.append(conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT)); + if (name != null && !name.isEmpty()) { + builder.append(","); + builder.append(name); + } + return builder.toString(); + } + + /** + * Standardize the ZK quorum string: make it a "server:clientport" list, separated by ',' + * @param quorumStringInput a string contains a list of servers for ZK quorum + * @param clientPort the default client port + * @return the string for a list of "server:port" separated by "," + */ + @VisibleForTesting + public static String standardizeZKQuorumServerString(String quorumStringInput, + String clientPort) { + String[] serverHosts = quorumStringInput.split(","); + return buildZKQuorumServerString(serverHosts, clientPort); + } + + // The Quorum for the ZK cluster can have one the following format (see examples below): + // (1). s1,s2,s3 (no client port in the list, the client port could be obtained from clientPort) + // (2). s1:p1,s2:p2,s3:p3 (with client port, which could be same or different for each server, + // in this case, the clientPort would be ignored) + // (3). 
s1:p1,s2,s3:p3 (mix of (1) and (2) - if port is not specified in a server, it would use + // the clientPort; otherwise, it would use the specified port) + @VisibleForTesting + public static class ZKClusterKey { + private String quorumString; + private int clientPort; + private String znodeParent; + + ZKClusterKey(String quorumString, int clientPort, String znodeParent) { + this.quorumString = quorumString; + this.clientPort = clientPort; + this.znodeParent = znodeParent; + } + + public String getQuorumString() { + return quorumString; + } + + public int getClientPort() { + return clientPort; + } + + public String getZnodeParent() { + return znodeParent; + } + } } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java index bbddb602ab8..f8b60fd9889 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; @@ -27,10 +28,12 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.List; +import com.google.common.collect.ImmutableMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -72,8 +75,11 @@ public class TestHBaseConfiguration { String prefix = "hbase.mapred.output."; conf.set("hbase.security.authentication", "kerberos"); conf.set("hbase.regionserver.kerberos.principal", "hbasesource"); - conf.set(prefix + "hbase.regionserver.kerberos.principal", "hbasedest"); - conf.set(prefix, "shouldbemissing"); + HBaseConfiguration.setWithPrefix(conf, prefix, + ImmutableMap.of( + "hbase.regionserver.kerberos.principal", "hbasedest", + "", "shouldbemissing") + .entrySet()); Configuration subsetConf = HBaseConfiguration.subset(conf, prefix); assertNull(subsetConf.get(prefix + "hbase.regionserver.kerberos.principal")); diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java new file mode 100644 index 00000000000..7879aea7101 --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java @@ -0,0 +1,126 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.zookeeper; + +import java.io.IOException; +import java.util.Properties; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +@Category({MiscTests.class, SmallTests.class}) +public class TestZKConfig { + + @Test + public void testZKConfigLoading() throws Exception { + Configuration conf = HBaseConfiguration.create(); + // Test that we read only from the config instance + // (i.e. via hbase-default.xml and hbase-site.xml) + conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181); + Properties props = ZKConfig.makeZKProps(conf); + assertEquals("Property client port should have been default from the HBase config", + "2181", + props.getProperty("clientPort")); + } + + @Test + public void testGetZooKeeperClusterKey() { + Configuration conf = HBaseConfiguration.create(); + conf.set(HConstants.ZOOKEEPER_QUORUM, "\tlocalhost\n"); + conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, "3333"); + conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "hbase"); + String clusterKey = ZKConfig.getZooKeeperClusterKey(conf, "test"); + assertTrue(!clusterKey.contains("\t") && !clusterKey.contains("\n")); + assertEquals("localhost:3333:hbase,test", clusterKey); + } + + @Test + public void testClusterKey() throws Exception { + testKey("server", 2181, "hbase"); + testKey("server1,server2,server3", 2181, "hbase"); + try { + ZKConfig.validateClusterKey("2181:hbase"); + } catch (IOException ex) { + // OK + } + } + + @Test + public void testClusterKeyWithMultiplePorts() throws Exception { + // server has different port than the default port + testKey("server1:2182", 2181, "hbase", true); + // multiple servers have their own port + testKey("server1:2182,server2:2183,server3:2184", 2181, "hbase", true); + // one server has no specified port, should use default port + testKey("server1:2182,server2,server3:2184", 2181, "hbase", true); + // the last server has no specified port, should use default port + testKey("server1:2182,server2:2183,server3", 2181, "hbase", true); + // multiple servers have no specified port, should use default port for those servers + testKey("server1:2182,server2,server3:2184,server4", 2181, "hbase", true); + // same server, different ports + testKey("server1:2182,server1:2183,server1", 2181, "hbase", true); + // mix of same server/different port and different server + testKey("server1:2182,server2:2183,server1", 2181, "hbase", true); + } + + private void testKey(String ensemble, int port, String znode) + throws IOException { + testKey(ensemble, port, znode, false); // not support multiple client ports + } + + private void testKey(String ensemble, int port, String znode, Boolean multiplePortSupport) + throws IOException { + Configuration conf = new Configuration(); + String key = ensemble+":"+port+":"+znode; + String ensemble2 = null; + ZKConfig.ZKClusterKey zkClusterKey = ZKConfig.transformClusterKey(key); + if (multiplePortSupport) { + ensemble2 = ZKConfig.standardizeZKQuorumServerString(ensemble, + Integer.toString(port)); + assertEquals(ensemble2, 
zkClusterKey.getQuorumString()); + } + else { + assertEquals(ensemble, zkClusterKey.getQuorumString()); + } + assertEquals(port, zkClusterKey.getClientPort()); + assertEquals(znode, zkClusterKey.getZnodeParent()); + + conf = HBaseConfiguration.createClusterConf(conf, key); + assertEquals(zkClusterKey.getQuorumString(), conf.get(HConstants.ZOOKEEPER_QUORUM)); + assertEquals(zkClusterKey.getClientPort(), conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, -1)); + assertEquals(zkClusterKey.getZnodeParent(), conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT)); + + String reconstructedKey = ZKConfig.getZooKeeperClusterKey(conf); + if (multiplePortSupport) { + String key2 = ensemble2 + ":" + port + ":" + znode; + assertEquals(key2, reconstructedKey); + } + else { + assertEquals(key, reconstructedKey); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java index 3495ca97006..23fd10eba49 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; @@ -174,8 +173,9 @@ public class SyncTable extends Configured implements Tool { Configuration conf = context.getConfiguration(); sourceHashDir = new Path(conf.get(SOURCE_HASH_DIR_CONF_KEY)); - sourceConnection = openConnection(conf, SOURCE_ZK_CLUSTER_CONF_KEY); - targetConnection = openConnection(conf, TARGET_ZK_CLUSTER_CONF_KEY); + sourceConnection = openConnection(conf, SOURCE_ZK_CLUSTER_CONF_KEY, null); + targetConnection = openConnection(conf, TARGET_ZK_CLUSTER_CONF_KEY, + TableOutputFormat.OUTPUT_CONF_PREFIX); sourceTable = openTable(sourceConnection, conf, SOURCE_TABLE_CONF_KEY); targetTable = openTable(targetConnection, conf, TARGET_TABLE_CONF_KEY); dryRun = conf.getBoolean(SOURCE_TABLE_CONF_KEY, false); @@ -196,13 +196,12 @@ public class SyncTable extends Configured implements Tool { targetHasher = new HashTable.ResultHasher(); } - private static Connection openConnection(Configuration conf, String zkClusterConfKey) + private static Connection openConnection(Configuration conf, String zkClusterConfKey, + String configPrefix) throws IOException { - Configuration clusterConf = new Configuration(conf); String zkCluster = conf.get(zkClusterConfKey); - if (zkCluster != null) { - ZKUtil.applyClusterKeyToConf(clusterConf, zkCluster); - } + Configuration clusterConf = HBaseConfiguration.createClusterConf(conf, + zkCluster, configPrefix); return ConnectionFactory.createConnection(clusterConf); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java index fdd68ce5d5f..161488363f7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java @@ -26,6 +26,7 @@ import java.util.*; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; +import 
com.google.protobuf.InvalidProtocolBufferException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -49,12 +50,11 @@ import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.token.TokenUtil; import org.apache.hadoop.hbase.util.Base64; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapreduce.InputFormat; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.util.StringUtils; -import com.google.protobuf.InvalidProtocolBufferException; /** * Utility for {@link TableMapper} and {@link TableReducer} @@ -475,12 +475,8 @@ public class TableMapReduceUtil { String quorumAddress = job.getConfiguration().get(TableOutputFormat.QUORUM_ADDRESS); User user = userProvider.getCurrent(); if (quorumAddress != null) { - Configuration peerConf = HBaseConfiguration.create(job.getConfiguration()); - ZKUtil.applyClusterKeyToConf(peerConf, quorumAddress); - // apply any "hbase.mapred.output." configuration overrides - Configuration outputOverrides = - HBaseConfiguration.subset(peerConf, TableOutputFormat.OUTPUT_CONF_PREFIX); - HBaseConfiguration.merge(peerConf, outputOverrides); + Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(), + quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX); Connection peerConn = ConnectionFactory.createConnection(peerConf); try { TokenUtil.addTokenForJob(peerConn, user, job); @@ -513,15 +509,30 @@ public class TableMapReduceUtil { * @param job The job that requires the permission. * @param quorumAddress string that contains the 3 required configuratins * @throws IOException When the authentication token cannot be obtained. + * @deprecated Since 1.2.0, use {@link #initCredentialsForCluster(Job, Configuration)} instead. */ + @Deprecated public static void initCredentialsForCluster(Job job, String quorumAddress) throws IOException { + Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(), + quorumAddress); + initCredentialsForCluster(job, peerConf); + } + + /** + * Obtain an authentication token, for the specified cluster, on behalf of the current user + * and add it to the credentials for the given map reduce job. + * + * @param job The job that requires the permission. + * @param conf The configuration to use in connecting to the peer cluster + * @throws IOException When the authentication token cannot be obtained. + */ + public static void initCredentialsForCluster(Job job, Configuration conf) + throws IOException { UserProvider userProvider = UserProvider.instantiate(job.getConfiguration()); if (userProvider.isHBaseSecurityEnabled()) { try { - Configuration peerConf = HBaseConfiguration.create(job.getConfiguration()); - ZKUtil.applyClusterKeyToConf(peerConf, quorumAddress); - Connection peerConn = ConnectionFactory.createConnection(peerConf); + Connection peerConn = ConnectionFactory.createConnection(conf); try { TokenUtil.addTokenForJob(peerConn, userProvider.getCurrent(), job); } finally { @@ -670,7 +681,7 @@ public class TableMapReduceUtil { // If passed a quorum/ensemble address, pass it on to TableOutputFormat. 
if (quorumAddress != null) { // Calling this will validate the format - ZKUtil.transformClusterKey(quorumAddress); + ZKConfig.validateClusterKey(quorumAddress); conf.set(TableOutputFormat.QUORUM_ADDRESS,quorumAddress); } if (serverClass != null && serverImpl != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java index 190962ecf80..5904f9cec8a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java @@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.OutputCommitter; import org.apache.hadoop.mapreduce.OutputFormat; @@ -191,22 +190,19 @@ implements Configurable { @Override public void setConf(Configuration otherConf) { - this.conf = HBaseConfiguration.create(otherConf); - - String tableName = this.conf.get(OUTPUT_TABLE); + String tableName = otherConf.get(OUTPUT_TABLE); if(tableName == null || tableName.length() <= 0) { throw new IllegalArgumentException("Must specify table name"); } - String address = this.conf.get(QUORUM_ADDRESS); - int zkClientPort = this.conf.getInt(QUORUM_PORT, 0); - String serverClass = this.conf.get(REGION_SERVER_CLASS); - String serverImpl = this.conf.get(REGION_SERVER_IMPL); + String address = otherConf.get(QUORUM_ADDRESS); + int zkClientPort = otherConf.getInt(QUORUM_PORT, 0); + String serverClass = otherConf.get(REGION_SERVER_CLASS); + String serverImpl = otherConf.get(REGION_SERVER_IMPL); try { - if (address != null) { - ZKUtil.applyClusterKeyToConf(this.conf, address); - } + this.conf = HBaseConfiguration.createClusterConf(otherConf, address, OUTPUT_CONF_PREFIX); + if (serverClass != null) { this.conf.set(HConstants.REGION_SERVER_IMPL, serverImpl); } @@ -217,9 +213,5 @@ implements Configurable { LOG.error(e); throw new RuntimeException(e); } - - // finally apply any remaining "hbase.mapred.output." 
configuration overrides - Configuration outputOverrides = HBaseConfiguration.subset(otherConf, OUTPUT_CONF_PREFIX); - HBaseConfiguration.merge(this.conf, outputOverrides); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index 9bd2a6c9594..75dfe9e81b9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.replication.ReplicationPeers; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; import org.apache.hadoop.util.Tool; @@ -69,6 +68,7 @@ public class VerifyReplication extends Configured implements Tool { LogFactory.getLog(VerifyReplication.class); public final static String NAME = "verifyrep"; + private final static String PEER_CONFIG_PREFIX = NAME + ".peer."; static long startTime = 0; static long endTime = Long.MAX_VALUE; static int versions = -1; @@ -126,8 +126,8 @@ public class VerifyReplication extends Configured implements Tool { @Override public Void connect(HConnection conn) throws IOException { String zkClusterKey = conf.get(NAME + ".peerQuorumAddress"); - Configuration peerConf = HBaseConfiguration.create(conf); - ZKUtil.applyClusterKeyToConf(peerConf, zkClusterKey); + Configuration peerConf = HBaseConfiguration.createClusterConf(conf, + zkClusterKey, PEER_CONFIG_PREFIX); TableName tableName = TableName.valueOf(conf.get(NAME + ".tableName")); replicatedTable = new HTable(peerConf, tableName); @@ -203,7 +203,8 @@ public class VerifyReplication extends Configured implements Tool { } } - private static String getPeerQuorumAddress(final Configuration conf) throws IOException { + private static Pair getPeerQuorumConfig( + final Configuration conf) throws IOException { ZooKeeperWatcher localZKW = null; ReplicationPeerZKImpl peer = null; try { @@ -220,8 +221,8 @@ public class VerifyReplication extends Configured implements Tool { if (pair == null) { throw new IOException("Couldn't get peer conf!"); } - Configuration peerConf = rp.getPeerConf(peerId).getSecond(); - return ZKUtil.getZooKeeperClusterKey(peerConf); + + return pair; } catch (ReplicationException e) { throw new IOException( "An error occured while trying to connect to the remove peer cluster", e); @@ -260,9 +261,14 @@ public class VerifyReplication extends Configured implements Tool { conf.set(NAME+".families", families); } - String peerQuorumAddress = getPeerQuorumAddress(conf); + Pair peerConfigPair = getPeerQuorumConfig(conf); + ReplicationPeerConfig peerConfig = peerConfigPair.getFirst(); + String peerQuorumAddress = peerConfig.getClusterKey(); + LOG.info("Peer Quorum Address: " + peerQuorumAddress + ", Peer Configuration: " + + peerConfig.getConfiguration()); conf.set(NAME + ".peerQuorumAddress", peerQuorumAddress); - LOG.info("Peer Quorum Address: " + peerQuorumAddress); + HBaseConfiguration.setWithPrefix(conf, PEER_CONFIG_PREFIX, + peerConfig.getConfiguration().entrySet()); conf.setInt(NAME + ".versions", versions); LOG.info("Number of version: " + versions); @@ -285,8 +291,9 @@ public class VerifyReplication extends 
Configured implements Tool { TableMapReduceUtil.initTableMapperJob(tableName, scan, Verifier.class, null, null, job); + Configuration peerClusterConf = peerConfigPair.getSecond(); // Obtain the auth token from peer cluster - TableMapReduceUtil.initCredentialsForCluster(job, peerQuorumAddress); + TableMapReduceUtil.initCredentialsForCluster(job, peerClusterConf); job.setOutputFormatClass(NullOutputFormat.class); job.setNumReduceTasks(0); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java index 5c61afb7464..2ba1b477d8e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.regionserver.RegionReplicaReplicationEndpoint; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKConfig; /** * Similar to {@link RegionReplicaUtil} but for the server side @@ -148,7 +148,7 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { try { if (repAdmin.getPeerConfig(REGION_REPLICA_REPLICATION_PEER) == null) { ReplicationPeerConfig peerConfig = new ReplicationPeerConfig(); - peerConfig.setClusterKey(ZKUtil.getZooKeeperClusterKey(conf)); + peerConfig.setClusterKey(ZKConfig.getZooKeeperClusterKey(conf)); peerConfig.setReplicationEndpointImpl(RegionReplicaReplicationEndpoint.class.getName()); repAdmin.addPeer(REGION_REPLICA_REPLICATION_PEER, peerConfig, null); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java index a7566522126..3441aa629bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java @@ -345,71 +345,6 @@ public class TestZooKeeper { assertNull(ZKUtil.getDataNoWatch(zkw, "/l1/l2", null)); } - @Test - public void testClusterKey() throws Exception { - testKey("server", 2181, "hbase"); - testKey("server1,server2,server3", 2181, "hbase"); - try { - ZKUtil.transformClusterKey("2181:hbase"); - } catch (IOException ex) { - // OK - } - } - - @Test - public void testClusterKeyWithMultiplePorts() throws Exception { - // server has different port than the default port - testKey("server1:2182", 2181, "hbase", true); - // multiple servers have their own port - testKey("server1:2182,server2:2183,server3:2184", 2181, "hbase", true); - // one server has no specified port, should use default port - testKey("server1:2182,server2,server3:2184", 2181, "hbase", true); - // the last server has no specified port, should use default port - testKey("server1:2182,server2:2183,server3", 2181, "hbase", true); - // multiple servers have no specified port, should use default port for those servers - testKey("server1:2182,server2,server3:2184,server4", 2181, "hbase", true); - // same server, different ports - testKey("server1:2182,server1:2183,server1", 2181, "hbase", true); - // mix of same server/different port and different server - testKey("server1:2182,server2:2183,server1", 2181, "hbase", true); - } - - private void testKey(String ensemble, int port, String 
znode) - throws IOException { - testKey(ensemble, port, znode, false); // not support multiple client ports - } - - private void testKey(String ensemble, int port, String znode, Boolean multiplePortSupport) - throws IOException { - Configuration conf = new Configuration(); - String key = ensemble+":"+port+":"+znode; - String ensemble2 = null; - ZKUtil.ZKClusterKey zkClusterKey = ZKUtil.transformClusterKey(key); - if (multiplePortSupport) { - ensemble2 = ZKUtil.standardizeQuorumServerString(ensemble, Integer.toString(port)); - assertEquals(ensemble2, zkClusterKey.quorumString); - } - else { - assertEquals(ensemble, zkClusterKey.quorumString); - } - assertEquals(port, zkClusterKey.clientPort); - assertEquals(znode, zkClusterKey.znodeParent); - - ZKUtil.applyClusterKeyToConf(conf, key); - assertEquals(zkClusterKey.quorumString, conf.get(HConstants.ZOOKEEPER_QUORUM)); - assertEquals(zkClusterKey.clientPort, conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, -1)); - assertEquals(zkClusterKey.znodeParent, conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT)); - - String reconstructedKey = ZKUtil.getZooKeeperClusterKey(conf); - if (multiplePortSupport) { - String key2 = ensemble2 + ":" + port + ":" + znode; - assertEquals(key2, reconstructedKey); - } - else { - assertEquals(key, reconstructedKey); - } - } - /** * A test for HBASE-3238 * @throws IOException A connection attempt to zk failed diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java index d5e0e31a0e4..119cee5f13f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java @@ -24,9 +24,13 @@ import java.util.TreeMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationFactory; +import org.apache.hadoop.hbase.replication.ReplicationPeer; +import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationQueues; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -37,10 +41,12 @@ import org.junit.experimental.categories.Category; import com.google.common.collect.Lists; -import static org.junit.Assert.fail; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + /** * Unit testing of ReplicationAdmin @@ -135,7 +141,7 @@ public class TestReplicationAdmin { } repQueues.removeQueue(ID_ONE); assertEquals(0, repQueues.getAllQueues().size()); - + // add recovered queue for ID_ONE repQueues.addLog(ID_ONE + "-server2", "file1"); try { @@ -148,6 +154,28 @@ public class TestReplicationAdmin { zkw.close(); } + /** + * Tests that the peer configuration used by ReplicationAdmin contains all + * the peer's properties. 
+ */ + @Test + public void testPeerConfig() throws Exception { + ReplicationPeerConfig config = new ReplicationPeerConfig(); + config.setClusterKey(KEY_ONE); + config.getConfiguration().put("key1", "value1"); + config.getConfiguration().put("key2", "value2"); + admin.addPeer(ID_ONE, config, null); + + List peers = admin.listValidReplicationPeers(); + assertEquals(1, peers.size()); + ReplicationPeer peerOne = peers.get(0); + assertNotNull(peerOne); + assertEquals("value1", peerOne.getConfiguration().get("key1")); + assertEquals("value2", peerOne.getConfiguration().get("key2")); + + admin.removePeer(ID_ONE); + } + /** * basic checks that when we add a peer that it is enabled, and that we can disable * @throws Exception diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java index ba8d75c8901..d5705499aeb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java @@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplica import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; @@ -114,7 +114,7 @@ public class TestReplicationEndpoint extends TestReplicationBase { public void testCustomReplicationEndpoint() throws Exception { // test installing a custom replication endpoint other than the default one. admin.addPeer("testCustomReplicationEndpoint", - new ReplicationPeerConfig().setClusterKey(ZKUtil.getZooKeeperClusterKey(conf1)) + new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(conf1)) .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()), null); // check whether the class has been constructed and started @@ -156,7 +156,7 @@ public class TestReplicationEndpoint extends TestReplicationBase { int peerCount = admin.getPeersCount(); final String id = "testReplicationEndpointReturnsFalseOnReplicate"; admin.addPeer(id, - new ReplicationPeerConfig().setClusterKey(ZKUtil.getZooKeeperClusterKey(conf1)) + new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(conf1)) .setReplicationEndpointImpl(ReplicationEndpointReturningFalse.class.getName()), null); // This test is flakey and then there is so much stuff flying around in here its, hard to // debug. Peer needs to be up for the edit to make it across. 
This wait on @@ -208,7 +208,7 @@ public class TestReplicationEndpoint extends TestReplicationBase { } admin.addPeer(id, - new ReplicationPeerConfig().setClusterKey(ZKUtil.getZooKeeperClusterKey(conf2)) + new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(conf2)) .setReplicationEndpointImpl(InterClusterReplicationEndpointForTest.class.getName()), null); @@ -233,7 +233,7 @@ public class TestReplicationEndpoint extends TestReplicationBase { @Test (timeout=120000) public void testWALEntryFilterFromReplicationEndpoint() throws Exception { admin.addPeer("testWALEntryFilterFromReplicationEndpoint", - new ReplicationPeerConfig().setClusterKey(ZKUtil.getZooKeeperClusterKey(conf1)) + new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(conf1)) .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()), null); // now replicate some data. try (Connection connection = ConnectionFactory.createConnection(conf1)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java index f05ecebb21a..696c130a988 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java @@ -27,7 +27,7 @@ import java.util.SortedSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.apache.zookeeper.KeeperException; import org.junit.Before; import org.junit.Test; @@ -202,7 +202,7 @@ public abstract class TestReplicationStateBasic { fail("There are no connected peers, should have thrown an IllegalArgumentException"); } catch (IllegalArgumentException e) { } - assertEquals(KEY_ONE, ZKUtil.getZooKeeperClusterKey(rp.getPeerConf(ID_ONE).getSecond())); + assertEquals(KEY_ONE, ZKConfig.getZooKeeperClusterKey(rp.getPeerConf(ID_ONE).getSecond())); rp.removePeer(ID_ONE); rp.peerRemoved(ID_ONE); assertNumberOfPeers(1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java index e0172d6edd5..a9222ab36a6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; +import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; @@ -79,7 +80,7 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic { String fakeRs = ZKUtil.joinZNode(zkw1.rsZNode, "hostname1.example.org:1234"); ZKUtil.createWithParents(zkw1, fakeRs); ZKClusterId.setClusterId(zkw1, new ClusterId()); - return ZKUtil.getZooKeeperClusterKey(testConf); + return ZKConfig.getZooKeeperClusterKey(testConf); } @Before @@ -94,7 +95,7 @@ public class 
TestReplicationStateZKImpl extends TestReplicationStateBasic { rq3 = ReplicationFactory.getReplicationQueues(zkw, conf, ds3); rqc = ReplicationFactory.getReplicationQueuesClient(zkw, conf, ds1); rp = ReplicationFactory.getReplicationPeers(zkw, conf, zkw); - OUR_KEY = ZKUtil.getZooKeeperClusterKey(conf); + OUR_KEY = ZKConfig.getZooKeeperClusterKey(conf); rqZK = new ReplicationQueuesZKImpl(zkw, conf, ds1); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java index bdda4cf954d..2c8119a55e8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java @@ -53,7 +53,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.apache.log4j.Level; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -129,7 +129,8 @@ public class TestRegionReplicaReplicationEndpoint { // assert peer configuration is correct peerConfig = admin.getPeerConfig(peerId); assertNotNull(peerConfig); - assertEquals(peerConfig.getClusterKey(), ZKUtil.getZooKeeperClusterKey(HTU.getConfiguration())); + assertEquals(peerConfig.getClusterKey(), ZKConfig.getZooKeeperClusterKey( + HTU.getConfiguration())); assertEquals(peerConfig.getReplicationEndpointImpl(), RegionReplicaReplicationEndpoint.class.getName()); admin.close(); @@ -162,7 +163,8 @@ public class TestRegionReplicaReplicationEndpoint { // assert peer configuration is correct peerConfig = admin.getPeerConfig(peerId); assertNotNull(peerConfig); - assertEquals(peerConfig.getClusterKey(), ZKUtil.getZooKeeperClusterKey(HTU.getConfiguration())); + assertEquals(peerConfig.getClusterKey(), ZKConfig.getZooKeeperClusterKey( + HTU.getConfiguration())); assertEquals(peerConfig.getReplicationEndpointImpl(), RegionReplicaReplicationEndpoint.class.getName()); admin.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java deleted file mode 100644 index 9363a3f1ea7..00000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKConfig.java +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.zookeeper; - -import java.util.Properties; - -import junit.framework.Assert; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(SmallTests.class) -public class TestZKConfig { - @Test - public void testZKConfigLoading() throws Exception { - // Test depends on test resource 'zoo.cfg' at src/test/resources/zoo.cfg - Configuration conf = HBaseConfiguration.create(); - // Test that by default we do not pick up any property from the zoo.cfg - // since that feature is to be deprecated and removed. So we should read only - // from the config instance (i.e. via hbase-default.xml and hbase-site.xml) - conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181); - Properties props = ZKConfig.makeZKProps(conf); - Assert.assertEquals( - "Property client port should have been default from the HBase config", - "2181", - props.getProperty("clientPort")); - // Test deprecated zoo.cfg read support by explicitly enabling it and - // thereby relying on our test resource zoo.cfg to be read. - // We may remove this test after a higher release (i.e. post-deprecation). - conf.setBoolean(HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG, true); - props = ZKConfig.makeZKProps(conf); - Assert.assertEquals( - "Property client port should have been from zoo.cfg", - "9999", - props.getProperty("clientPort")); - } -} From 65117d3d04349f2ebb445a34f761fa83ce8e6aa3 Mon Sep 17 00:00:00 2001 From: ramkrishna Date: Thu, 10 Dec 2015 13:10:41 +0530 Subject: [PATCH 06/72] HBASE-13153 Bulk Loaded HFile Replication (Ashish Singhi) --- .../hbase/replication/ReplicationPeers.java | 2 +- .../replication/ReplicationPeersZKImpl.java | 26 +- .../hbase/replication/ReplicationQueues.java | 25 +- .../replication/ReplicationQueuesClient.java | 25 +- .../ReplicationQueuesClientZKImpl.java | 37 ++ .../replication/ReplicationQueuesZKImpl.java | 70 ++ .../replication/ReplicationStateZKBase.java | 14 +- .../apache/hadoop/hbase/zookeeper/ZKUtil.java | 24 +- .../org/apache/hadoop/hbase/HConstants.java | 16 +- .../MetricsReplicationSinkSource.java | 2 + .../MetricsReplicationSourceSource.java | 6 + .../MetricsReplicationGlobalSourceSource.java | 21 + .../MetricsReplicationSinkSourceImpl.java | 7 + .../MetricsReplicationSourceSourceImpl.java | 28 + .../hbase/protobuf/generated/AdminProtos.java | 602 ++++++++++++++++-- hbase-protocol/src/main/protobuf/Admin.proto | 3 + .../mapreduce/LoadIncrementalHFiles.java | 269 +++++--- .../protobuf/ReplicationProtbufUtil.java | 46 +- .../hbase/regionserver/RSRpcServices.java | 4 +- .../regionserver/ReplicationSinkService.java | 8 +- .../regionserver/wal/WALActionsListener.java | 19 +- .../replication/ScopeWALEntryFilter.java | 71 ++- .../replication/TableCfWALEntryFilter.java | 77 ++- .../master/ReplicationHFileCleaner.java | 193 ++++++ .../DefaultSourceFSConfigurationProvider.java | 78 +++ .../HBaseInterClusterReplicationEndpoint.java | 33 +- .../regionserver/HFileReplicator.java | 393 ++++++++++++ .../replication/regionserver/MetricsSink.java | 13 +- .../regionserver/MetricsSource.java | 31 + .../RegionReplicaReplicationEndpoint.java | 4 +- .../replication/regionserver/Replication.java | 138 +++- .../regionserver/ReplicationSink.java | 200 
+++++- .../regionserver/ReplicationSource.java | 92 ++- .../ReplicationSourceInterface.java | 13 + .../ReplicationSourceManager.java | 21 + .../SourceFSConfigurationProvider.java | 40 ++ .../access/SecureBulkLoadEndpoint.java | 18 +- .../cleaner/TestReplicationHFileCleaner.java | 264 ++++++++ .../replication/ReplicationSourceDummy.java | 8 + .../replication/TestMasterReplication.java | 312 ++++++++- .../TestReplicationSmallTests.java | 3 +- .../TestReplicationStateBasic.java | 57 ++ .../TestReplicationStateZKImpl.java | 1 + .../TestReplicationSyncUpTool.java | 10 +- ...plicationSyncUpToolWithBulkLoadedData.java | 235 +++++++ .../regionserver/TestReplicationSink.java | 171 ++++- .../TestReplicationSourceManager.java | 70 +- .../TestSourceFSConfigurationProvider.java | 25 + 48 files changed, 3500 insertions(+), 325 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSourceFSConfigurationProvider.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java index 62585f14f8c..745997e3710 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java @@ -50,7 +50,7 @@ public interface ReplicationPeers { * @param peerId a short that identifies the cluster * @param peerConfig configuration for the replication slave cluster * @param tableCFs the table and column-family list which will be replicated for this peer or null - * for all table and column families + * for all table and column families */ void addPeer(String peerId, ReplicationPeerConfig peerConfig, String tableCFs) throws ReplicationException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java index 7099bfc61ed..479b1c74739 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp; import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.KeeperException.NoNodeException; import com.google.protobuf.ByteString; @@ -120,8 +121,21 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re } checkQueuesDeleted(id); - + ZKUtil.createWithParents(this.zookeeper, this.peersZNode); + + // If only 
bulk load hfile replication is enabled then add peerId node to hfile-refs node + if (replicationForBulkLoadEnabled) { + try { + String peerId = ZKUtil.joinZNode(this.hfileRefsZNode, id); + LOG.info("Adding peer " + peerId + " to hfile reference queue."); + ZKUtil.createWithParents(this.zookeeper, peerId); + } catch (KeeperException e) { + throw new ReplicationException("Failed to add peer with id=" + id + + ", node under hfile references node.", e); + } + } + List listOfOps = new ArrayList(); ZKUtilOp op1 = ZKUtilOp.createAndFailSilent(ZKUtil.joinZNode(this.peersZNode, id), toByteArray(peerConfig)); @@ -151,6 +165,16 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re + " because that id does not exist."); } ZKUtil.deleteNodeRecursively(this.zookeeper, ZKUtil.joinZNode(this.peersZNode, id)); + // Delete peerId node from hfile-refs node irrespective of whether bulk loaded hfile + // replication is enabled or not + + String peerId = ZKUtil.joinZNode(this.hfileRefsZNode, id); + try { + LOG.info("Removing peer " + peerId + " from hfile reference queue."); + ZKUtil.deleteNodeRecursively(this.zookeeper, peerId); + } catch (NoNodeException e) { + LOG.info("Did not find node " + peerId + " to delete.", e); + } } catch (KeeperException e) { throw new ReplicationException("Could not remove peer with id=" + id, e); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java index 3dbbc336540..0d47a88cb4a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java @@ -26,7 +26,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; /** * This provides an interface for maintaining a region server's replication queues. These queues - * keep track of the WALs that still need to be replicated to remote clusters. + * keep track of the WALs and HFile references (if hbase.replication.bulkload.enabled is enabled) + * that still need to be replicated to remote clusters. */ @InterfaceAudience.Private public interface ReplicationQueues { @@ -113,4 +114,26 @@ public interface ReplicationQueues { * @return if this is this rs's znode */ boolean isThisOurZnode(String znode); + + /** + * Add a peer to hfile reference queue if peer does not exist. + * @param peerId peer cluster id to be added + * @throws ReplicationException if fails to add a peer id to hfile reference queue + */ + void addPeerToHFileRefs(String peerId) throws ReplicationException; + + /** + * Add new hfile references to the queue. + * @param peerId peer cluster id to which the hfiles need to be replicated + * @param files list of hfile references to be added + * @throws ReplicationException if fails to add a hfile reference + */ + void addHFileRefs(String peerId, List files) throws ReplicationException; + + /** + * Remove hfile references from the queue. 
+ * @param peerId peer cluster id from which this hfile references needs to be removed + * @param files list of hfile references to be removed + */ + void removeHFileRefs(String peerId, List files); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java index 5b3e541b696..7fa3bbb0930 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java @@ -25,7 +25,8 @@ import org.apache.zookeeper.KeeperException; /** * This provides an interface for clients of replication to view replication queues. These queues - * keep track of the WALs that still need to be replicated to remote clusters. + * keep track of the sources(WALs/HFile references) that still need to be replicated to remote + * clusters. */ @InterfaceAudience.Private public interface ReplicationQueuesClient { @@ -65,4 +66,26 @@ public interface ReplicationQueuesClient { * @return cversion of replication rs node */ int getQueuesZNodeCversion() throws KeeperException; + + /** + * Get the change version number of replication hfile references node. This can be used as + * optimistic locking to get a consistent snapshot of the replication queues of hfile references. + * @return change version number of hfile references node + */ + int getHFileRefsNodeChangeVersion() throws KeeperException; + + /** + * Get list of all peers from hfile reference queue. + * @return a list of peer ids + * @throws KeeperException zookeeper exception + */ + List getAllPeersFromHFileRefsQueue() throws KeeperException; + + /** + * Get a list of all hfile references in the given peer. 
+ * @param peerId a String that identifies the peer + * @return a list of hfile references, null if not found any + * @throws KeeperException zookeeper exception + */ + List getReplicableHFiles(String peerId) throws KeeperException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java index e1a6a495917..cc407e3ff1d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java @@ -84,4 +84,41 @@ public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase implem throw e; } } + + @Override + public int getHFileRefsNodeChangeVersion() throws KeeperException { + Stat stat = new Stat(); + try { + ZKUtil.getDataNoWatch(this.zookeeper, this.hfileRefsZNode, stat); + } catch (KeeperException e) { + this.abortable.abort("Failed to get stat of replication hfile references node.", e); + throw e; + } + return stat.getCversion(); + } + + @Override + public List getAllPeersFromHFileRefsQueue() throws KeeperException { + List result = null; + try { + result = ZKUtil.listChildrenNoWatch(this.zookeeper, this.hfileRefsZNode); + } catch (KeeperException e) { + this.abortable.abort("Failed to get list of all peers in hfile references node.", e); + throw e; + } + return result; + } + + @Override + public List getReplicableHFiles(String peerId) throws KeeperException { + String znode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId); + List result = null; + try { + result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode); + } catch (KeeperException e) { + this.abortable.abort("Failed to get list of hfile references for peerId=" + peerId, e); + throw e; + } + return result; + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java index 26ca6ba9b5a..e9be50bcacc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java @@ -84,6 +84,15 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R } catch (KeeperException e) { throw new ReplicationException("Could not initialize replication queues.", e); } + // If only bulk load hfile replication is enabled then create the hfile-refs znode + if (replicationForBulkLoadEnabled) { + try { + ZKUtil.createWithParents(this.zookeeper, this.hfileRefsZNode); + } catch (KeeperException e) { + throw new ReplicationException("Could not initialize hfile references replication queue.", + e); + } + } } @Override @@ -431,4 +440,65 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R ZooKeeperProtos.ReplicationLock.newBuilder().setLockOwner(lockOwner).build().toByteArray(); return ProtobufUtil.prependPBMagic(bytes); } + + @Override + public void addHFileRefs(String peerId, List files) throws ReplicationException { + String peerZnode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId); + boolean debugEnabled = LOG.isDebugEnabled(); + if (debugEnabled) { + LOG.debug("Adding hfile references " + files + " in queue " + peerZnode); + } + List listOfOps = new ArrayList(); + int size = files.size(); + for (int i = 0; i < size; i++) { + 
listOfOps.add(ZKUtilOp.createAndFailSilent(ZKUtil.joinZNode(peerZnode, files.get(i)), + HConstants.EMPTY_BYTE_ARRAY)); + } + if (debugEnabled) { + LOG.debug(" The multi list size for adding hfile references in zk for node " + peerZnode + + " is " + listOfOps.size()); + } + try { + ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true); + } catch (KeeperException e) { + throw new ReplicationException("Failed to create hfile reference znode=" + e.getPath(), e); + } + } + + @Override + public void removeHFileRefs(String peerId, List files) { + String peerZnode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId); + boolean debugEnabled = LOG.isDebugEnabled(); + if (debugEnabled) { + LOG.debug("Removing hfile references " + files + " from queue " + peerZnode); + } + List listOfOps = new ArrayList(); + int size = files.size(); + for (int i = 0; i < size; i++) { + listOfOps.add(ZKUtilOp.deleteNodeFailSilent(ZKUtil.joinZNode(peerZnode, files.get(i)))); + } + if (debugEnabled) { + LOG.debug(" The multi list size for removing hfile references in zk for node " + peerZnode + + " is " + listOfOps.size()); + } + try { + ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true); + } catch (KeeperException e) { + LOG.error("Failed to remove hfile reference znode=" + e.getPath(), e); + } + } + + @Override + public void addPeerToHFileRefs(String peerId) throws ReplicationException { + String peerZnode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId); + try { + if (ZKUtil.checkExists(this.zookeeper, peerZnode) == -1) { + LOG.info("Adding peer " + peerId + " to hfile reference queue."); + ZKUtil.createWithParents(this.zookeeper, peerZnode); + } + } catch (KeeperException e) { + throw new ReplicationException("Failed to add peer " + peerId + " to hfile reference queue.", + e); + } + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java index 4fbac0f623e..762167f1db7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java @@ -20,9 +20,10 @@ package org.apache.hadoop.hbase.replication; import java.util.List; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.zookeeper.ZKConfig; @@ -48,32 +49,43 @@ public abstract class ReplicationStateZKBase { protected final String peersZNode; /** The name of the znode that contains all replication queues */ protected final String queuesZNode; + /** The name of the znode that contains queues of hfile references to be replicated */ + protected final String hfileRefsZNode; /** The cluster key of the local cluster */ protected final String ourClusterKey; protected final ZooKeeperWatcher zookeeper; protected final Configuration conf; protected final Abortable abortable; + protected final boolean replicationForBulkLoadEnabled; // Public for testing public static final byte[] ENABLED_ZNODE_BYTES = toByteArray(ZooKeeperProtos.ReplicationState.State.ENABLED); public static final byte[] DISABLED_ZNODE_BYTES = 
toByteArray(ZooKeeperProtos.ReplicationState.State.DISABLED); + public static final String ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_KEY = + "zookeeper.znode.replication.hfile.refs"; + public static final String ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_DEFAULT = "hfile-refs"; public ReplicationStateZKBase(ZooKeeperWatcher zookeeper, Configuration conf, Abortable abortable) { this.zookeeper = zookeeper; this.conf = conf; this.abortable = abortable; + this.replicationForBulkLoadEnabled = conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, + HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT); String replicationZNodeName = conf.get("zookeeper.znode.replication", "replication"); String peersZNodeName = conf.get("zookeeper.znode.replication.peers", "peers"); String queuesZNodeName = conf.get("zookeeper.znode.replication.rs", "rs"); + String hfileRefsZNodeName = conf.get(ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_KEY, + ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_DEFAULT); this.peerStateNodeName = conf.get("zookeeper.znode.replication.peers.state", "peer-state"); this.ourClusterKey = ZKConfig.getZooKeeperClusterKey(this.conf); this.replicationZNode = ZKUtil.joinZNode(this.zookeeper.baseZNode, replicationZNodeName); this.peersZNode = ZKUtil.joinZNode(replicationZNode, peersZNodeName); this.queuesZNode = ZKUtil.joinZNode(replicationZNode, queuesZNodeName); + this.hfileRefsZNode = ZKUtil.joinZNode(replicationZNode, hfileRefsZNodeName); } public List getListOfReplicators() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java index bf803bea90b..3d660cb2662 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java @@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.replication.ReplicationStateZKBase; import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; @@ -886,7 +887,7 @@ public class ZKUtil { JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME) == null && conf.get(HConstants.ZK_CLIENT_KERBEROS_PRINCIPAL) == null && conf.get(HConstants.ZK_SERVER_KERBEROS_PRINCIPAL) == null) { - + return false; } } catch(Exception e) { @@ -1799,6 +1800,27 @@ public class ZKUtil { } else if (child.equals(zkw.getConfiguration(). 
get("zookeeper.znode.replication.rs", "rs"))) { appendRSZnodes(zkw, znode, sb); + } else if (child.equals(zkw.getConfiguration().get( + ReplicationStateZKBase.ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_KEY, + ReplicationStateZKBase.ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_DEFAULT))) { + appendHFileRefsZnodes(zkw, znode, sb); + } + } + } + + private static void appendHFileRefsZnodes(ZooKeeperWatcher zkw, String hfileRefsZnode, + StringBuilder sb) throws KeeperException { + sb.append("\n").append(hfileRefsZnode).append(": "); + for (String peerIdZnode : ZKUtil.listChildrenNoWatch(zkw, hfileRefsZnode)) { + String znodeToProcess = ZKUtil.joinZNode(hfileRefsZnode, peerIdZnode); + sb.append("\n").append(znodeToProcess).append(": "); + List peerHFileRefsZnodes = ZKUtil.listChildrenNoWatch(zkw, znodeToProcess); + int size = peerHFileRefsZnodes.size(); + for (int i = 0; i < size; i++) { + sb.append(peerHFileRefsZnodes.get(i)); + if (i != size - 1) { + sb.append(", "); + } } } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index a5c1d5c693f..c466d8b54ba 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -835,6 +835,18 @@ public final class HConstants { REPLICATION_SINK_SERVICE_CLASSNAME = "hbase.replication.sink.service"; public static final String REPLICATION_SERVICE_CLASSNAME_DEFAULT = "org.apache.hadoop.hbase.replication.regionserver.Replication"; + public static final String REPLICATION_BULKLOAD_ENABLE_KEY = "hbase.replication.bulkload.enabled"; + public static final boolean REPLICATION_BULKLOAD_ENABLE_DEFAULT = false; + /** Replication cluster id of source cluster which uniquely identifies itself with peer cluster */ + public static final String REPLICATION_CLUSTER_ID = "hbase.replication.cluster.id"; + /** + * Directory where the source cluster file system client configuration are placed which is used by + * sink cluster to copy HFiles from source cluster file system + */ + public static final String REPLICATION_CONF_DIR = "hbase.replication.conf.dir"; + + /** Maximum time to retry for a failed bulk load request */ + public static final String BULKLOAD_MAX_RETRIES_NUMBER = "hbase.bulkload.retries.number"; /** HBCK special code name used as server name when manipulating ZK nodes */ public static final String HBCK_CODE_NAME = "HBCKServerName"; @@ -1231,7 +1243,7 @@ public final class HConstants { public static final String HBASE_CANARY_WRITE_TABLE_CHECK_PERIOD_KEY = "hbase.canary.write.table.check.period"; - + /** * Configuration keys for programmatic JAAS configuration for secured ZK interaction */ @@ -1240,7 +1252,7 @@ public final class HConstants { "hbase.zookeeper.client.kerberos.principal"; public static final String ZK_SERVER_KEYTAB_FILE = "hbase.zookeeper.server.keytab.file"; public static final String ZK_SERVER_KERBEROS_PRINCIPAL = - "hbase.zookeeper.server.kerberos.principal"; + "hbase.zookeeper.server.kerberos.principal"; private HConstants() { // Can't be instantiated with this ctor. 
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java index 698a59a2acb..9fb8415d0c9 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java @@ -22,9 +22,11 @@ public interface MetricsReplicationSinkSource { public static final String SINK_AGE_OF_LAST_APPLIED_OP = "sink.ageOfLastAppliedOp"; public static final String SINK_APPLIED_BATCHES = "sink.appliedBatches"; public static final String SINK_APPLIED_OPS = "sink.appliedOps"; + public static final String SINK_APPLIED_HFILES = "sink.appliedHFiles"; void setLastAppliedOpAge(long age); void incrAppliedBatches(long batches); void incrAppliedOps(long batchsize); long getLastAppliedOpAge(); + void incrAppliedHFiles(long hfileSize); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java index fecf191a063..188c3a3f658 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java @@ -32,6 +32,9 @@ public interface MetricsReplicationSourceSource { public static final String SOURCE_LOG_EDITS_FILTERED = "source.logEditsFiltered"; + public static final String SOURCE_SHIPPED_HFILES = "source.shippedHFiles"; + public static final String SOURCE_SIZE_OF_HFILE_REFS_QUEUE = "source.sizeOfHFileRefsQueue"; + void setLastShippedAge(long age); void setSizeOfLogQueue(int size); void incrSizeOfLogQueue(int size); @@ -44,4 +47,7 @@ public interface MetricsReplicationSourceSource { void incrLogReadInEdits(long size); void clear(); long getLastShippedAge(); + void incrHFilesShipped(long hfiles); + void incrSizeOfHFileRefsQueue(long size); + void decrSizeOfHFileRefsQueue(long size); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java index 6dace107f99..392cd39c04e 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java @@ -32,6 +32,8 @@ public class MetricsReplicationGlobalSourceSource implements MetricsReplicationS private final MutableCounterLong shippedOpsCounter; private final MutableCounterLong shippedKBsCounter; private final MutableCounterLong logReadInBytesCounter; + private final MutableCounterLong shippedHFilesCounter; + private final MutableGaugeLong sizeOfHFileRefsQueueGauge; public MetricsReplicationGlobalSourceSource(MetricsReplicationSourceImpl rms) { this.rms = rms; @@ -51,6 +53,11 @@ public class MetricsReplicationGlobalSourceSource implements MetricsReplicationS logReadInEditsCounter = rms.getMetricsRegistry().getLongCounter(SOURCE_LOG_READ_IN_EDITS, 0L); 
logEditsFilteredCounter = rms.getMetricsRegistry().getLongCounter(SOURCE_LOG_EDITS_FILTERED, 0L); + + shippedHFilesCounter = rms.getMetricsRegistry().getLongCounter(SOURCE_SHIPPED_HFILES, 0L); + + sizeOfHFileRefsQueueGauge = + rms.getMetricsRegistry().getLongGauge(SOURCE_SIZE_OF_HFILE_REFS_QUEUE, 0L); } @Override public void setLastShippedAge(long age) { @@ -100,4 +107,18 @@ public class MetricsReplicationGlobalSourceSource implements MetricsReplicationS public long getLastShippedAge() { return ageOfLastShippedOpGauge.value(); } + + @Override public void incrHFilesShipped(long hfiles) { + shippedHFilesCounter.incr(hfiles); + } + + @Override + public void incrSizeOfHFileRefsQueue(long size) { + sizeOfHFileRefsQueueGauge.incr(size); + } + + @Override + public void decrSizeOfHFileRefsQueue(long size) { + sizeOfHFileRefsQueueGauge.decr(size); + } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java index 14212ba0869..8f4a3375417 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java @@ -26,11 +26,13 @@ public class MetricsReplicationSinkSourceImpl implements MetricsReplicationSinkS private final MutableGaugeLong ageGauge; private final MutableCounterLong batchesCounter; private final MutableCounterLong opsCounter; + private final MutableCounterLong hfilesCounter; public MetricsReplicationSinkSourceImpl(MetricsReplicationSourceImpl rms) { ageGauge = rms.getMetricsRegistry().getLongGauge(SINK_AGE_OF_LAST_APPLIED_OP, 0L); batchesCounter = rms.getMetricsRegistry().getLongCounter(SINK_APPLIED_BATCHES, 0L); opsCounter = rms.getMetricsRegistry().getLongCounter(SINK_APPLIED_OPS, 0L); + hfilesCounter = rms.getMetricsRegistry().getLongCounter(SINK_APPLIED_HFILES, 0L); } @Override public void setLastAppliedOpAge(long age) { @@ -49,4 +51,9 @@ public class MetricsReplicationSinkSourceImpl implements MetricsReplicationSinkS public long getLastAppliedOpAge() { return ageGauge.value(); } + + @Override + public void incrAppliedHFiles(long hfiles) { + hfilesCounter.incr(hfiles); + } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java index 1422e7e1cd3..217cc3e2a46 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java @@ -32,6 +32,8 @@ public class MetricsReplicationSourceSourceImpl implements MetricsReplicationSou private final String shippedOpsKey; private final String shippedKBsKey; private final String logReadInBytesKey; + private final String shippedHFilesKey; + private final String sizeOfHFileRefsQueueKey; private final MutableGaugeLong ageOfLastShippedOpGauge; private final MutableGaugeLong sizeOfLogQueueGauge; @@ -41,6 +43,8 @@ public class MetricsReplicationSourceSourceImpl implements MetricsReplicationSou private final MutableCounterLong shippedOpsCounter; private final 
MutableCounterLong shippedKBsCounter; private final MutableCounterLong logReadInBytesCounter; + private final MutableCounterLong shippedHFilesCounter; + private final MutableGaugeLong sizeOfHFileRefsQueueGauge; public MetricsReplicationSourceSourceImpl(MetricsReplicationSourceImpl rms, String id) { this.rms = rms; @@ -69,6 +73,12 @@ public class MetricsReplicationSourceSourceImpl implements MetricsReplicationSou logEditsFilteredKey = "source." + id + ".logEditsFiltered"; logEditsFilteredCounter = rms.getMetricsRegistry().getLongCounter(logEditsFilteredKey, 0L); + + shippedHFilesKey = "source." + this.id + ".shippedHFiles"; + shippedHFilesCounter = rms.getMetricsRegistry().getLongCounter(shippedHFilesKey, 0L); + + sizeOfHFileRefsQueueKey = "source." + id + ".sizeOfHFileRefsQueue"; + sizeOfHFileRefsQueueGauge = rms.getMetricsRegistry().getLongGauge(sizeOfHFileRefsQueueKey, 0L); } @Override public void setLastShippedAge(long age) { @@ -124,10 +134,28 @@ public class MetricsReplicationSourceSourceImpl implements MetricsReplicationSou rms.removeMetric(logReadInEditsKey); rms.removeMetric(logEditsFilteredKey); + + rms.removeMetric(shippedHFilesKey); + rms.removeMetric(sizeOfHFileRefsQueueKey); } @Override public long getLastShippedAge() { return ageOfLastShippedOpGauge.value(); } + + @Override + public void incrHFilesShipped(long hfiles) { + shippedHFilesCounter.incr(hfiles); + } + + @Override + public void incrSizeOfHFileRefsQueue(long size) { + sizeOfHFileRefsQueueGauge.incr(size); + } + + @Override + public void decrSizeOfHFileRefsQueue(long size) { + sizeOfHFileRefsQueueGauge.decr(size); + } } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java index b4c378b0c3d..1c59ea60789 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java @@ -16896,6 +16896,51 @@ public final class AdminProtos { */ org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntryOrBuilder getEntryOrBuilder( int index); + + // optional string replicationClusterId = 2; + /** + * optional string replicationClusterId = 2; + */ + boolean hasReplicationClusterId(); + /** + * optional string replicationClusterId = 2; + */ + java.lang.String getReplicationClusterId(); + /** + * optional string replicationClusterId = 2; + */ + com.google.protobuf.ByteString + getReplicationClusterIdBytes(); + + // optional string sourceBaseNamespaceDirPath = 3; + /** + * optional string sourceBaseNamespaceDirPath = 3; + */ + boolean hasSourceBaseNamespaceDirPath(); + /** + * optional string sourceBaseNamespaceDirPath = 3; + */ + java.lang.String getSourceBaseNamespaceDirPath(); + /** + * optional string sourceBaseNamespaceDirPath = 3; + */ + com.google.protobuf.ByteString + getSourceBaseNamespaceDirPathBytes(); + + // optional string sourceHFileArchiveDirPath = 4; + /** + * optional string sourceHFileArchiveDirPath = 4; + */ + boolean hasSourceHFileArchiveDirPath(); + /** + * optional string sourceHFileArchiveDirPath = 4; + */ + java.lang.String getSourceHFileArchiveDirPath(); + /** + * optional string sourceHFileArchiveDirPath = 4; + */ + com.google.protobuf.ByteString + getSourceHFileArchiveDirPathBytes(); } /** * Protobuf type {@code hbase.pb.ReplicateWALEntryRequest} @@ -16963,6 +17008,21 @@ public final class AdminProtos { 
entry_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.PARSER, extensionRegistry)); break; } + case 18: { + bitField0_ |= 0x00000001; + replicationClusterId_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000002; + sourceBaseNamespaceDirPath_ = input.readBytes(); + break; + } + case 34: { + bitField0_ |= 0x00000004; + sourceHFileArchiveDirPath_ = input.readBytes(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -17005,6 +17065,7 @@ public final class AdminProtos { return PARSER; } + private int bitField0_; // repeated .hbase.pb.WALEntry entry = 1; public static final int ENTRY_FIELD_NUMBER = 1; private java.util.List entry_; @@ -17041,8 +17102,140 @@ public final class AdminProtos { return entry_.get(index); } + // optional string replicationClusterId = 2; + public static final int REPLICATIONCLUSTERID_FIELD_NUMBER = 2; + private java.lang.Object replicationClusterId_; + /** + * optional string replicationClusterId = 2; + */ + public boolean hasReplicationClusterId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string replicationClusterId = 2; + */ + public java.lang.String getReplicationClusterId() { + java.lang.Object ref = replicationClusterId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + replicationClusterId_ = s; + } + return s; + } + } + /** + * optional string replicationClusterId = 2; + */ + public com.google.protobuf.ByteString + getReplicationClusterIdBytes() { + java.lang.Object ref = replicationClusterId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + replicationClusterId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string sourceBaseNamespaceDirPath = 3; + public static final int SOURCEBASENAMESPACEDIRPATH_FIELD_NUMBER = 3; + private java.lang.Object sourceBaseNamespaceDirPath_; + /** + * optional string sourceBaseNamespaceDirPath = 3; + */ + public boolean hasSourceBaseNamespaceDirPath() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string sourceBaseNamespaceDirPath = 3; + */ + public java.lang.String getSourceBaseNamespaceDirPath() { + java.lang.Object ref = sourceBaseNamespaceDirPath_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + sourceBaseNamespaceDirPath_ = s; + } + return s; + } + } + /** + * optional string sourceBaseNamespaceDirPath = 3; + */ + public com.google.protobuf.ByteString + getSourceBaseNamespaceDirPathBytes() { + java.lang.Object ref = sourceBaseNamespaceDirPath_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + sourceBaseNamespaceDirPath_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string sourceHFileArchiveDirPath = 4; + public static final int SOURCEHFILEARCHIVEDIRPATH_FIELD_NUMBER = 4; + private java.lang.Object sourceHFileArchiveDirPath_; + /** + * optional string sourceHFileArchiveDirPath = 4; + */ + public 
boolean hasSourceHFileArchiveDirPath() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string sourceHFileArchiveDirPath = 4; + */ + public java.lang.String getSourceHFileArchiveDirPath() { + java.lang.Object ref = sourceHFileArchiveDirPath_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + sourceHFileArchiveDirPath_ = s; + } + return s; + } + } + /** + * optional string sourceHFileArchiveDirPath = 4; + */ + public com.google.protobuf.ByteString + getSourceHFileArchiveDirPathBytes() { + java.lang.Object ref = sourceHFileArchiveDirPath_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + sourceHFileArchiveDirPath_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + private void initFields() { entry_ = java.util.Collections.emptyList(); + replicationClusterId_ = ""; + sourceBaseNamespaceDirPath_ = ""; + sourceHFileArchiveDirPath_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -17065,6 +17258,15 @@ public final class AdminProtos { for (int i = 0; i < entry_.size(); i++) { output.writeMessage(1, entry_.get(i)); } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(2, getReplicationClusterIdBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(3, getSourceBaseNamespaceDirPathBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(4, getSourceHFileArchiveDirPathBytes()); + } getUnknownFields().writeTo(output); } @@ -17078,6 +17280,18 @@ public final class AdminProtos { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, entry_.get(i)); } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getReplicationClusterIdBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getSourceBaseNamespaceDirPathBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, getSourceHFileArchiveDirPathBytes()); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -17103,6 +17317,21 @@ public final class AdminProtos { boolean result = true; result = result && getEntryList() .equals(other.getEntryList()); + result = result && (hasReplicationClusterId() == other.hasReplicationClusterId()); + if (hasReplicationClusterId()) { + result = result && getReplicationClusterId() + .equals(other.getReplicationClusterId()); + } + result = result && (hasSourceBaseNamespaceDirPath() == other.hasSourceBaseNamespaceDirPath()); + if (hasSourceBaseNamespaceDirPath()) { + result = result && getSourceBaseNamespaceDirPath() + .equals(other.getSourceBaseNamespaceDirPath()); + } + result = result && (hasSourceHFileArchiveDirPath() == other.hasSourceHFileArchiveDirPath()); + if (hasSourceHFileArchiveDirPath()) { + result = result && getSourceHFileArchiveDirPath() + .equals(other.getSourceHFileArchiveDirPath()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -17120,6 +17349,18 @@ public final class AdminProtos { hash = (37 * hash) + 
ENTRY_FIELD_NUMBER; hash = (53 * hash) + getEntryList().hashCode(); } + if (hasReplicationClusterId()) { + hash = (37 * hash) + REPLICATIONCLUSTERID_FIELD_NUMBER; + hash = (53 * hash) + getReplicationClusterId().hashCode(); + } + if (hasSourceBaseNamespaceDirPath()) { + hash = (37 * hash) + SOURCEBASENAMESPACEDIRPATH_FIELD_NUMBER; + hash = (53 * hash) + getSourceBaseNamespaceDirPath().hashCode(); + } + if (hasSourceHFileArchiveDirPath()) { + hash = (37 * hash) + SOURCEHFILEARCHIVEDIRPATH_FIELD_NUMBER; + hash = (53 * hash) + getSourceHFileArchiveDirPath().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -17243,6 +17484,12 @@ public final class AdminProtos { } else { entryBuilder_.clear(); } + replicationClusterId_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + sourceBaseNamespaceDirPath_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + sourceHFileArchiveDirPath_ = ""; + bitField0_ = (bitField0_ & ~0x00000008); return this; } @@ -17270,6 +17517,7 @@ public final class AdminProtos { public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest buildPartial() { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest(this); int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; if (entryBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { entry_ = java.util.Collections.unmodifiableList(entry_); @@ -17279,6 +17527,19 @@ public final class AdminProtos { } else { result.entry_ = entryBuilder_.build(); } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000001; + } + result.replicationClusterId_ = replicationClusterId_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; + } + result.sourceBaseNamespaceDirPath_ = sourceBaseNamespaceDirPath_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000004; + } + result.sourceHFileArchiveDirPath_ = sourceHFileArchiveDirPath_; + result.bitField0_ = to_bitField0_; onBuilt(); return result; } @@ -17320,6 +17581,21 @@ public final class AdminProtos { } } } + if (other.hasReplicationClusterId()) { + bitField0_ |= 0x00000002; + replicationClusterId_ = other.replicationClusterId_; + onChanged(); + } + if (other.hasSourceBaseNamespaceDirPath()) { + bitField0_ |= 0x00000004; + sourceBaseNamespaceDirPath_ = other.sourceBaseNamespaceDirPath_; + onChanged(); + } + if (other.hasSourceHFileArchiveDirPath()) { + bitField0_ |= 0x00000008; + sourceHFileArchiveDirPath_ = other.sourceHFileArchiveDirPath_; + onChanged(); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -17593,6 +17869,228 @@ public final class AdminProtos { return entryBuilder_; } + // optional string replicationClusterId = 2; + private java.lang.Object replicationClusterId_ = ""; + /** + * optional string replicationClusterId = 2; + */ + public boolean hasReplicationClusterId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string replicationClusterId = 2; + */ + public java.lang.String getReplicationClusterId() { + java.lang.Object ref = replicationClusterId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + replicationClusterId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string replicationClusterId = 2; + */ + public 
com.google.protobuf.ByteString + getReplicationClusterIdBytes() { + java.lang.Object ref = replicationClusterId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + replicationClusterId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string replicationClusterId = 2; + */ + public Builder setReplicationClusterId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + replicationClusterId_ = value; + onChanged(); + return this; + } + /** + * optional string replicationClusterId = 2; + */ + public Builder clearReplicationClusterId() { + bitField0_ = (bitField0_ & ~0x00000002); + replicationClusterId_ = getDefaultInstance().getReplicationClusterId(); + onChanged(); + return this; + } + /** + * optional string replicationClusterId = 2; + */ + public Builder setReplicationClusterIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + replicationClusterId_ = value; + onChanged(); + return this; + } + + // optional string sourceBaseNamespaceDirPath = 3; + private java.lang.Object sourceBaseNamespaceDirPath_ = ""; + /** + * optional string sourceBaseNamespaceDirPath = 3; + */ + public boolean hasSourceBaseNamespaceDirPath() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string sourceBaseNamespaceDirPath = 3; + */ + public java.lang.String getSourceBaseNamespaceDirPath() { + java.lang.Object ref = sourceBaseNamespaceDirPath_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + sourceBaseNamespaceDirPath_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string sourceBaseNamespaceDirPath = 3; + */ + public com.google.protobuf.ByteString + getSourceBaseNamespaceDirPathBytes() { + java.lang.Object ref = sourceBaseNamespaceDirPath_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + sourceBaseNamespaceDirPath_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string sourceBaseNamespaceDirPath = 3; + */ + public Builder setSourceBaseNamespaceDirPath( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + sourceBaseNamespaceDirPath_ = value; + onChanged(); + return this; + } + /** + * optional string sourceBaseNamespaceDirPath = 3; + */ + public Builder clearSourceBaseNamespaceDirPath() { + bitField0_ = (bitField0_ & ~0x00000004); + sourceBaseNamespaceDirPath_ = getDefaultInstance().getSourceBaseNamespaceDirPath(); + onChanged(); + return this; + } + /** + * optional string sourceBaseNamespaceDirPath = 3; + */ + public Builder setSourceBaseNamespaceDirPathBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + sourceBaseNamespaceDirPath_ = value; + onChanged(); + return this; + } + + // optional string sourceHFileArchiveDirPath = 4; + private java.lang.Object sourceHFileArchiveDirPath_ = ""; + /** + * optional string sourceHFileArchiveDirPath = 4; + */ + public boolean hasSourceHFileArchiveDirPath() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional 
string sourceHFileArchiveDirPath = 4; + */ + public java.lang.String getSourceHFileArchiveDirPath() { + java.lang.Object ref = sourceHFileArchiveDirPath_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + sourceHFileArchiveDirPath_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string sourceHFileArchiveDirPath = 4; + */ + public com.google.protobuf.ByteString + getSourceHFileArchiveDirPathBytes() { + java.lang.Object ref = sourceHFileArchiveDirPath_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + sourceHFileArchiveDirPath_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string sourceHFileArchiveDirPath = 4; + */ + public Builder setSourceHFileArchiveDirPath( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + sourceHFileArchiveDirPath_ = value; + onChanged(); + return this; + } + /** + * optional string sourceHFileArchiveDirPath = 4; + */ + public Builder clearSourceHFileArchiveDirPath() { + bitField0_ = (bitField0_ & ~0x00000008); + sourceHFileArchiveDirPath_ = getDefaultInstance().getSourceHFileArchiveDirPath(); + onChanged(); + return this; + } + /** + * optional string sourceHFileArchiveDirPath = 4; + */ + public Builder setSourceHFileArchiveDirPathBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + sourceHFileArchiveDirPath_ = value; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:hbase.pb.ReplicateWALEntryRequest) } @@ -23539,56 +24037,58 @@ public final class AdminProtos { "ster_system_time\030\004 \001(\004\"\026\n\024MergeRegionsRe" + "sponse\"a\n\010WALEntry\022\035\n\003key\030\001 \002(\0132\020.hbase." + "pb.WALKey\022\027\n\017key_value_bytes\030\002 \003(\014\022\035\n\025as", - "sociated_cell_count\030\003 \001(\005\"=\n\030ReplicateWA" + - "LEntryRequest\022!\n\005entry\030\001 \003(\0132\022.hbase.pb." + - "WALEntry\"\033\n\031ReplicateWALEntryResponse\"\026\n" + - "\024RollWALWriterRequest\"0\n\025RollWALWriterRe" + - "sponse\022\027\n\017region_to_flush\030\001 \003(\014\"#\n\021StopS" + - "erverRequest\022\016\n\006reason\030\001 \002(\t\"\024\n\022StopServ" + - "erResponse\"\026\n\024GetServerInfoRequest\"K\n\nSe" + - "rverInfo\022)\n\013server_name\030\001 \002(\0132\024.hbase.pb" + - ".ServerName\022\022\n\nwebui_port\030\002 \001(\r\"B\n\025GetSe" + - "rverInfoResponse\022)\n\013server_info\030\001 \002(\0132\024.", - "hbase.pb.ServerInfo\"\034\n\032UpdateConfigurati" + - "onRequest\"\035\n\033UpdateConfigurationResponse" + - "2\207\013\n\014AdminService\022P\n\rGetRegionInfo\022\036.hba" + - "se.pb.GetRegionInfoRequest\032\037.hbase.pb.Ge" + - "tRegionInfoResponse\022M\n\014GetStoreFile\022\035.hb" + - "ase.pb.GetStoreFileRequest\032\036.hbase.pb.Ge" + - "tStoreFileResponse\022V\n\017GetOnlineRegion\022 ." + - "hbase.pb.GetOnlineRegionRequest\032!.hbase." 
+ - "pb.GetOnlineRegionResponse\022G\n\nOpenRegion" + - "\022\033.hbase.pb.OpenRegionRequest\032\034.hbase.pb", - ".OpenRegionResponse\022M\n\014WarmupRegion\022\035.hb" + - "ase.pb.WarmupRegionRequest\032\036.hbase.pb.Wa" + - "rmupRegionResponse\022J\n\013CloseRegion\022\034.hbas" + - "e.pb.CloseRegionRequest\032\035.hbase.pb.Close" + - "RegionResponse\022J\n\013FlushRegion\022\034.hbase.pb" + - ".FlushRegionRequest\032\035.hbase.pb.FlushRegi" + - "onResponse\022J\n\013SplitRegion\022\034.hbase.pb.Spl" + - "itRegionRequest\032\035.hbase.pb.SplitRegionRe" + - "sponse\022P\n\rCompactRegion\022\036.hbase.pb.Compa" + - "ctRegionRequest\032\037.hbase.pb.CompactRegion", - "Response\022M\n\014MergeRegions\022\035.hbase.pb.Merg" + - "eRegionsRequest\032\036.hbase.pb.MergeRegionsR" + - "esponse\022\\\n\021ReplicateWALEntry\022\".hbase.pb." + - "ReplicateWALEntryRequest\032#.hbase.pb.Repl" + - "icateWALEntryResponse\022Q\n\006Replay\022\".hbase." + - "pb.ReplicateWALEntryRequest\032#.hbase.pb.R" + - "eplicateWALEntryResponse\022P\n\rRollWALWrite" + - "r\022\036.hbase.pb.RollWALWriterRequest\032\037.hbas" + - "e.pb.RollWALWriterResponse\022P\n\rGetServerI" + - "nfo\022\036.hbase.pb.GetServerInfoRequest\032\037.hb", - "ase.pb.GetServerInfoResponse\022G\n\nStopServ" + - "er\022\033.hbase.pb.StopServerRequest\032\034.hbase." + - "pb.StopServerResponse\022_\n\022UpdateFavoredNo" + - "des\022#.hbase.pb.UpdateFavoredNodesRequest" + - "\032$.hbase.pb.UpdateFavoredNodesResponse\022b" + - "\n\023UpdateConfiguration\022$.hbase.pb.UpdateC" + - "onfigurationRequest\032%.hbase.pb.UpdateCon" + - "figurationResponseBA\n*org.apache.hadoop." + - "hbase.protobuf.generatedB\013AdminProtosH\001\210" + - "\001\001\240\001\001" + "sociated_cell_count\030\003 \001(\005\"\242\001\n\030ReplicateW" + + "ALEntryRequest\022!\n\005entry\030\001 \003(\0132\022.hbase.pb" + + ".WALEntry\022\034\n\024replicationClusterId\030\002 \001(\t\022" + + "\"\n\032sourceBaseNamespaceDirPath\030\003 \001(\t\022!\n\031s" + + "ourceHFileArchiveDirPath\030\004 \001(\t\"\033\n\031Replic" + + "ateWALEntryResponse\"\026\n\024RollWALWriterRequ" + + "est\"0\n\025RollWALWriterResponse\022\027\n\017region_t" + + "o_flush\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006re" + + "ason\030\001 \002(\t\"\024\n\022StopServerResponse\"\026\n\024GetS" + + "erverInfoRequest\"K\n\nServerInfo\022)\n\013server", + "_name\030\001 \002(\0132\024.hbase.pb.ServerName\022\022\n\nweb" + + "ui_port\030\002 \001(\r\"B\n\025GetServerInfoResponse\022)" + + "\n\013server_info\030\001 \002(\0132\024.hbase.pb.ServerInf" + + "o\"\034\n\032UpdateConfigurationRequest\"\035\n\033Updat" + + "eConfigurationResponse2\207\013\n\014AdminService\022" + + "P\n\rGetRegionInfo\022\036.hbase.pb.GetRegionInf" + + "oRequest\032\037.hbase.pb.GetRegionInfoRespons" + + "e\022M\n\014GetStoreFile\022\035.hbase.pb.GetStoreFil" + + "eRequest\032\036.hbase.pb.GetStoreFileResponse" + + "\022V\n\017GetOnlineRegion\022 .hbase.pb.GetOnline", + "RegionRequest\032!.hbase.pb.GetOnlineRegion" + + "Response\022G\n\nOpenRegion\022\033.hbase.pb.OpenRe" + + "gionRequest\032\034.hbase.pb.OpenRegionRespons" + + "e\022M\n\014WarmupRegion\022\035.hbase.pb.WarmupRegio" + + "nRequest\032\036.hbase.pb.WarmupRegionResponse" + + "\022J\n\013CloseRegion\022\034.hbase.pb.CloseRegionRe" + + "quest\032\035.hbase.pb.CloseRegionResponse\022J\n\013" + + "FlushRegion\022\034.hbase.pb.FlushRegionReques" + + "t\032\035.hbase.pb.FlushRegionResponse\022J\n\013Spli" + + "tRegion\022\034.hbase.pb.SplitRegionRequest\032\035.", 
+ "hbase.pb.SplitRegionResponse\022P\n\rCompactR" + + "egion\022\036.hbase.pb.CompactRegionRequest\032\037." + + "hbase.pb.CompactRegionResponse\022M\n\014MergeR" + + "egions\022\035.hbase.pb.MergeRegionsRequest\032\036." + + "hbase.pb.MergeRegionsResponse\022\\\n\021Replica" + + "teWALEntry\022\".hbase.pb.ReplicateWALEntryR" + + "equest\032#.hbase.pb.ReplicateWALEntryRespo" + + "nse\022Q\n\006Replay\022\".hbase.pb.ReplicateWALEnt" + + "ryRequest\032#.hbase.pb.ReplicateWALEntryRe" + + "sponse\022P\n\rRollWALWriter\022\036.hbase.pb.RollW", + "ALWriterRequest\032\037.hbase.pb.RollWALWriter" + + "Response\022P\n\rGetServerInfo\022\036.hbase.pb.Get" + + "ServerInfoRequest\032\037.hbase.pb.GetServerIn" + + "foResponse\022G\n\nStopServer\022\033.hbase.pb.Stop" + + "ServerRequest\032\034.hbase.pb.StopServerRespo" + + "nse\022_\n\022UpdateFavoredNodes\022#.hbase.pb.Upd" + + "ateFavoredNodesRequest\032$.hbase.pb.Update" + + "FavoredNodesResponse\022b\n\023UpdateConfigurat" + + "ion\022$.hbase.pb.UpdateConfigurationReques" + + "t\032%.hbase.pb.UpdateConfigurationResponse", + "BA\n*org.apache.hadoop.hbase.protobuf.gen" + + "eratedB\013AdminProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -23750,7 +24250,7 @@ public final class AdminProtos { internal_static_hbase_pb_ReplicateWALEntryRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ReplicateWALEntryRequest_descriptor, - new java.lang.String[] { "Entry", }); + new java.lang.String[] { "Entry", "ReplicationClusterId", "SourceBaseNamespaceDirPath", "SourceHFileArchiveDirPath", }); internal_static_hbase_pb_ReplicateWALEntryResponse_descriptor = getDescriptor().getMessageTypes().get(24); internal_static_hbase_pb_ReplicateWALEntryResponse_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/protobuf/Admin.proto b/hbase-protocol/src/main/protobuf/Admin.proto index f7787f55f70..a1905a4e7bb 100644 --- a/hbase-protocol/src/main/protobuf/Admin.proto +++ b/hbase-protocol/src/main/protobuf/Admin.proto @@ -211,6 +211,9 @@ message WALEntry { */ message ReplicateWALEntryRequest { repeated WALEntry entry = 1; + optional string replicationClusterId = 2; + optional string sourceBaseNamespaceDirPath = 3; + optional string sourceHFileArchiveDirPath = 4; } message ReplicateWALEntryResponse { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java index 443cd96d9e7..d6bcd660940 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java @@ -20,66 +20,6 @@ package org.apache.hadoop.hbase.mapreduce; import static java.lang.String.format; -import com.google.common.collect.HashMultimap; -import com.google.common.collect.Multimap; -import com.google.common.collect.Multimaps; -import com.google.common.util.concurrent.ThreadFactoryBuilder; - -import org.apache.commons.lang.mutable.MutableInt; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; 
-import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.ClusterConnection; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.NeedUnmanagedConnectionException; -import org.apache.hadoop.hbase.client.RegionLocator; -import org.apache.hadoop.hbase.client.RegionServerCallable; -import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.client.coprocessor.SecureBulkLoadClient; -import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -import org.apache.hadoop.hbase.io.HFileLink; -import org.apache.hadoop.hbase.io.HalfStoreFileReader; -import org.apache.hadoop.hbase.io.Reference; -import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.io.hfile.HFileContext; -import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; -import org.apache.hadoop.hbase.io.hfile.HFileScanner; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HStore; -import org.apache.hadoop.hbase.regionserver.StoreFile; -import org.apache.hadoop.hbase.regionserver.StoreFileInfo; -import org.apache.hadoop.hbase.security.UserProvider; -import org.apache.hadoop.hbase.security.token.FsDelegationToken; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.FSHDFSUtils; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.util.Tool; -import org.apache.hadoop.util.ToolRunner; - import java.io.FileNotFoundException; import java.io.IOException; import java.io.InterruptedIOException; @@ -106,6 +46,64 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import org.apache.commons.lang.mutable.MutableInt; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import 
org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.RegionLocator; +import org.apache.hadoop.hbase.client.RegionServerCallable; +import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.coprocessor.SecureBulkLoadClient; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.io.HFileLink; +import org.apache.hadoop.hbase.io.HalfStoreFileReader; +import org.apache.hadoop.hbase.io.Reference; +import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.io.hfile.HFileContext; +import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; +import org.apache.hadoop.hbase.io.hfile.HFileScanner; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.hadoop.hbase.regionserver.HStore; +import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.security.UserProvider; +import org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint; +import org.apache.hadoop.hbase.security.token.FsDelegationToken; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSHDFSUtils; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +import com.google.common.collect.HashMultimap; +import com.google.common.collect.Multimap; +import com.google.common.collect.Multimaps; +import com.google.common.util.concurrent.ThreadFactoryBuilder; + /** * Tool to load the output of HFileOutputFormat into an existing table. * @see #usage() @@ -131,6 +129,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { private FsDelegationToken fsDelegationToken; private String bulkToken; private UserProvider userProvider; + private int nrThreads; private LoadIncrementalHFiles() {} @@ -151,6 +150,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool { this.fsDelegationToken = new FsDelegationToken(userProvider, "renewer"); assignSeqIds = conf.getBoolean(ASSIGN_SEQ_IDS, true); maxFilesPerRegionPerFamily = conf.getInt(MAX_FILES_PER_REGION_PER_FAMILY, 32); + nrThreads = conf.getInt("hbase.loadincremental.threads.max", + Runtime.getRuntime().availableProcessors()); } } @@ -251,7 +252,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * region boundary, and each part is added back into the queue. * The import process finishes when the queue is empty. 
*/ - static class LoadQueueItem { + public static class LoadQueueItem { final byte[] family; final Path hfilePath; @@ -343,7 +344,6 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * @param table the table to load into * @throws TableNotFoundException if table does not yet exist */ - @SuppressWarnings("deprecation") public void doBulkLoad(Path hfofDir, final Admin admin, Table table, RegionLocator regionLocator) throws TableNotFoundException, IOException { @@ -351,16 +351,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { throw new TableNotFoundException("Table " + table.getName() + "is not currently available."); } - // initialize thread pools - int nrThreads = getConf().getInt("hbase.loadincremental.threads.max", - Runtime.getRuntime().availableProcessors()); - ThreadFactoryBuilder builder = new ThreadFactoryBuilder(); - builder.setNameFormat("LoadIncrementalHFiles-%1$d"); - ExecutorService pool = new ThreadPoolExecutor(nrThreads, nrThreads, - 60, TimeUnit.SECONDS, - new LinkedBlockingQueue(), - builder.build()); - ((ThreadPoolExecutor)pool).allowCoreThreadTimeOut(true); + ExecutorService pool = createExecutorService(); // LQI queue does not need to be threadsafe -- all operations on this queue // happen in this thread @@ -377,30 +368,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool { "option, consider removing the files and bulkload again without this option. " + "See HBASE-13985"); } - discoverLoadQueue(queue, hfofDir, validateHFile); - // check whether there is invalid family name in HFiles to be bulkloaded - Collection families = table.getTableDescriptor().getFamilies(); - ArrayList familyNames = new ArrayList(families.size()); - for (HColumnDescriptor family : families) { - familyNames.add(family.getNameAsString()); - } - ArrayList unmatchedFamilies = new ArrayList(); - Iterator queueIter = queue.iterator(); - while (queueIter.hasNext()) { - LoadQueueItem lqi = queueIter.next(); - String familyNameInHFile = Bytes.toString(lqi.family); - if (!familyNames.contains(familyNameInHFile)) { - unmatchedFamilies.add(familyNameInHFile); - } - } - if (unmatchedFamilies.size() > 0) { - String msg = - "Unmatched family names found: unmatched family names in HFiles to be bulkloaded: " - + unmatchedFamilies + "; valid family names of table " - + table.getName() + " are: " + familyNames; - LOG.error(msg); - throw new IOException(msg); - } + prepareHFileQueue(hfofDir, table, queue, validateHFile); + int count = 0; if (queue.isEmpty()) { @@ -427,7 +396,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { + count + " with " + queue.size() + " files remaining to group or split"); } - int maxRetries = getConf().getInt("hbase.bulkload.retries.number", 10); + int maxRetries = getConf().getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10); maxRetries = Math.max(maxRetries, startEndKeys.getFirst().length + 1); if (maxRetries != 0 && count >= maxRetries) { throw new IOException("Retry attempted " + count + @@ -476,6 +445,85 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } } + /** + * Prepare a collection of {@link LoadQueueItem} from list of source hfiles contained in the + * passed directory and validates whether the prepared queue has all the valid table column + * families in it. 
+ * @param hfilesDir directory containing list of hfiles to be loaded into the table + * @param table table to which hfiles should be loaded + * @param queue queue which needs to be loaded into the table + * @throws IOException If any I/O or network error occurred + */ + public void prepareHFileQueue(Path hfofDir, Table table, Deque queue, + boolean validateHFile) throws IOException { + discoverLoadQueue(queue, hfofDir, validateHFile); + validateFamiliesInHFiles(table, queue); + } + + // Initialize a thread pool + private ExecutorService createExecutorService() { + ThreadFactoryBuilder builder = new ThreadFactoryBuilder(); + builder.setNameFormat("LoadIncrementalHFiles-%1$d"); + ExecutorService pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS, + new LinkedBlockingQueue(), builder.build()); + ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true); + return pool; + } + + /** + * Checks whether there is any invalid family name in HFiles to be bulk loaded. + */ + private void validateFamiliesInHFiles(Table table, Deque queue) + throws IOException { + Collection families = table.getTableDescriptor().getFamilies(); + List familyNames = new ArrayList(families.size()); + for (HColumnDescriptor family : families) { + familyNames.add(family.getNameAsString()); + } + List unmatchedFamilies = new ArrayList(); + Iterator queueIter = queue.iterator(); + while (queueIter.hasNext()) { + LoadQueueItem lqi = queueIter.next(); + String familyNameInHFile = Bytes.toString(lqi.family); + if (!familyNames.contains(familyNameInHFile)) { + unmatchedFamilies.add(familyNameInHFile); + } + } + if (unmatchedFamilies.size() > 0) { + String msg = + "Unmatched family names found: unmatched family names in HFiles to be bulkloaded: " + + unmatchedFamilies + "; valid family names of table " + table.getName() + " are: " + + familyNames; + LOG.error(msg); + throw new IOException(msg); + } + } + + /** + * Used by the replication sink to load the hfiles from the source cluster. It does the following, + * 1. {@link LoadIncrementalHFiles#groupOrSplitPhase(Table, ExecutorService, Deque, Pair)} 2. + * {@link + * LoadIncrementalHFiles#bulkLoadPhase(Table, Connection, ExecutorService, Deque, Multimap)} + * @param table Table to which these hfiles should be loaded to + * @param conn Connection to use + * @param queue {@link LoadQueueItem} has hfiles yet to be loaded + * @param startEndKeys starting and ending row keys of the region + */ + public void loadHFileQueue(final Table table, final Connection conn, Deque queue, + Pair startEndKeys) throws IOException { + ExecutorService pool = null; + try { + pool = createExecutorService(); + Multimap regionGroups = + groupOrSplitPhase(table, pool, queue, startEndKeys); + bulkLoadPhase(table, conn, pool, queue, regionGroups); + } finally { + if (pool != null) { + pool.shutdown(); + } + } + } + /** * This takes the LQI's grouped by likely regions and attempts to bulk load * them. 
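Aside: a hypothetical sink-side usage sketch of the prepareHFileQueue and loadHFileQueue methods introduced above. Generic type parameters are elided in this flattened patch text; the sketch assumes Deque<LoadQueueItem> and Pair<byte[][], byte[][]>, and the class name and overall flow are illustrative only.

import java.util.ArrayDeque;
import java.util.Deque;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.LoadQueueItem;
import org.apache.hadoop.hbase.util.Pair;

public class SinkSideBulkLoadSketch {
  // Stage the replicated hfiles into a queue, then group/split and bulk load
  // them against the current region boundaries in one call.
  static void loadReplicatedHFiles(Configuration conf, Connection conn, TableName tableName,
      Path hfileDir) throws Exception {
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
    Deque<LoadQueueItem> queue = new ArrayDeque<LoadQueueItem>();
    try (Table table = conn.getTable(tableName);
        RegionLocator locator = conn.getRegionLocator(tableName)) {
      // Discover hfiles under hfileDir and validate their column families
      loader.prepareHFileQueue(hfileDir, table, queue, false);
      Pair<byte[][], byte[][]> startEndKeys = locator.getStartEndKeys();
      loader.loadHFileQueue(table, conn, queue, startEndKeys);
    }
  }
}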
Any failures are re-queued for another pass with the @@ -623,10 +671,10 @@ public class LoadIncrementalHFiles extends Configured implements Tool { String uniqueName = getUniqueName(); HColumnDescriptor familyDesc = table.getTableDescriptor().getFamily(item.family); + Path botOut = new Path(tmpDir, uniqueName + ".bottom"); Path topOut = new Path(tmpDir, uniqueName + ".top"); - splitStoreFile(getConf(), hfilePath, familyDesc, splitKey, - botOut, topOut); + splitStoreFile(getConf(), hfilePath, familyDesc, splitKey, botOut, topOut); FileSystem fs = tmpDir.getFileSystem(getConf()); fs.setPermission(tmpDir, FsPermission.valueOf("-rwxrwxrwx")); @@ -657,6 +705,10 @@ public class LoadIncrementalHFiles extends Configured implements Tool { final Pair startEndKeys) throws IOException { final Path hfilePath = item.hfilePath; + // fs is the source filesystem + if (fs == null) { + fs = hfilePath.getFileSystem(getConf()); + } HFile.Reader hfr = HFile.createReader(fs, hfilePath, new CacheConfig(getConf()), getConf()); final byte[] first, last; @@ -756,7 +808,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * failure */ protected List tryAtomicRegionLoad(final Connection conn, - final TableName tableName, final byte[] first, Collection lqis) + final TableName tableName, final byte[] first, final Collection lqis) throws IOException { final List> famPaths = new ArrayList>(lqis.size()); @@ -791,6 +843,10 @@ public class LoadIncrementalHFiles extends Configured implements Tool { //in user directory if(secureClient != null && !success) { FileSystem targetFs = FileSystem.get(getConf()); + // fs is the source filesystem + if(fs == null) { + fs = lqis.iterator().next().hfilePath.getFileSystem(getConf()); + } // Check to see if the source and target filesystems are the same // If they are the same filesystem, we will try move the files back // because previously we moved them to the staging directory. @@ -1044,4 +1100,17 @@ public class LoadIncrementalHFiles extends Configured implements Tool { System.exit(ret); } + /** + * Called from replication sink, where it manages bulkToken(staging directory) by itself. This is + * used only when {@link SecureBulkLoadEndpoint} is configured in hbase.coprocessor.region.classes + * property. This directory is used as a temporary directory where all files are initially + * copied/moved from user given directory, set all the required file permissions and then from + * their it is finally loaded into a table. This should be set only when, one would like to manage + * the staging directory by itself. Otherwise this tool will handle this by itself. 
+ * @param stagingDir staging directory path + */ + public void setBulkToken(String stagingDir) { + this.bulkToken = stagingDir; + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java index 61d1a9a7075..7a1031c0d20 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java @@ -28,22 +28,23 @@ import java.util.Map; import java.util.NavigableMap; import java.util.UUID; -import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.SizedCellScanner; import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.WALProtos; -import org.apache.hadoop.hbase.wal.WAL.Entry; -import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.wal.WAL.Entry; +import org.apache.hadoop.hbase.wal.WALKey; import com.google.protobuf.ServiceException; @@ -51,15 +52,20 @@ import com.google.protobuf.ServiceException; public class ReplicationProtbufUtil { /** * A helper to replicate a list of WAL entries using admin protocol. - * - * @param admin - * @param entries + * @param admin Admin service + * @param entries Array of WAL entries to be replicated + * @param replicationClusterId Id which will uniquely identify source cluster FS client + * configurations in the replication configuration directory + * @param sourceBaseNamespaceDir Path to source cluster base namespace directory + * @param sourceHFileArchiveDir Path to the source cluster hfile archive directory * @throws java.io.IOException */ public static void replicateWALEntry(final AdminService.BlockingInterface admin, - final Entry[] entries) throws IOException { + final Entry[] entries, String replicationClusterId, Path sourceBaseNamespaceDir, + Path sourceHFileArchiveDir) throws IOException { Pair p = - buildReplicateWALEntryRequest(entries, null); + buildReplicateWALEntryRequest(entries, null, replicationClusterId, sourceBaseNamespaceDir, + sourceHFileArchiveDir); PayloadCarryingRpcController controller = new PayloadCarryingRpcController(p.getSecond()); try { admin.replicateWALEntry(controller, p.getFirst()); @@ -78,19 +84,22 @@ public class ReplicationProtbufUtil { public static Pair buildReplicateWALEntryRequest(final Entry[] entries) { // Accumulate all the Cells seen in here. 
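Aside: a hypothetical caller sketch for the extended replicateWALEntry(...) helper above. The "data" and "archive/data" sub-directory layout is an assumption about a typical HBase root directory, not something defined by this patch.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.wal.WAL.Entry;

public class ShipWithBulkLoadInfoSketch {
  // Ship a batch of WAL entries along with the source cluster id and the two
  // directories a peer needs in order to copy bulk loaded hfiles.
  static void ship(AdminService.BlockingInterface sinkAdmin, Entry[] batch, String clusterId,
      Path rootDir) throws IOException {
    Path baseNamespaceDir = new Path(rootDir, "data");          // assumed layout
    Path hfileArchiveDir = new Path(rootDir, "archive/data");   // assumed layout
    ReplicationProtbufUtil.replicateWALEntry(sinkAdmin, batch, clusterId, baseNamespaceDir,
        hfileArchiveDir);
  }
}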
- return buildReplicateWALEntryRequest(entries, null); + return buildReplicateWALEntryRequest(entries, null, null, null, null); } /** * Create a new ReplicateWALEntryRequest from a list of HLog entries - * * @param entries the HLog entries to be replicated * @param encodedRegionName alternative region name to use if not null - * @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values - * found. + * @param replicationClusterId Id which will uniquely identify source cluster FS client + * configurations in the replication configuration directory + * @param sourceBaseNamespaceDir Path to source cluster base namespace directory + * @param sourceHFileArchiveDir Path to the source cluster hfile archive directory + * @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values found. */ public static Pair - buildReplicateWALEntryRequest(final Entry[] entries, byte[] encodedRegionName) { + buildReplicateWALEntryRequest(final Entry[] entries, byte[] encodedRegionName, + String replicationClusterId, Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir) { // Accumulate all the KVs seen in here. List> allCells = new ArrayList>(entries.length); int size = 0; @@ -147,6 +156,17 @@ public class ReplicationProtbufUtil { entryBuilder.setAssociatedCellCount(cells.size()); builder.addEntry(entryBuilder.build()); } + + if (replicationClusterId != null) { + builder.setReplicationClusterId(replicationClusterId); + } + if (sourceBaseNamespaceDir != null) { + builder.setSourceBaseNamespaceDirPath(sourceBaseNamespaceDir.toString()); + } + if (sourceHFileArchiveDir != null) { + builder.setSourceHFileArchiveDirPath(sourceHFileArchiveDir.toString()); + } + return new Pair(builder.build(), getCellScanner(allCells, size)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 00d20aad280..ead2d25e9f4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -1740,7 +1740,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler, List entries = request.getEntryList(); CellScanner cellScanner = ((PayloadCarryingRpcController)controller).cellScanner(); regionServer.getRegionServerCoprocessorHost().preReplicateLogEntries(entries, cellScanner); - regionServer.replicationSinkHandler.replicateLogEntries(entries, cellScanner); + regionServer.replicationSinkHandler.replicateLogEntries(entries, cellScanner, + request.getReplicationClusterId(), request.getSourceBaseNamespaceDirPath(), + request.getSourceHFileArchiveDirPath()); regionServer.getRegionServerCoprocessorHost().postReplicateLogEntries(entries, cellScanner); return ReplicateWALEntryResponse.newBuilder().build(); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java index 5f96bf7e1cb..836d3aa655d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java @@ -36,7 +36,13 @@ public interface ReplicationSinkService extends ReplicationService { * Carry on the list of log entries down to the sink * @param entries list of WALEntries to replicate * @param 
cells Cells that the WALEntries refer to (if cells is non-null) + * @param replicationClusterId Id which will uniquely identify source cluster FS client + * configurations in the replication configuration directory + * @param sourceBaseNamespaceDirPath Path that point to the source cluster base namespace + * directory required for replicating hfiles + * @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory * @throws IOException */ - void replicateLogEntries(List entries, CellScanner cells) throws IOException; + void replicateLogEntries(List entries, CellScanner cells, String replicationClusterId, + String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java index 457d859aef6..db98083b3ba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java @@ -85,17 +85,16 @@ public interface WALActionsListener { ); /** - * * @param htd * @param logKey - * @param logEdit - * TODO: Retire this in favor of {@link #visitLogEntryBeforeWrite(HRegionInfo, WALKey, WALEdit)} - * It only exists to get scope when replicating. Scope should be in the WALKey and not need - * us passing in a htd. + * @param logEdit TODO: Retire this in favor of + * {@link #visitLogEntryBeforeWrite(HRegionInfo, WALKey, WALEdit)} It only exists to get + * scope when replicating. Scope should be in the WALKey and not need us passing in a + * htd. + * @throws IOException If failed to parse the WALEdit */ - void visitLogEntryBeforeWrite( - HTableDescriptor htd, WALKey logKey, WALEdit logEdit - ); + void visitLogEntryBeforeWrite(HTableDescriptor htd, WALKey logKey, WALEdit logEdit) + throws IOException; /** * For notification post append to the writer. Used by metrics system at least. 
@@ -136,7 +135,9 @@ public interface WALActionsListener { public void visitLogEntryBeforeWrite(HRegionInfo info, WALKey logKey, WALEdit logEdit) {} @Override - public void visitLogEntryBeforeWrite(HTableDescriptor htd, WALKey logKey, WALEdit logEdit) {} + public void visitLogEntryBeforeWrite(HTableDescriptor htd, WALKey logKey, WALEdit logEdit) + throws IOException { + } @Override public void postAppend(final long entryLen, final long elapsedTimeMillis) {} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java index 166dc37210b..516ab8cbb48 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java @@ -18,11 +18,20 @@ package org.apache.hadoop.hbase.replication; +import java.io.IOException; import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; import java.util.NavigableMap; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor; +import org.apache.hadoop.hbase.regionserver.wal.WALEdit; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.wal.WAL.Entry; @@ -31,6 +40,7 @@ import org.apache.hadoop.hbase.wal.WAL.Entry; */ @InterfaceAudience.Private public class ScopeWALEntryFilter implements WALEntryFilter { + private static final Log LOG = LogFactory.getLog(ScopeWALEntryFilter.class); @Override public Entry filter(Entry entry) { @@ -40,13 +50,27 @@ public class ScopeWALEntryFilter implements WALEntryFilter { } ArrayList cells = entry.getEdit().getCells(); int size = cells.size(); + byte[] fam; for (int i = size - 1; i >= 0; i--) { Cell cell = cells.get(i); - // The scope will be null or empty if - // there's nothing to replicate in that WALEdit - if (!scopes.containsKey(cell.getFamily()) - || scopes.get(cell.getFamily()) == HConstants.REPLICATION_SCOPE_LOCAL) { - cells.remove(i); + // If a bulk load entry has a scope then that means user has enabled replication for + // bulk load hfiles. + // TODO There is a similar logic in TableCfWALEntryFilter but data structures are different so + // cannot refactor into one now, can revisit and see if any way to unify them. 
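Aside: a hypothetical helper showing how a bulk load marker cell is recognized and unpacked; it reuses only calls that appear in the filters below (CellUtil.matchingColumn, WALEdit.getBulkLoadDescriptor, BulkLoadDescriptor.getStoresList), while the class and method names are illustrative.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

public class BulkLoadMarkerSketch {
  // A WAL cell is a bulk load marker when it lives in METAFAMILY:BULK_LOAD.
  static boolean isBulkLoadMarker(Cell cell) {
    return CellUtil.matchingColumn(cell, WALEdit.METAFAMILY, WALEdit.BULK_LOAD);
  }

  // Column families referenced by the marker; the filters keep or drop these
  // per family, just as they do for ordinary edits.
  static List<byte[]> familiesInMarker(Cell cell) throws IOException {
    BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cell);
    List<byte[]> families = new ArrayList<byte[]>();
    for (StoreDescriptor sd : bld.getStoresList()) {
      families.add(sd.getFamilyName().toByteArray());
    }
    return families;
  }
}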
+ if (CellUtil.matchingColumn(cell, WALEdit.METAFAMILY, WALEdit.BULK_LOAD)) { + Cell filteredBulkLoadEntryCell = filterBulkLoadEntries(scopes, cell); + if (filteredBulkLoadEntryCell != null) { + cells.set(i, filteredBulkLoadEntryCell); + } else { + cells.remove(i); + } + } else { + // The scope will be null or empty if + // there's nothing to replicate in that WALEdit + fam = CellUtil.cloneFamily(cell); + if (!scopes.containsKey(fam) || scopes.get(fam) == HConstants.REPLICATION_SCOPE_LOCAL) { + cells.remove(i); + } } } if (cells.size() < size / 2) { @@ -55,4 +79,41 @@ public class ScopeWALEntryFilter implements WALEntryFilter { return entry; } + private Cell filterBulkLoadEntries(NavigableMap scopes, Cell cell) { + byte[] fam; + BulkLoadDescriptor bld = null; + try { + bld = WALEdit.getBulkLoadDescriptor(cell); + } catch (IOException e) { + LOG.warn("Failed to get bulk load events information from the WAL file.", e); + return cell; + } + List storesList = bld.getStoresList(); + // Copy the StoreDescriptor list and update it as storesList is a unmodifiableList + List copiedStoresList = new ArrayList(storesList); + Iterator copiedStoresListIterator = copiedStoresList.iterator(); + boolean anyStoreRemoved = false; + while (copiedStoresListIterator.hasNext()) { + StoreDescriptor sd = copiedStoresListIterator.next(); + fam = sd.getFamilyName().toByteArray(); + if (!scopes.containsKey(fam) || scopes.get(fam) == HConstants.REPLICATION_SCOPE_LOCAL) { + copiedStoresListIterator.remove(); + anyStoreRemoved = true; + } + } + + if (!anyStoreRemoved) { + return cell; + } else if (copiedStoresList.isEmpty()) { + return null; + } + BulkLoadDescriptor.Builder newDesc = + BulkLoadDescriptor.newBuilder().setTableName(bld.getTableName()) + .setEncodedRegionName(bld.getEncodedRegionName()) + .setBulkloadSeqNum(bld.getBulkloadSeqNum()); + newDesc.addAllStores(copiedStoresList); + BulkLoadDescriptor newBulkLoadDescriptor = newDesc.build(); + return CellUtil.createCell(CellUtil.cloneRow(cell), WALEdit.METAFAMILY, WALEdit.BULK_LOAD, + cell.getTimestamp(), cell.getTypeByte(), newBulkLoadDescriptor.toByteArray()); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/TableCfWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/TableCfWALEntryFilter.java index b8925125423..6c2a752d7cb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/TableCfWALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/TableCfWALEntryFilter.java @@ -18,14 +18,20 @@ package org.apache.hadoop.hbase.replication; +import java.io.IOException; import java.util.ArrayList; +import java.util.Iterator; import java.util.List; import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor; +import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.util.Bytes; @@ -51,19 +57,37 @@ public class TableCfWALEntryFilter implements WALEntryFilter { ", degenerate as if it's not configured by keeping tableCFs==null"); } int size = cells.size(); - + + // If null means user has explicitly not configured any table CFs so all the tables data are + // 
applicable for replication + if (tableCFs == null) { + return entry; + } // return null(prevent replicating) if logKey's table isn't in this peer's - // replicable table list (empty tableCFs means all table are replicable) - if (tableCFs != null && !tableCFs.containsKey(tabName)) { + // replicable table list + if (!tableCFs.containsKey(tabName)) { return null; } else { - List cfs = (tableCFs == null) ? null : tableCFs.get(tabName); + List cfs = tableCFs.get(tabName); for (int i = size - 1; i >= 0; i--) { Cell cell = cells.get(i); - // ignore(remove) kv if its cf isn't in the replicable cf list - // (empty cfs means all cfs of this table are replicable) - if ((cfs != null && !cfs.contains(Bytes.toString(cell.getFamily())))) { - cells.remove(i); + // TODO There is a similar logic in ScopeWALEntryFilter but data structures are different so + // cannot refactor into one now, can revisit and see if any way to unify them. + // Filter bulk load entries separately + if (CellUtil.matchingColumn(cell, WALEdit.METAFAMILY, WALEdit.BULK_LOAD)) { + Cell filteredBulkLoadEntryCell = filterBulkLoadEntries(cfs, cell); + if (filteredBulkLoadEntryCell != null) { + cells.set(i, filteredBulkLoadEntryCell); + } else { + cells.remove(i); + } + } else { + // ignore(remove) kv if its cf isn't in the replicable cf list + // (empty cfs means all cfs of this table are replicable) + if ((cfs != null) && !cfs.contains(Bytes.toString(cell.getFamilyArray(), + cell.getFamilyOffset(), cell.getFamilyLength()))) { + cells.remove(i); + } } } } @@ -73,4 +97,41 @@ public class TableCfWALEntryFilter implements WALEntryFilter { return entry; } + private Cell filterBulkLoadEntries(List cfs, Cell cell) { + byte[] fam; + BulkLoadDescriptor bld = null; + try { + bld = WALEdit.getBulkLoadDescriptor(cell); + } catch (IOException e) { + LOG.warn("Failed to get bulk load events information from the WAL file.", e); + return cell; + } + List storesList = bld.getStoresList(); + // Copy the StoreDescriptor list and update it as storesList is a unmodifiableList + List copiedStoresList = new ArrayList(storesList); + Iterator copiedStoresListIterator = copiedStoresList.iterator(); + boolean anyStoreRemoved = false; + while (copiedStoresListIterator.hasNext()) { + StoreDescriptor sd = copiedStoresListIterator.next(); + fam = sd.getFamilyName().toByteArray(); + if (cfs != null && !cfs.contains(Bytes.toString(fam))) { + copiedStoresListIterator.remove(); + anyStoreRemoved = true; + } + } + + if (!anyStoreRemoved) { + return cell; + } else if (copiedStoresList.isEmpty()) { + return null; + } + BulkLoadDescriptor.Builder newDesc = + BulkLoadDescriptor.newBuilder().setTableName(bld.getTableName()) + .setEncodedRegionName(bld.getEncodedRegionName()) + .setBulkloadSeqNum(bld.getBulkloadSeqNum()); + newDesc.addAllStores(copiedStoresList); + BulkLoadDescriptor newBulkLoadDescriptor = newDesc.build(); + return CellUtil.createCell(CellUtil.cloneRow(cell), WALEdit.METAFAMILY, WALEdit.BULK_LOAD, + cell.getTimestamp(), cell.getTypeByte(), newBulkLoadDescriptor.toByteArray()); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java new file mode 100644 index 00000000000..9bfea4b0906 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java @@ -0,0 +1,193 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. + */ +package org.apache.hadoop.hbase.replication.master; + +import com.google.common.base.Predicate; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Iterables; +import com.google.common.collect.Sets; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; +import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; +import org.apache.hadoop.hbase.replication.ReplicationFactory; +import org.apache.hadoop.hbase.replication.ReplicationQueuesClient; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; + +/** + * Implementation of a file cleaner that checks if a hfile is still scheduled for replication before + * deleting it from hfile archive directory. + */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) +public class ReplicationHFileCleaner extends BaseHFileCleanerDelegate implements Abortable { + private static final Log LOG = LogFactory.getLog(ReplicationHFileCleaner.class); + private ZooKeeperWatcher zkw; + private ReplicationQueuesClient rqc; + private boolean stopped = false; + private boolean aborted; + + @Override + public Iterable getDeletableFiles(Iterable files) { + // all members of this class are null if replication is disabled, + // so we cannot filter the files + if (this.getConf() == null) { + return files; + } + + final Set hfileRefs; + try { + // The concurrently created new hfile entries in ZK may not be included in the return list, + // but they won't be deleted because they're not in the checking set. 
+ hfileRefs = loadHFileRefsFromPeers(); + } catch (KeeperException e) { + LOG.warn("Failed to read hfile references from zookeeper, skipping checking deletable files"); + return Collections.emptyList(); + } + return Iterables.filter(files, new Predicate() { + @Override + public boolean apply(FileStatus file) { + String hfile = file.getPath().getName(); + boolean foundHFileRefInQueue = hfileRefs.contains(hfile); + if (LOG.isDebugEnabled()) { + if (foundHFileRefInQueue) { + LOG.debug("Found hfile reference in ZK, keeping: " + hfile); + } else { + LOG.debug("Did not find hfile reference in ZK, deleting: " + hfile); + } + } + return !foundHFileRefInQueue; + } + }); + } + + /** + * Load all hfile references in all replication queues from ZK. This method guarantees to return a + * snapshot which contains all hfile references in the zookeeper at the start of this call. + * However, some newly created hfile references during the call may not be included. + */ + private Set loadHFileRefsFromPeers() throws KeeperException { + Set hfileRefs = Sets.newHashSet(); + List listOfPeers; + for (int retry = 0;; retry++) { + int v0 = rqc.getHFileRefsNodeChangeVersion(); + hfileRefs.clear(); + listOfPeers = rqc.getAllPeersFromHFileRefsQueue(); + if (listOfPeers == null) { + LOG.debug("Didn't find any peers with hfile references, won't prevent any deletions."); + return ImmutableSet.of(); + } + for (String id : listOfPeers) { + List peerHFileRefs = rqc.getReplicableHFiles(id); + if (peerHFileRefs != null) { + hfileRefs.addAll(peerHFileRefs); + } + } + int v1 = rqc.getHFileRefsNodeChangeVersion(); + if (v0 == v1) { + return hfileRefs; + } + LOG.debug(String.format("Replication hfile references node cversion changed from " + + "%d to %d, retry = %d", v0, v1, retry)); + } + } + + @Override + public void setConf(Configuration config) { + // If either replication or replication of bulk load hfiles is disabled, keep all members null + if (!(config.getBoolean(HConstants.REPLICATION_ENABLE_KEY, + HConstants.REPLICATION_ENABLE_DEFAULT) && config.getBoolean( + HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, + HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT))) { + LOG.warn(HConstants.REPLICATION_ENABLE_KEY + + " is not enabled so allowing all hfile references to be deleted. Better to remove " + + ReplicationHFileCleaner.class + " from " + HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS + + " configuration."); + return; + } + // Make my own Configuration. Then I'll have my own connection to zk that + // I can close myself when time comes. 
+ Configuration conf = new Configuration(config); + super.setConf(conf); + try { + initReplicationQueuesClient(conf); + } catch (IOException e) { + LOG.error("Error while configuring " + this.getClass().getName(), e); + } + } + + private void initReplicationQueuesClient(Configuration conf) + throws ZooKeeperConnectionException, IOException { + this.zkw = new ZooKeeperWatcher(conf, "replicationHFileCleaner", null); + this.rqc = ReplicationFactory.getReplicationQueuesClient(zkw, conf, this); + } + + @Override + public void stop(String why) { + if (this.stopped) { + return; + } + this.stopped = true; + if (this.zkw != null) { + LOG.info("Stopping " + this.zkw); + this.zkw.close(); + } + } + + @Override + public boolean isStopped() { + return this.stopped; + } + + @Override + public void abort(String why, Throwable e) { + LOG.warn("Aborting ReplicationHFileCleaner because " + why, e); + this.aborted = true; + stop(why); + } + + @Override + public boolean isAborted() { + return this.aborted; + } + + @Override + public boolean isFileDeletable(FileStatus fStat) { + Set hfileRefsFromQueue; + // all members of this class are null if replication is disabled, + // so do not stop from deleting the file + if (getConf() == null) { + return true; + } + + try { + hfileRefsFromQueue = loadHFileRefsFromPeers(); + } catch (KeeperException e) { + LOG.warn("Failed to read hfile references from zookeeper, skipping checking deletable " + + "file for " + fStat.getPath()); + return false; + } + return !hfileRefsFromQueue.contains(fStat.getPath().getName()); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java new file mode 100644 index 00000000000..8d5c6d4195b --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. + */ +package org.apache.hadoop.hbase.replication.regionserver; + +import java.io.File; +import java.io.IOException; +import java.net.URL; +import java.util.HashMap; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +/** + * This will load all the xml configuration files for the source cluster replication ID from + * user configured replication configuration directory. 
+ */ +@InterfaceAudience.Private +public class DefaultSourceFSConfigurationProvider implements SourceFSConfigurationProvider { + private static final Log LOG = LogFactory.getLog(DefaultSourceFSConfigurationProvider.class); + // Map containing all the source clusters configurations against their replication cluster id + private Map sourceClustersConfs = new HashMap<>(); + private static final String XML = ".xml"; + + @Override + public Configuration getConf(Configuration sinkConf, String replicationClusterId) + throws IOException { + if (sourceClustersConfs.get(replicationClusterId) == null) { + synchronized (this.sourceClustersConfs) { + if (sourceClustersConfs.get(replicationClusterId) == null) { + LOG.info("Loading source cluster FS client conf for cluster " + replicationClusterId); + // Load only user provided client configurations. + Configuration sourceClusterConf = new Configuration(false); + + String replicationConfDir = sinkConf.get(HConstants.REPLICATION_CONF_DIR); + if (replicationConfDir == null) { + LOG.debug(HConstants.REPLICATION_CONF_DIR + " is not configured."); + URL resource = HBaseConfiguration.class.getClassLoader().getResource("hbase-site.xml"); + if (resource != null) { + String path = resource.getPath(); + replicationConfDir = path.substring(0, path.lastIndexOf("/")); + } else { + replicationConfDir = System.getenv("HBASE_CONF_DIR"); + } + } + + LOG.info("Loading source cluster " + replicationClusterId + + " file system configurations from xml files under directory " + replicationConfDir); + File confDir = new File(replicationConfDir, replicationClusterId); + String[] listofConfFiles = FileUtil.list(confDir); + for (String confFile : listofConfFiles) { + if (new File(confDir, confFile).isFile() && confFile.endsWith(XML)) { + // Add all the user provided client conf files + sourceClusterConf.addResource(new Path(confDir.getPath(), confFile)); + } + } + this.sourceClustersConfs.put(replicationClusterId, sourceClusterConf); + } + } + } + return this.sourceClustersConfs.get(replicationClusterId); + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index d3519270329..22646dbe666 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -33,26 +33,29 @@ import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState; import org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager.SinkPeer; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.ipc.RemoteException; +import com.google.common.annotations.VisibleForTesting; + /** * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} * implementation for replicating to another HBase cluster. @@ -84,8 +87,12 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi // Handles connecting to peer region servers private ReplicationSinkManager replicationSinkMgr; private boolean peersSelected = false; + private String replicationClusterId = ""; private ThreadPoolExecutor exec; private int maxThreads; + private Path baseNamespaceDir; + private Path hfileArchiveDir; + private boolean replicationBulkLoadDataEnabled; @Override public void init(Context context) throws IOException { @@ -108,7 +115,19 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT); this.exec = new ThreadPoolExecutor(1, maxThreads, 60, TimeUnit.SECONDS, - new SynchronousQueue()); + new SynchronousQueue()); + + this.replicationBulkLoadDataEnabled = + conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, + HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT); + if (this.replicationBulkLoadDataEnabled) { + replicationClusterId = this.conf.get(HConstants.REPLICATION_CLUSTER_ID); + } + // Construct base namespace directory and hfile archive directory path + Path rootDir = FSUtils.getRootDir(conf); + Path baseNSDir = new Path(HConstants.BASE_NAMESPACE_DIR); + baseNamespaceDir = new Path(rootDir, baseNSDir); + hfileArchiveDir = new Path(rootDir, new Path(HConstants.HFILE_ARCHIVE_DIRECTORY, baseNSDir)); } private void decorateConf() { @@ -317,8 +336,8 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi try { sinkPeer = replicationSinkMgr.getReplicationSink(); BlockingInterface rrs = sinkPeer.getRegionServer(); - ReplicationProtbufUtil.replicateWALEntry(rrs, - entries.toArray(new Entry[entries.size()])); + ReplicationProtbufUtil.replicateWALEntry(rrs, entries.toArray(new Entry[entries.size()]), + replicationClusterId, baseNamespaceDir, hfileArchiveDir); replicationSinkMgr.reportSinkSuccess(sinkPeer); return ordinal; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java new file mode 100644 index 00000000000..17f67804ca0 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java @@ -0,0 +1,393 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. 
The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. + */ +package org.apache.hadoop.hbase.replication.regionserver; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.math.BigInteger; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.Deque; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.RegionLocator; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles; +import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.LoadQueueItem; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.UserProvider; +import org.apache.hadoop.hbase.security.token.FsDelegationToken; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; + +/** + * It is used for replicating HFile entries. It will first copy parallely all the hfiles to a local + * staging directory and then it will use ({@link LoadIncrementalHFiles} to prepare a collection of + * {@link LoadQueueItem} which will finally be loaded(replicated) into the table of this cluster. 
+ */ +@InterfaceAudience.Private +public class HFileReplicator { + /** Maximum number of threads to allow in pool to copy hfiles during replication */ + public static final String REPLICATION_BULKLOAD_COPY_MAXTHREADS_KEY = + "hbase.replication.bulkload.copy.maxthreads"; + public static final int REPLICATION_BULKLOAD_COPY_MAXTHREADS_DEFAULT = 10; + /** Number of hfiles to copy per thread during replication */ + public static final String REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_KEY = + "hbase.replication.bulkload.copy.hfiles.perthread"; + public static final int REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_DEFAULT = 10; + + private static final Log LOG = LogFactory.getLog(HFileReplicator.class); + private final String UNDERSCORE = "_"; + private final static FsPermission PERM_ALL_ACCESS = FsPermission.valueOf("-rwxrwxrwx"); + + private Configuration sourceClusterConf; + private String sourceBaseNamespaceDirPath; + private String sourceHFileArchiveDirPath; + private Map>>> bulkLoadHFileMap; + private FileSystem sinkFs; + private FsDelegationToken fsDelegationToken; + private UserProvider userProvider; + private Configuration conf; + private Connection connection; + private String hbaseStagingDir; + private ThreadPoolExecutor exec; + private int maxCopyThreads; + private int copiesPerThread; + + public HFileReplicator(Configuration sourceClusterConf, + String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath, + Map>>> tableQueueMap, Configuration conf, + Connection connection) throws IOException { + this.sourceClusterConf = sourceClusterConf; + this.sourceBaseNamespaceDirPath = sourceBaseNamespaceDirPath; + this.sourceHFileArchiveDirPath = sourceHFileArchiveDirPath; + this.bulkLoadHFileMap = tableQueueMap; + this.conf = conf; + this.connection = connection; + + userProvider = UserProvider.instantiate(conf); + fsDelegationToken = new FsDelegationToken(userProvider, "renewer"); + this.hbaseStagingDir = conf.get("hbase.bulkload.staging.dir"); + this.maxCopyThreads = + this.conf.getInt(REPLICATION_BULKLOAD_COPY_MAXTHREADS_KEY, + REPLICATION_BULKLOAD_COPY_MAXTHREADS_DEFAULT); + ThreadFactoryBuilder builder = new ThreadFactoryBuilder(); + builder.setNameFormat("HFileReplicationCallable-%1$d"); + this.exec = + new ThreadPoolExecutor(1, maxCopyThreads, 60, TimeUnit.SECONDS, + new LinkedBlockingQueue(), builder.build()); + this.exec.allowCoreThreadTimeOut(true); + this.copiesPerThread = + conf.getInt(REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_KEY, + REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_DEFAULT); + + sinkFs = FileSystem.get(conf); + } + + public Void replicate() throws IOException { + // Copy all the hfiles to the local file system + Map tableStagingDirsMap = copyHFilesToStagingDir(); + + int maxRetries = conf.getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10); + + for (Entry tableStagingDir : tableStagingDirsMap.entrySet()) { + String tableNameString = tableStagingDir.getKey(); + Path stagingDir = tableStagingDir.getValue(); + + LoadIncrementalHFiles loadHFiles = null; + try { + loadHFiles = new LoadIncrementalHFiles(conf); + } catch (Exception e) { + LOG.error("Failed to initialize LoadIncrementalHFiles for replicating bulk loaded" + + " data.", e); + throw new IOException(e); + } + Configuration newConf = HBaseConfiguration.create(conf); + newConf.set(LoadIncrementalHFiles.CREATE_TABLE_CONF_KEY, "no"); + loadHFiles.setConf(newConf); + + TableName tableName = TableName.valueOf(tableNameString); + Table table = this.connection.getTable(tableName); + + // Prepare collection of queue of 
hfiles to be loaded(replicated) + Deque queue = new LinkedList(); + loadHFiles.prepareHFileQueue(stagingDir, table, queue, false); + + if (queue.isEmpty()) { + LOG.warn("Replication process did not find any files to replicate in directory " + + stagingDir.toUri()); + return null; + } + + try (RegionLocator locator = connection.getRegionLocator(tableName)) { + + fsDelegationToken.acquireDelegationToken(sinkFs); + + // Set the staging directory which will be used by LoadIncrementalHFiles for loading the + // data + loadHFiles.setBulkToken(stagingDir.toString()); + + doBulkLoad(loadHFiles, table, queue, locator, maxRetries); + } finally { + cleanup(stagingDir.toString(), table); + } + } + return null; + } + + private void doBulkLoad(LoadIncrementalHFiles loadHFiles, Table table, + Deque queue, RegionLocator locator, int maxRetries) throws IOException { + int count = 0; + Pair startEndKeys; + while (!queue.isEmpty()) { + // need to reload split keys each iteration. + startEndKeys = locator.getStartEndKeys(); + if (count != 0) { + LOG.warn("Error occured while replicating HFiles, retry attempt " + count + " with " + + queue.size() + " files still remaining to replicate."); + } + + if (maxRetries != 0 && count >= maxRetries) { + throw new IOException("Retry attempted " + count + + " times without completing, bailing out."); + } + count++; + + // Try bulk load + loadHFiles.loadHFileQueue(table, connection, queue, startEndKeys); + } + } + + private void cleanup(String stagingDir, Table table) { + // Release the file system delegation token + fsDelegationToken.releaseDelegationToken(); + // Delete the staging directory + if (stagingDir != null) { + try { + sinkFs.delete(new Path(stagingDir), true); + } catch (IOException e) { + LOG.warn("Failed to delete the staging directory " + stagingDir, e); + } + } + // Do not close the file system + + /* + * if (sinkFs != null) { try { sinkFs.close(); } catch (IOException e) { LOG.warn( + * "Failed to close the file system"); } } + */ + + // Close the table + if (table != null) { + try { + table.close(); + } catch (IOException e) { + LOG.warn("Failed to close the table.", e); + } + } + } + + private Map copyHFilesToStagingDir() throws IOException { + Map mapOfCopiedHFiles = new HashMap(); + Pair> familyHFilePathsPair; + List hfilePaths; + byte[] family; + Path familyStagingDir; + int familyHFilePathsPairsListSize; + int totalNoOfHFiles; + List>> familyHFilePathsPairsList; + FileSystem sourceFs = null; + + try { + Path sourceClusterPath = new Path(sourceBaseNamespaceDirPath); + /* + * Path#getFileSystem will by default get the FS from cache. If both source and sink cluster + * has same FS name service then it will return peer cluster FS. To avoid this we explicitly + * disable the loading of FS from cache, so that a new FS is created with source cluster + * configuration. 
+ */ + String sourceScheme = sourceClusterPath.toUri().getScheme(); + String disableCacheName = + String.format("fs.%s.impl.disable.cache", new Object[] { sourceScheme }); + sourceClusterConf.setBoolean(disableCacheName, true); + + sourceFs = sourceClusterPath.getFileSystem(sourceClusterConf); + + User user = userProvider.getCurrent(); + // For each table name in the map + for (Entry>>> tableEntry : bulkLoadHFileMap + .entrySet()) { + String tableName = tableEntry.getKey(); + + // Create staging directory for each table + Path stagingDir = + createStagingDir(new Path(hbaseStagingDir), user, TableName.valueOf(tableName)); + + familyHFilePathsPairsList = tableEntry.getValue(); + familyHFilePathsPairsListSize = familyHFilePathsPairsList.size(); + + // For each list of family hfile paths pair in the table + for (int i = 0; i < familyHFilePathsPairsListSize; i++) { + familyHFilePathsPair = familyHFilePathsPairsList.get(i); + + family = familyHFilePathsPair.getFirst(); + hfilePaths = familyHFilePathsPair.getSecond(); + + familyStagingDir = new Path(stagingDir, Bytes.toString(family)); + totalNoOfHFiles = hfilePaths.size(); + + // For each list of hfile paths for the family + List> futures = new ArrayList>(); + Callable c; + Future future; + int currentCopied = 0; + // Copy the hfiles parallely + while (totalNoOfHFiles > currentCopied + this.copiesPerThread) { + c = + new Copier(sourceFs, familyStagingDir, hfilePaths.subList(currentCopied, + currentCopied + this.copiesPerThread)); + future = exec.submit(c); + futures.add(future); + currentCopied += this.copiesPerThread; + } + + int remaining = totalNoOfHFiles - currentCopied; + if (remaining > 0) { + c = + new Copier(sourceFs, familyStagingDir, hfilePaths.subList(currentCopied, + currentCopied + remaining)); + future = exec.submit(c); + futures.add(future); + } + + for (Future f : futures) { + try { + f.get(); + } catch (InterruptedException e) { + InterruptedIOException iioe = + new InterruptedIOException( + "Failed to copy HFiles to local file system. This will be retried again " + + "by the source cluster."); + iioe.initCause(e); + throw iioe; + } catch (ExecutionException e) { + throw new IOException("Failed to copy HFiles to local file system. This will " + + "be retried again by the source cluster.", e); + } + } + } + // Add the staging directory to this table. Staging directory contains all the hfiles + // belonging to this table + mapOfCopiedHFiles.put(tableName, stagingDir); + } + return mapOfCopiedHFiles; + } finally { + if (sourceFs != null) { + sourceFs.close(); + } + if(exec != null) { + exec.shutdown(); + } + } + } + + private Path createStagingDir(Path baseDir, User user, TableName tableName) throws IOException { + String tblName = tableName.getNameAsString().replace(":", UNDERSCORE); + int RANDOM_WIDTH = 320; + int RANDOM_RADIX = 32; + String doubleUnderScore = UNDERSCORE + UNDERSCORE; + String randomDir = user.getShortName() + doubleUnderScore + tblName + doubleUnderScore + + (new BigInteger(RANDOM_WIDTH, new SecureRandom()).toString(RANDOM_RADIX)); + return createStagingDir(baseDir, user, randomDir); + } + + private Path createStagingDir(Path baseDir, User user, String randomDir) throws IOException { + Path p = new Path(baseDir, randomDir); + sinkFs.mkdirs(p, PERM_ALL_ACCESS); + sinkFs.setPermission(p, PERM_ALL_ACCESS); + return p; + } + + /** + * This class will copy the given hfiles from the given source file system to the given local file + * system staging directory. 
+ */ + private class Copier implements Callable { + private FileSystem sourceFs; + private Path stagingDir; + private List hfiles; + + public Copier(FileSystem sourceFs, final Path stagingDir, final List hfiles) + throws IOException { + this.sourceFs = sourceFs; + this.stagingDir = stagingDir; + this.hfiles = hfiles; + } + + @Override + public Void call() throws IOException { + Path sourceHFilePath; + Path localHFilePath; + int totalHFiles = hfiles.size(); + for (int i = 0; i < totalHFiles; i++) { + sourceHFilePath = new Path(sourceBaseNamespaceDirPath, hfiles.get(i)); + localHFilePath = new Path(stagingDir, sourceHFilePath.getName()); + try { + FileUtil.copy(sourceFs, sourceHFilePath, sinkFs, localHFilePath, false, conf); + // If any other exception other than FNFE then we will fail the replication requests and + // source will retry to replicate these data. + } catch (FileNotFoundException e) { + LOG.info("Failed to copy hfile from " + sourceHFilePath + " to " + localHFilePath + + ". Trying to copy from hfile archive directory.", + e); + sourceHFilePath = new Path(sourceHFileArchiveDirPath, hfiles.get(i)); + + try { + FileUtil.copy(sourceFs, sourceHFilePath, sinkFs, localHFilePath, false, conf); + } catch (FileNotFoundException e1) { + // This will mean that the hfile does not exists any where in source cluster FS. So we + // cannot do anything here just log and return. + LOG.error("Failed to copy hfile from " + sourceHFilePath + " to " + localHFilePath + + ". Hence ignoring this hfile from replication..", + e1); + return null; + } + } + sinkFs.setPermission(localHFilePath, PERM_ALL_ACCESS); + } + return null; + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java index 37dc1dd4e4e..f308daf8e84 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java @@ -47,7 +47,7 @@ public class MetricsSink { if (lastTimestampForAge != timestamp) { lastTimestampForAge = timestamp; age = System.currentTimeMillis() - lastTimestampForAge; - } + } mss.setLastAppliedOpAge(age); return age; } @@ -71,6 +71,17 @@ public class MetricsSink { mss.incrAppliedOps(batchSize); } + /** + * Convience method to change metrics when a batch of operations are applied. 
+ * + * @param batchSize total number of mutations that are applied/replicated + * @param hfileSize total number of hfiles that are applied/replicated + */ + public void applyBatch(long batchSize, long hfileSize) { + applyBatch(batchSize); + mss.incrAppliedHFiles(hfileSize); + } + /** * Get the Age of Last Applied Op * @return ageOfLastAppliedOp diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java index f9f7001653e..9687af7693d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java @@ -40,11 +40,13 @@ public class MetricsSource { // tracks last shipped timestamp for each wal group private Map lastTimeStamps = new HashMap(); private int lastQueueSize = 0; + private long lastHFileRefsQueueSize = 0; private String id; private final MetricsReplicationSourceSource singleSourceSource; private final MetricsReplicationSourceSource globalSourceSource; + /** * Constructor used to register the metrics * @@ -143,6 +145,18 @@ public class MetricsSource { globalSourceSource.incrShippedKBs(sizeInKB); } + /** + * Convience method to apply changes to metrics do to shipping a batch of logs. + * + * @param batchSize the size of the batch that was shipped to sinks. + * @param hfiles total number of hfiles shipped to sinks. + */ + public void shipBatch(long batchSize, int sizeInKB, long hfiles) { + shipBatch(batchSize, sizeInKB); + singleSourceSource.incrHFilesShipped(hfiles); + globalSourceSource.incrHFilesShipped(hfiles); + } + /** increase the byte number read by source from log file */ public void incrLogReadInBytes(long readInBytes) { singleSourceSource.incrLogReadInBytes(readInBytes); @@ -153,8 +167,10 @@ public class MetricsSource { public void clear() { singleSourceSource.clear(); globalSourceSource.decrSizeOfLogQueue(lastQueueSize); + globalSourceSource.decrSizeOfHFileRefsQueue(lastHFileRefsQueueSize); lastTimeStamps.clear(); lastQueueSize = 0; + lastHFileRefsQueueSize = 0; } /** @@ -194,4 +210,19 @@ public class MetricsSource { public String getPeerID() { return id; } + + public void incrSizeOfHFileRefsQueue(long size) { + singleSourceSource.incrSizeOfHFileRefsQueue(size); + globalSourceSource.incrSizeOfHFileRefsQueue(size); + lastHFileRefsQueueSize = size; + } + + public void decrSizeOfHFileRefsQueue(int size) { + singleSourceSource.decrSizeOfHFileRefsQueue(size); + globalSourceSource.decrSizeOfHFileRefsQueue(size); + lastHFileRefsQueueSize -= size; + if (lastHFileRefsQueueSize < 0) { + lastHFileRefsQueueSize = 0; + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java index db3b87d471f..1b4e0aad2dd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java @@ -650,8 +650,8 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { // set the region name for the target region replica Pair p = - ReplicationProtbufUtil.buildReplicateWALEntryRequest( - entriesArray, 
location.getRegionInfo().getEncodedNameAsBytes()); + ReplicationProtbufUtil.buildReplicateWALEntryRequest(entriesArray, + location.getRegionInfo().getEncodedNameAsBytes(), null, null, null); try { PayloadCarryingRpcController controller = rpcControllerFactory.newController(p.getSecond()); controller.setCallTimeout(timeout); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index 78bb92e989d..7110273ec88 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -34,7 +34,6 @@ import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -42,13 +41,16 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor; import org.apache.hadoop.hbase.regionserver.ReplicationSinkService; import org.apache.hadoop.hbase.regionserver.ReplicationSourceService; -import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.replication.ReplicationException; @@ -56,8 +58,10 @@ import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationPeers; import org.apache.hadoop.hbase.replication.ReplicationQueues; import org.apache.hadoop.hbase.replication.ReplicationTracker; +import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner; import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.zookeeper.KeeperException; @@ -72,6 +76,7 @@ public class Replication extends WALActionsListener.Base implements private static final Log LOG = LogFactory.getLog(Replication.class); private boolean replication; + private boolean replicationForBulkLoadData; private ReplicationSourceManager replicationManager; private ReplicationQueues replicationQueues; private ReplicationPeers replicationPeers; @@ -85,7 +90,6 @@ public class Replication extends WALActionsListener.Base implements private int statsThreadPeriod; // ReplicationLoad to access replication metrics private ReplicationLoad replicationLoad; - /** * Instantiate the replication management (if rep is enabled). 
* @param server Hosting server @@ -110,11 +114,20 @@ public class Replication extends WALActionsListener.Base implements this.server = server; this.conf = this.server.getConfiguration(); this.replication = isReplication(this.conf); + this.replicationForBulkLoadData = isReplicationForBulkLoadDataEnabled(this.conf); this.scheduleThreadPool = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder() .setNameFormat(server.getServerName().toShortString() + "Replication Statistics #%d") .setDaemon(true) .build()); + if (this.replicationForBulkLoadData) { + if (conf.get(HConstants.REPLICATION_CLUSTER_ID) == null + || conf.get(HConstants.REPLICATION_CLUSTER_ID).isEmpty()) { + throw new IllegalArgumentException(HConstants.REPLICATION_CLUSTER_ID + + " cannot be null/empty when " + HConstants.REPLICATION_BULKLOAD_ENABLE_KEY + + " is set to true."); + } + } if (replication) { try { this.replicationQueues = @@ -159,6 +172,15 @@ public class Replication extends WALActionsListener.Base implements return c.getBoolean(REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT); } + /** + * @param c Configuration to look at + * @return True if replication for bulk load data is enabled. + */ + public static boolean isReplicationForBulkLoadDataEnabled(final Configuration c) { + return c.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, + HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT); + } + /* * Returns an object to listen to new wal changes **/ @@ -188,14 +210,22 @@ public class Replication extends WALActionsListener.Base implements /** * Carry on the list of log entries down to the sink * @param entries list of entries to replicate - * @param cells The data -- the cells -- that entries describes (the entries - * do not contain the Cells we are replicating; they are passed here on the side in this - * CellScanner). + * @param cells The data -- the cells -- that entries describes (the entries do not + * contain the Cells we are replicating; they are passed here on the side in this + * CellScanner). + * @param replicationClusterId Id which will uniquely identify source cluster FS client + * configurations in the replication configuration directory + * @param sourceBaseNamespaceDirPath Path that point to the source cluster base namespace + * directory required for replicating hfiles + * @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory * @throws IOException */ - public void replicateLogEntries(List entries, CellScanner cells) throws IOException { + public void replicateLogEntries(List entries, CellScanner cells, + String replicationClusterId, String sourceBaseNamespaceDirPath, + String sourceHFileArchiveDirPath) throws IOException { if (this.replication) { - this.replicationSink.replicateEntries(entries, cells); + this.replicationSink.replicateEntries(entries, cells, replicationClusterId, + sourceBaseNamespaceDirPath, sourceHFileArchiveDirPath); } } @@ -227,34 +257,44 @@ public class Replication extends WALActionsListener.Base implements } @Override - public void visitLogEntryBeforeWrite(HTableDescriptor htd, WALKey logKey, - WALEdit logEdit) { - scopeWALEdits(htd, logKey, logEdit); + public void visitLogEntryBeforeWrite(HTableDescriptor htd, WALKey logKey, WALEdit logEdit) + throws IOException { + scopeWALEdits(htd, logKey, logEdit, this.conf, this.getReplicationManager()); } /** - * Utility method used to set the correct scopes on each log key. Doesn't set a scope on keys - * from compaction WAL edits and if the scope is local. 
+ * Utility method used to set the correct scopes on each log key. Doesn't set a scope on keys from + * compaction WAL edits and if the scope is local. * @param htd Descriptor used to find the scope to use * @param logKey Key that may get scoped according to its edits * @param logEdit Edits used to lookup the scopes + * @param replicationManager Manager used to add bulk load events hfile references + * @throws IOException If failed to parse the WALEdit */ - public static void scopeWALEdits(HTableDescriptor htd, WALKey logKey, - WALEdit logEdit) { - NavigableMap scopes = - new TreeMap(Bytes.BYTES_COMPARATOR); + public static void scopeWALEdits(HTableDescriptor htd, WALKey logKey, WALEdit logEdit, + Configuration conf, ReplicationSourceManager replicationManager) throws IOException { + NavigableMap scopes = new TreeMap(Bytes.BYTES_COMPARATOR); byte[] family; + boolean replicationForBulkLoadEnabled = isReplicationForBulkLoadDataEnabled(conf); for (Cell cell : logEdit.getCells()) { - family = cell.getFamily(); - // This is expected and the KV should not be replicated - if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) continue; - // Unexpected, has a tendency to happen in unit tests - assert htd.getFamily(family) != null; + if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) { + if (replicationForBulkLoadEnabled && CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) { + scopeBulkLoadEdits(htd, replicationManager, scopes, logKey.getTablename(), cell); + } else { + // Skip the flush/compaction/region events + continue; + } + } else { + family = CellUtil.cloneFamily(cell); + // Unexpected, has a tendency to happen in unit tests + assert htd.getFamily(family) != null; - int scope = htd.getFamily(family).getScope(); - if (scope != REPLICATION_SCOPE_LOCAL && - !scopes.containsKey(family)) { - scopes.put(family, scope); + if (!scopes.containsKey(family)) { + int scope = htd.getFamily(family).getScope(); + if (scope != REPLICATION_SCOPE_LOCAL) { + scopes.put(family, scope); + } + } } } if (!scopes.isEmpty()) { @@ -262,6 +302,40 @@ public class Replication extends WALActionsListener.Base implements } } + private static void scopeBulkLoadEdits(HTableDescriptor htd, + ReplicationSourceManager replicationManager, NavigableMap scopes, + TableName tableName, Cell cell) throws IOException { + byte[] family; + try { + BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cell); + for (StoreDescriptor s : bld.getStoresList()) { + family = s.getFamilyName().toByteArray(); + if (!scopes.containsKey(family)) { + int scope = htd.getFamily(family).getScope(); + if (scope != REPLICATION_SCOPE_LOCAL) { + scopes.put(family, scope); + addHFileRefsToQueue(replicationManager, tableName, family, s); + } + } else { + addHFileRefsToQueue(replicationManager, tableName, family, s); + } + } + } catch (IOException e) { + LOG.error("Failed to get bulk load events information from the wal file.", e); + throw e; + } + } + + private static void addHFileRefsToQueue(ReplicationSourceManager replicationManager, + TableName tableName, byte[] family, StoreDescriptor s) throws IOException { + try { + replicationManager.addHFileRefs(tableName, family, s.getStoreFileList()); + } catch (ReplicationException e) { + LOG.error("Failed to create hfile references in ZK.", e); + throw new IOException(e); + } + } + @Override public void preLogRoll(Path oldPath, Path newPath) throws IOException { getReplicationManager().preLogRoll(newPath); @@ -273,8 +347,7 @@ public class Replication extends WALActionsListener.Base implements } /** - * 
This method modifies the master's configuration in order to inject - * replication-related features + * This method modifies the master's configuration in order to inject replication-related features * @param conf */ public static void decorateMasterConfiguration(Configuration conf) { @@ -286,6 +359,13 @@ public class Replication extends WALActionsListener.Base implements if (!plugins.contains(cleanerClass)) { conf.set(HBASE_MASTER_LOGCLEANER_PLUGINS, plugins + "," + cleanerClass); } + if (isReplicationForBulkLoadDataEnabled(conf)) { + plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS); + cleanerClass = ReplicationHFileCleaner.class.getCanonicalName(); + if (!plugins.contains(cleanerClass)) { + conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, plugins + "," + cleanerClass); + } + } } /* diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java index 4dd76cbb928..8f262c55fa0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java @@ -33,15 +33,16 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; @@ -51,6 +52,11 @@ import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor; +import org.apache.hadoop.hbase.regionserver.wal.WALEdit; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; /** *
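For orientation (this sketch is not part of the patch): the decorateMasterConfiguration() hunk in the Replication.java diff above registers ReplicationHFileCleaner with the master's hfile cleaner chain only when bulk load replication is enabled, using an append-if-missing idiom on the plugin list. A standalone restatement of that idiom is sketched below, assuming the constants introduced elsewhere in this patch; the helper class and method names are hypothetical, and the null/empty guard is a defensive addition of the sketch rather than something taken from the patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
    import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;

    // Sketch only: append ReplicationHFileCleaner to the master's hfile cleaner plugins.
    public final class HFileCleanerPluginDecorator {
      private HFileCleanerPluginDecorator() {}

      public static void addReplicationHFileCleaner(Configuration conf) {
        if (!conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
            HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT)) {
          return; // bulk load replication is off, nothing to register
        }
        String plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
        String cleaner = ReplicationHFileCleaner.class.getCanonicalName();
        if (plugins == null || plugins.isEmpty()) {
          conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, cleaner);
        } else if (!plugins.contains(cleaner)) {
          conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, plugins + "," + cleaner);
        }
      }
    }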
@@ -78,6 +84,9 @@ public class ReplicationSink { private final MetricsSink metrics; private final AtomicLong totalReplicatedEdits = new AtomicLong(); private final Object sharedHtableConLock = new Object(); + // Number of hfiles that we successfully replicated + private long hfilesReplicated = 0; + private SourceFSConfigurationProvider provider; /** * Create a sink for replication @@ -91,6 +100,18 @@ public class ReplicationSink { this.conf = HBaseConfiguration.create(conf); decorateConf(); this.metrics = new MetricsSink(); + + String className = + conf.get("hbase.replication.source.fs.conf.provider", + DefaultSourceFSConfigurationProvider.class.getCanonicalName()); + try { + @SuppressWarnings("rawtypes") + Class c = Class.forName(className); + this.provider = (SourceFSConfigurationProvider) c.newInstance(); + } catch (Exception e) { + throw new IllegalArgumentException("Configured source fs configuration provider class " + + className + " throws error.", e); + } } /** @@ -113,9 +134,16 @@ public class ReplicationSink { * operates against raw protobuf type saving on a conversion from pb to pojo. * @param entries * @param cells - * @throws IOException + * @param replicationClusterId Id which will uniquely identify source cluster FS client + * configurations in the replication configuration directory + * @param sourceBaseNamespaceDirPath Path that point to the source cluster base namespace + * directory + * @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory + * @throws IOException If failed to replicate the data */ - public void replicateEntries(List entries, final CellScanner cells) throws IOException { + public void replicateEntries(List entries, final CellScanner cells, + String replicationClusterId, String sourceBaseNamespaceDirPath, + String sourceHFileArchiveDirPath) throws IOException { if (entries.isEmpty()) return; if (cells == null) throw new NullPointerException("TODO: Add handling of null CellScanner"); // Very simple optimization where we batch sequences of rows going @@ -126,6 +154,10 @@ public class ReplicationSink { // invocation of this method per table and cluster id. Map, List>> rowMap = new TreeMap, List>>(); + + // Map of table name Vs list of pair of family and list of hfile paths from its namespace + Map>>> bulkLoadHFileMap = null; + for (WALEntry entry : entries) { TableName table = TableName.valueOf(entry.getKey().getTableName().toByteArray()); @@ -138,33 +170,60 @@ public class ReplicationSink { throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i); } Cell cell = cells.current(); - if (isNewRowOrType(previousCell, cell)) { - // Create new mutation - m = CellUtil.isDelete(cell)? 
- new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()): - new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); - List clusterIds = new ArrayList(); - for(HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()){ - clusterIds.add(toUUID(clusterId)); + // Handle bulk load hfiles replication + if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) { + if (bulkLoadHFileMap == null) { + bulkLoadHFileMap = new HashMap>>>(); } - m.setClusterIds(clusterIds); - addToHashMultiMap(rowMap, table, clusterIds, m); - } - if (CellUtil.isDelete(cell)) { - ((Delete)m).addDeleteMarker(cell); + buildBulkLoadHFileMap(bulkLoadHFileMap, table, cell); } else { - ((Put)m).add(cell); + // Handle wal replication + if (isNewRowOrType(previousCell, cell)) { + // Create new mutation + m = + CellUtil.isDelete(cell) ? new Delete(cell.getRowArray(), cell.getRowOffset(), + cell.getRowLength()) : new Put(cell.getRowArray(), cell.getRowOffset(), + cell.getRowLength()); + List clusterIds = new ArrayList(); + for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) { + clusterIds.add(toUUID(clusterId)); + } + m.setClusterIds(clusterIds); + addToHashMultiMap(rowMap, table, clusterIds, m); + } + if (CellUtil.isDelete(cell)) { + ((Delete) m).addDeleteMarker(cell); + } else { + ((Put) m).add(cell); + } + previousCell = cell; } - previousCell = cell; } totalReplicated++; } - for (Entry,List>> entry : rowMap.entrySet()) { - batch(entry.getKey(), entry.getValue().values()); + + // TODO Replicating mutations and bulk loaded data can be made parallel + if (!rowMap.isEmpty()) { + LOG.debug("Started replicating mutations."); + for (Entry, List>> entry : rowMap.entrySet()) { + batch(entry.getKey(), entry.getValue().values()); + } + LOG.debug("Finished replicating mutations."); } + + if (bulkLoadHFileMap != null && !bulkLoadHFileMap.isEmpty()) { + LOG.debug("Started replicating bulk loaded data."); + HFileReplicator hFileReplicator = + new HFileReplicator(this.provider.getConf(this.conf, replicationClusterId), + sourceBaseNamespaceDirPath, sourceHFileArchiveDirPath, bulkLoadHFileMap, conf, + getConnection()); + hFileReplicator.replicate(); + LOG.debug("Finished replicating bulk loaded data."); + } + int size = entries.size(); this.metrics.setAgeOfLastAppliedOp(entries.get(size - 1).getKey().getWriteTime()); - this.metrics.applyBatch(size); + this.metrics.applyBatch(size + hfilesReplicated, hfilesReplicated); this.totalReplicatedEdits.addAndGet(totalReplicated); } catch (IOException ex) { LOG.error("Unable to accept edit because:", ex); @@ -172,6 +231,76 @@ public class ReplicationSink { } } + private void buildBulkLoadHFileMap( + final Map>>> bulkLoadHFileMap, TableName table, + Cell cell) throws IOException { + BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cell); + List storesList = bld.getStoresList(); + int storesSize = storesList.size(); + for (int j = 0; j < storesSize; j++) { + StoreDescriptor storeDescriptor = storesList.get(j); + List storeFileList = storeDescriptor.getStoreFileList(); + int storeFilesSize = storeFileList.size(); + hfilesReplicated += storeFilesSize; + for (int k = 0; k < storeFilesSize; k++) { + byte[] family = storeDescriptor.getFamilyName().toByteArray(); + + // Build hfile relative path from its namespace + String pathToHfileFromNS = getHFilePath(table, bld, storeFileList.get(k), family); + + String tableName = table.getNameWithNamespaceInclAsString(); + if (bulkLoadHFileMap.containsKey(tableName)) { + List>> familyHFilePathsList = 
bulkLoadHFileMap.get(tableName); + boolean foundFamily = false; + for (int i = 0; i < familyHFilePathsList.size(); i++) { + Pair> familyHFilePathsPair = familyHFilePathsList.get(i); + if (Bytes.equals(familyHFilePathsPair.getFirst(), family)) { + // Found family already present, just add the path to the existing list + familyHFilePathsPair.getSecond().add(pathToHfileFromNS); + foundFamily = true; + break; + } + } + if (!foundFamily) { + // Family not found, add this family and its hfile paths pair to the list + addFamilyAndItsHFilePathToTableInMap(family, pathToHfileFromNS, familyHFilePathsList); + } + } else { + // Add this table entry into the map + addNewTableEntryInMap(bulkLoadHFileMap, family, pathToHfileFromNS, tableName); + } + } + } + } + + private void addFamilyAndItsHFilePathToTableInMap(byte[] family, String pathToHfileFromNS, + List>> familyHFilePathsList) { + List hfilePaths = new ArrayList(); + hfilePaths.add(pathToHfileFromNS); + familyHFilePathsList.add(new Pair>(family, hfilePaths)); + } + + private void addNewTableEntryInMap( + final Map>>> bulkLoadHFileMap, byte[] family, + String pathToHfileFromNS, String tableName) { + List hfilePaths = new ArrayList(); + hfilePaths.add(pathToHfileFromNS); + Pair> newFamilyHFilePathsPair = + new Pair>(family, hfilePaths); + List>> newFamilyHFilePathsList = + new ArrayList>>(); + newFamilyHFilePathsList.add(newFamilyHFilePathsPair); + bulkLoadHFileMap.put(tableName, newFamilyHFilePathsList); + } + + private String getHFilePath(TableName table, BulkLoadDescriptor bld, String storeFile, + byte[] family) { + return new StringBuilder(100).append(table.getNamespaceAsString()).append(Path.SEPARATOR) + .append(table.getQualifierAsString()).append(Path.SEPARATOR) + .append(Bytes.toString(bld.getEncodedRegionName().toByteArray())).append(Path.SEPARATOR) + .append(Bytes.toString(family)).append(Path.SEPARATOR).append(storeFile).toString(); + } + /** * @param previousCell * @param cell @@ -241,22 +370,13 @@ public class ReplicationSink { } Table table = null; try { - // See https://en.wikipedia.org/wiki/Double-checked_locking - Connection connection = this.sharedHtableCon; - if (connection == null) { - synchronized (sharedHtableConLock) { - connection = this.sharedHtableCon; - if (connection == null) { - connection = this.sharedHtableCon = ConnectionFactory.createConnection(this.conf); - } - } - } + Connection connection = getConnection(); table = connection.getTable(tableName); for (List rows : allRows) { table.batch(rows); } } catch (InterruptedException ix) { - throw (InterruptedIOException)new InterruptedIOException().initCause(ix); + throw (InterruptedIOException) new InterruptedIOException().initCause(ix); } finally { if (table != null) { table.close(); @@ -264,6 +384,20 @@ public class ReplicationSink { } } + private Connection getConnection() throws IOException { + // See https://en.wikipedia.org/wiki/Double-checked_locking + Connection connection = sharedHtableCon; + if (connection == null) { + synchronized (sharedHtableConLock) { + connection = sharedHtableCon; + if (connection == null) { + connection = sharedHtableCon = ConnectionFactory.createConnection(conf); + } + } + } + return connection; + } + /** * Get a string representation of this sink's metrics * @return string with the total replicated edits count and the date diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 
f08a73b1ca4..804c6bb2641 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -37,7 +37,6 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -47,9 +46,10 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Stoppable; -import org.apache.hadoop.hbase.wal.DefaultWALProvider; -import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALKey; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.replication.ChainWALEntryFilter; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; @@ -59,8 +59,12 @@ import org.apache.hadoop.hbase.replication.ReplicationQueueInfo; import org.apache.hadoop.hbase.replication.ReplicationQueues; import org.apache.hadoop.hbase.replication.SystemTableWALEntryFilter; import org.apache.hadoop.hbase.replication.WALEntryFilter; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.wal.DefaultWALProvider; +import org.apache.hadoop.hbase.wal.WAL; +import org.apache.hadoop.hbase.wal.WALKey; import com.google.common.collect.Lists; import com.google.common.util.concurrent.ListenableFuture; @@ -223,6 +227,34 @@ public class ReplicationSource extends Thread } } + @Override + public void addHFileRefs(TableName tableName, byte[] family, List files) + throws ReplicationException { + String peerId = peerClusterZnode; + if (peerId.contains("-")) { + // peerClusterZnode will be in the form peerId + "-" + rsZNode. 
+ // A peerId will not have "-" in its name, see HBASE-11394 + peerId = peerClusterZnode.split("-")[0]; + } + Map> tableCFMap = replicationPeers.getPeer(peerId).getTableCFs(); + if (tableCFMap != null) { + List tableCfs = tableCFMap.get(tableName); + if (tableCFMap.containsKey(tableName) + && (tableCfs == null || tableCfs.contains(Bytes.toString(family)))) { + this.replicationQueues.addHFileRefs(peerId, files); + metrics.incrSizeOfHFileRefsQueue(files.size()); + } else { + LOG.debug("HFiles will not be replicated belonging to the table " + tableName + " family " + + Bytes.toString(family) + " to peer id " + peerId); + } + } else { + // user has explicitly not defined any table cfs for replication, means replicate all the + // data + this.replicationQueues.addHFileRefs(peerId, files); + metrics.incrSizeOfHFileRefsQueue(files.size()); + } + } + private void uninitialize() { LOG.debug("Source exiting " + this.peerId); metrics.clear(); @@ -471,6 +503,8 @@ public class ReplicationSource extends Thread private int currentSize = 0; // Indicates whether this particular worker is running private boolean workerRunning = true; + // Current number of hfiles that we need to replicate + private long currentNbHFiles = 0; public ReplicationSourceWorkerThread(String walGroupId, PriorityBlockingQueue queue, ReplicationQueueInfo replicationQueueInfo, ReplicationSource source) { @@ -550,6 +584,7 @@ public class ReplicationSource extends Thread boolean gotIOE = false; currentNbOperations = 0; + currentNbHFiles = 0; List entries = new ArrayList(1); currentSize = 0; try { @@ -701,6 +736,28 @@ public class ReplicationSource extends Thread return seenEntries == 0 && processEndOfFile(); } + private void cleanUpHFileRefs(WALEdit edit) throws IOException { + String peerId = peerClusterZnode; + if (peerId.contains("-")) { + // peerClusterZnode will be in the form peerId + "-" + rsZNode. + // A peerId will not have "-" in its name, see HBASE-11394 + peerId = peerClusterZnode.split("-")[0]; + } + List cells = edit.getCells(); + for (int i = 0; i < cells.size(); i++) { + Cell cell = cells.get(i); + if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) { + BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cell); + List stores = bld.getStoresList(); + for (int j = 0; j < stores.size(); j++) { + List storeFileList = stores.get(j).getStoreFileList(); + manager.cleanUpHFileRefs(peerId, storeFileList); + metrics.decrSizeOfHFileRefsQueue(storeFileList.size()); + } + } + } + } + /** * Poll for the next path * @return true if a path was obtained, false if not @@ -853,14 +910,31 @@ public class ReplicationSource extends Thread private int countDistinctRowKeys(WALEdit edit) { List cells = edit.getCells(); int distinctRowKeys = 1; + int totalHFileEntries = 0; Cell lastCell = cells.get(0); + for (int i = 0; i < edit.size(); i++) { + // Count HFiles to be replicated + if (CellUtil.matchingQualifier(cells.get(i), WALEdit.BULK_LOAD)) { + try { + BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cells.get(i)); + List stores = bld.getStoresList(); + for (int j = 0; j < stores.size(); j++) { + totalHFileEntries += stores.get(j).getStoreFileList().size(); + } + } catch (IOException e) { + LOG.error("Failed to deserialize bulk load entry from wal edit. 
" + + "This its hfiles count will not be added into metric."); + } + } + if (!CellUtil.matchingRow(cells.get(i), lastCell)) { distinctRowKeys++; } lastCell = cells.get(i); } - return distinctRowKeys; + currentNbHFiles += totalHFileEntries; + return distinctRowKeys + totalHFileEntries; } /** @@ -914,6 +988,12 @@ public class ReplicationSource extends Thread } if (this.lastLoggedPosition != this.repLogReader.getPosition()) { + //Clean up hfile references + int size = entries.size(); + for (int i = 0; i < size; i++) { + cleanUpHFileRefs(entries.get(i).getEdit()); + } + //Log and clean up WAL logs manager.logPositionAndCleanOldLogs(this.currentPath, peerClusterZnode, this.repLogReader.getPosition(), this.replicationQueueInfo.isQueueRecovered(), currentWALisBeingWrittenTo); @@ -925,7 +1005,7 @@ public class ReplicationSource extends Thread totalReplicatedEdits.addAndGet(entries.size()); totalReplicatedOperations.addAndGet(currentNbOperations); // FIXME check relationship between wal group and overall - metrics.shipBatch(currentNbOperations, currentSize / 1024); + metrics.shipBatch(currentNbOperations, currentSize / 1024, currentNbHFiles); metrics.setAgeOfLastShippedOp(entries.get(entries.size() - 1).getKey().getWriteTime(), walGroupId); if (LOG.isTraceEnabled()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java index 1e9c714b486..7f4a9f7fcf8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.replication.regionserver; import java.io.IOException; +import java.util.List; import java.util.UUID; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -26,7 +27,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; +import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeers; import org.apache.hadoop.hbase.replication.ReplicationQueues; @@ -105,4 +108,14 @@ public interface ReplicationSourceInterface { */ String getStats(); + /** + * Add hfile names to the queue to be replicated. 
+ * @param tableName Name of the table these files belongs to + * @param family Name of the family these files belong to + * @param files files whose names needs to be added to the queue to be replicated + * @throws ReplicationException If failed to add hfile references + */ + void addHFileRefs(TableName tableName, byte[] family, List files) + throws ReplicationException; + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index a8cffbad9af..9ff4b2d9003 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -45,8 +45,10 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost; @@ -225,8 +227,16 @@ public class ReplicationSourceManager implements ReplicationListener { * old region server wal queues */ protected void init() throws IOException, ReplicationException { + boolean replicationForBulkLoadDataEnabled = + conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, + HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT); for (String id : this.replicationPeers.getPeerIds()) { addSource(id); + if (replicationForBulkLoadDataEnabled) { + // Check if peer exists in hfile-refs queue, if not add it. This can happen in the case + // when a peer was added before replication for bulk loaded data was enabled. + this.replicationQueues.addPeerToHFileRefs(id); + } } List currentReplicators = this.replicationQueues.getListOfReplicators(); if (currentReplicators == null || currentReplicators.size() == 0) { @@ -733,4 +743,15 @@ public class ReplicationSourceManager implements ReplicationListener { } return stats.toString(); } + + public void addHFileRefs(TableName tableName, byte[] family, List files) + throws ReplicationException { + for (ReplicationSourceInterface source : this.sources) { + source.addHFileRefs(tableName, family, files); + } + } + + public void cleanUpHFileRefs(String peerId, List files) { + this.replicationQueues.removeHFileRefs(peerId, files); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.java new file mode 100644 index 00000000000..82711159af1 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. + */ +package org.apache.hadoop.hbase.replication.regionserver; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +/** + * Interface that defines how a region server in peer cluster will get source cluster file system + * configurations. User can configure their custom implementation implementing this interface by + * setting the value of their custom implementation's fully qualified class name to + * hbase.replication.source.fs.conf.provider property in RegionServer configuration. Default is + * {@link DefaultSourceFSConfigurationProvider} + */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) +public interface SourceFSConfigurationProvider { + + /** + * Returns the source cluster file system configuration for the given source cluster replication + * ID. + * @param sinkConf sink cluster configuration + * @param replicationClusterId unique ID which identifies the source cluster + * @return source cluster file system configuration + * @throws IOException for invalid directory or for a bad disk. + */ + public Configuration getConf(Configuration sinkConf, String replicationClusterId) + throws IOException; + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java index dc6b1518882..7982c16e3df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java @@ -216,7 +216,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService for(ClientProtos.BulkLoadHFileRequest.FamilyPath el : request.getFamilyPathList()) { familyPaths.add(new Pair(el.getFamily().toByteArray(),el.getPath())); } - + Token userToken = null; if (userProvider.isHadoopSecurityEnabled()) { userToken = new Token(request.getFsToken().getIdentifier().toByteArray(), request.getFsToken() @@ -374,6 +374,14 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService public String prepareBulkLoad(final byte[] family, final String srcPath) throws IOException { Path p = new Path(srcPath); Path stageP = new Path(stagingDir, new Path(Bytes.toString(family), p.getName())); + + // In case of Replication for bulk load files, hfiles are already copied in staging directory + if (p.equals(stageP)) { + LOG.debug(p.getName() + + " is already available in staging directory. 
Skipping copy or rename."); + return stageP.toString(); + } + if (srcFs == null) { srcFs = FileSystem.get(p.toUri(), conf); } @@ -413,6 +421,14 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService Path p = new Path(srcPath); Path stageP = new Path(stagingDir, new Path(Bytes.toString(family), p.getName())); + + // In case of Replication for bulk load files, hfiles are not renamed by end point during + // prepare stage, so no need of rename here again + if (p.equals(stageP)) { + LOG.debug(p.getName() + " is already available in source directory. Skipping rename."); + return; + } + LOG.debug("Moving " + stageP + " back to " + p); if(!fs.rename(stageP, p)) throw new IOException("Failed to move HFile: " + stageP + " to " + p); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java new file mode 100644 index 00000000000..87db3862425 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java @@ -0,0 +1,264 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.cleaner; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.ChoreService; +import org.apache.hadoop.hbase.CoordinatedStateManager; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.replication.ReplicationException; +import org.apache.hadoop.hbase.replication.ReplicationFactory; +import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; +import org.apache.hadoop.hbase.replication.ReplicationPeers; +import org.apache.hadoop.hbase.replication.ReplicationQueues; +import org.apache.hadoop.hbase.replication.ReplicationQueuesClient; +import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl; +import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner; +import org.apache.hadoop.hbase.replication.regionserver.Replication; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; + +@Category({ MasterTests.class, SmallTests.class }) +public class TestReplicationHFileCleaner { + private static final Log LOG = LogFactory.getLog(ReplicationQueuesZKImpl.class); + private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static Server server; + private static ReplicationQueues rq; + private static ReplicationPeers rp; + private static final String peerId = "TestReplicationHFileCleaner"; + private static Configuration conf = TEST_UTIL.getConfiguration(); + static FileSystem fs = null; + Path root; + + /** + * @throws java.lang.Exception + */ + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.startMiniZKCluster(); + server = new DummyServer(); + conf.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true); + Replication.decorateMasterConfiguration(conf); + rp = ReplicationFactory.getReplicationPeers(server.getZooKeeper(), conf, server); + rp.init(); + + rq = ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server); + rq.init(server.getServerName().toString()); + try { + fs = FileSystem.get(conf); + } finally { + if (fs != null) { + fs.close(); + } + } + } + + /** + * @throws java.lang.Exception + */ + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniZKCluster(); + } + + @Before + public void setup() throws ReplicationException, IOException { + root = TEST_UTIL.getDataTestDirOnTestFS(); + rp.addPeer(peerId, new ReplicationPeerConfig().setClusterKey(TEST_UTIL.getClusterKey()), null); 
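    // Context for the tests below (a sketch of existing wiring, not new behaviour): because
    // HConstants.REPLICATION_BULKLOAD_ENABLE_KEY is set to true before
    // Replication.decorateMasterConfiguration(conf) runs in setUpBeforeClass(), that call also
    // registers the hfile cleaner, roughly equivalent to:
    //   conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
    //       conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS) + ","
    //           + ReplicationHFileCleaner.class.getCanonicalName());
    // The tests below still construct ReplicationHFileCleaner directly and drive
    // isFileDeletable()/getDeletableFiles() themselves.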
+ } + + @After + public void cleanup() throws ReplicationException { + try { + fs.delete(root, true); + } catch (IOException e) { + LOG.warn("Failed to delete files recursively from path " + root); + } + rp.removePeer(peerId); + } + + @Test + public void testIsFileDeletable() throws IOException, ReplicationException { + // 1. Create a file + Path file = new Path(root, "testIsFileDeletableWithNoHFileRefs"); + fs.createNewFile(file); + // 2. Assert file is successfully created + assertTrue("Test file not created!", fs.exists(file)); + ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner(); + cleaner.setConf(conf); + // 3. Assert that file as is should be deletable + assertTrue("Cleaner should allow to delete this file as there is no hfile reference node " + + "for it in the queue.", + cleaner.isFileDeletable(fs.getFileStatus(file))); + + List files = new ArrayList(1); + files.add(file.getName()); + // 4. Add the file to hfile-refs queue + rq.addHFileRefs(peerId, files); + // 5. Assert file should not be deletable + assertFalse("Cleaner should not allow to delete this file as there is a hfile reference node " + + "for it in the queue.", + cleaner.isFileDeletable(fs.getFileStatus(file))); + } + + @Test + public void testGetDeletableFiles() throws Exception { + // 1. Create two files and assert that they do not exist + Path notDeletablefile = new Path(root, "testGetDeletableFiles_1"); + fs.createNewFile(notDeletablefile); + assertTrue("Test file not created!", fs.exists(notDeletablefile)); + Path deletablefile = new Path(root, "testGetDeletableFiles_2"); + fs.createNewFile(deletablefile); + assertTrue("Test file not created!", fs.exists(deletablefile)); + + List files = new ArrayList(2); + FileStatus f = new FileStatus(); + f.setPath(deletablefile); + files.add(f); + f = new FileStatus(); + f.setPath(notDeletablefile); + files.add(f); + + List hfiles = new ArrayList<>(1); + hfiles.add(notDeletablefile.getName()); + // 2. Add one file to hfile-refs queue + rq.addHFileRefs(peerId, hfiles); + + ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner(); + cleaner.setConf(conf); + Iterator deletableFilesIterator = cleaner.getDeletableFiles(files).iterator(); + int i = 0; + while (deletableFilesIterator.hasNext() && i < 2) { + i++; + } + // 5. Assert one file should not be deletable and it is present in the list returned + if (i > 2) { + fail("File " + notDeletablefile + + " should not be deletable as its hfile reference node is not added."); + } + assertTrue(deletableFilesIterator.next().getPath().equals(deletablefile)); + } + + /* + * Test for HBASE-14621. This test will not assert directly anything. Without the fix the test + * will end up in a infinite loop, so it will timeout. + */ + @Test(timeout = 15000) + public void testForDifferntHFileRefsZnodeVersion() throws Exception { + // 1. Create a file + Path file = new Path(root, "testForDifferntHFileRefsZnodeVersion"); + fs.createNewFile(file); + // 2. 
Assert file is successfully created + assertTrue("Test file not created!", fs.exists(file)); + ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner(); + cleaner.setConf(conf); + + ReplicationQueuesClient replicationQueuesClient = Mockito.mock(ReplicationQueuesClient.class); + //Return different znode version for each call + Mockito.when(replicationQueuesClient.getHFileRefsNodeChangeVersion()).thenReturn(1, 2); + + Class cleanerClass = cleaner.getClass(); + Field rqc = cleanerClass.getDeclaredField("rqc"); + rqc.setAccessible(true); + rqc.set(cleaner, replicationQueuesClient); + + cleaner.isFileDeletable(fs.getFileStatus(file)); + } + + static class DummyServer implements Server { + + @Override + public Configuration getConfiguration() { + return TEST_UTIL.getConfiguration(); + } + + @Override + public ZooKeeperWatcher getZooKeeper() { + try { + return new ZooKeeperWatcher(getConfiguration(), "dummy server", this); + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } + + @Override + public CoordinatedStateManager getCoordinatedStateManager() { + return null; + } + + @Override + public ClusterConnection getConnection() { + return null; + } + + @Override + public MetaTableLocator getMetaTableLocator() { + return null; + } + + @Override + public ServerName getServerName() { + return ServerName.valueOf("regionserver,60020,000000"); + } + + @Override + public void abort(String why, Throwable e) { + } + + @Override + public boolean isAborted() { + return false; + } + + @Override + public void stop(String why) { + } + + @Override + public boolean isStopped() { + return false; + } + + @Override + public ChoreService getChoreService() { + return null; + } + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java index f463f760321..abe484eac79 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java @@ -19,12 +19,14 @@ package org.apache.hadoop.hbase.replication; import java.io.IOException; +import java.util.List; import java.util.UUID; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.replication.regionserver.MetricsSource; import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface; import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager; @@ -89,4 +91,10 @@ public class ReplicationSourceDummy implements ReplicationSourceInterface { public String getStats() { return ""; } + + @Override + public void addHFileRefs(TableName tableName, byte[] family, List files) + throws ReplicationException { + return; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java index d63224b226e..c7b30aaafff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java @@ -19,15 +19,21 @@ package org.apache.hadoop.hbase.replication; import static 
org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.fail; import java.io.Closeable; import java.io.IOException; +import java.util.Arrays; import java.util.List; import java.util.Random; +import java.util.concurrent.CountDownLatch; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -35,6 +41,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; @@ -51,10 +58,15 @@ import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles; +import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RSRpcServices; +import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; +import org.apache.hadoop.hbase.replication.regionserver.TestSourceFSConfigurationProvider; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.HFileTestUtil; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.junit.After; @@ -80,6 +92,7 @@ public class TestMasterReplication { private static final TableName tableName = TableName.valueOf("test"); private static final byte[] famName = Bytes.toBytes("f"); + private static final byte[] famName1 = Bytes.toBytes("f1"); private static final byte[] row = Bytes.toBytes("row"); private static final byte[] row1 = Bytes.toBytes("row1"); private static final byte[] row2 = Bytes.toBytes("row2"); @@ -104,7 +117,11 @@ public class TestMasterReplication { baseConfiguration.setInt("hbase.regionserver.maxlogs", 10); baseConfiguration.setLong("hbase.master.logcleaner.ttl", 10); baseConfiguration.setBoolean(HConstants.REPLICATION_ENABLE_KEY, - HConstants.REPLICATION_ENABLE_DEFAULT); + HConstants.REPLICATION_ENABLE_DEFAULT); + baseConfiguration.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true); + baseConfiguration.set("hbase.replication.source.fs.conf.provider", + TestSourceFSConfigurationProvider.class.getCanonicalName()); + baseConfiguration.set(HConstants.REPLICATION_CLUSTER_ID, "12345"); baseConfiguration.setBoolean("dfs.support.append", true); baseConfiguration.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100); baseConfiguration.setStrings( @@ -115,6 +132,9 @@ public class TestMasterReplication { HColumnDescriptor fam = new HColumnDescriptor(famName); fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL); table.addFamily(fam); + fam = new HColumnDescriptor(famName1); + fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL); + table.addFamily(fam); fam = new 
HColumnDescriptor(noRepfamName); table.addFamily(fam); } @@ -131,14 +151,7 @@ public class TestMasterReplication { int numClusters = 2; Table[] htables = null; try { - startMiniClusters(numClusters); - createTableOnClusters(table); - - htables = getHTablesOnClusters(tableName); - - // Test the replication scenarios of 0 -> 1 -> 0 - addPeer("1", 0, 1); - addPeer("1", 1, 0); + htables = setUpClusterTablesAndPeers(numClusters); int[] expectedCounts = new int[] { 2, 2 }; @@ -158,12 +171,64 @@ public class TestMasterReplication { } /** - * Tests the cyclic replication scenario of 0 -> 1 -> 2 -> 0 by adding and - * deleting rows to a table in each clusters and ensuring that the each of - * these clusters get the appropriate mutations. It also tests the grouping - * scenario where a cluster needs to replicate the edits originating from - * itself and also the edits that it received using replication from a - * different cluster. The scenario is explained in HBASE-9158 + * It tests the replication scenario involving 0 -> 1 -> 0. It does it by bulk loading a set of + * HFiles to a table in each cluster, checking if it's replicated. + */ + @Test(timeout = 300000) + public void testHFileCyclicReplication() throws Exception { + LOG.info("testHFileCyclicReplication"); + int numClusters = 2; + Table[] htables = null; + try { + htables = setUpClusterTablesAndPeers(numClusters); + + // Load 100 rows for each hfile range in cluster '0' and validate whether its been replicated + // to cluster '1'. + byte[][][] hfileRanges = + new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, + new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, }; + int numOfRows = 100; + int[] expectedCounts = + new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; + + loadAndValidateHFileReplication("testHFileCyclicReplication_01", 0, new int[] { 1 }, row, + famName, htables, hfileRanges, numOfRows, expectedCounts, true); + + // Load 200 rows for each hfile range in cluster '1' and validate whether its been replicated + // to cluster '0'. + hfileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("gggg"), Bytes.toBytes("iiii") }, + new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, }; + numOfRows = 200; + int[] newExpectedCounts = new int[] { hfileRanges.length * numOfRows + expectedCounts[0], + hfileRanges.length * numOfRows + expectedCounts[1] }; + + loadAndValidateHFileReplication("testHFileCyclicReplication_10", 1, new int[] { 0 }, row, + famName, htables, hfileRanges, numOfRows, newExpectedCounts, true); + + } finally { + close(htables); + shutDownMiniClusters(); + } + } + + private Table[] setUpClusterTablesAndPeers(int numClusters) throws Exception { + Table[] htables; + startMiniClusters(numClusters); + createTableOnClusters(table); + + htables = getHTablesOnClusters(tableName); + // Test the replication scenarios of 0 -> 1 -> 0 + addPeer("1", 0, 1); + addPeer("1", 1, 0); + return htables; + } + + /** + * Tests the cyclic replication scenario of 0 -> 1 -> 2 -> 0 by adding and deleting rows to a + * table in each clusters and ensuring that the each of these clusters get the appropriate + * mutations. It also tests the grouping scenario where a cluster needs to replicate the edits + * originating from itself and also the edits that it received using replication from a different + * cluster. 
The scenario is explained in HBASE-9158 */ @Test(timeout = 300000) public void testCyclicReplication2() throws Exception { @@ -213,6 +278,119 @@ public class TestMasterReplication { } } + /** + * It tests the multi slave hfile replication scenario involving 0 -> 1, 2. It does it by bulk + * loading a set of HFiles to a table in master cluster, checking if it's replicated in its peers. + */ + @Test(timeout = 300000) + public void testHFileMultiSlaveReplication() throws Exception { + LOG.info("testHFileMultiSlaveReplication"); + int numClusters = 3; + Table[] htables = null; + try { + startMiniClusters(numClusters); + createTableOnClusters(table); + + // Add a slave, 0 -> 1 + addPeer("1", 0, 1); + + htables = getHTablesOnClusters(tableName); + + // Load 100 rows for each hfile range in cluster '0' and validate whether its been replicated + // to cluster '1'. + byte[][][] hfileRanges = + new byte[][][] { new byte[][] { Bytes.toBytes("mmmm"), Bytes.toBytes("oooo") }, + new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("rrr") }, }; + int numOfRows = 100; + + int[] expectedCounts = + new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; + + loadAndValidateHFileReplication("testHFileCyclicReplication_0", 0, new int[] { 1 }, row, + famName, htables, hfileRanges, numOfRows, expectedCounts, true); + + // Validate data is not replicated to cluster '2'. + assertEquals(0, utilities[2].countRows(htables[2])); + + rollWALAndWait(utilities[0], htables[0].getName(), row); + + // Add one more slave, 0 -> 2 + addPeer("2", 0, 2); + + // Load 200 rows for each hfile range in cluster '0' and validate whether its been replicated + // to cluster '1' and '2'. Previous data should be replicated to cluster '2'. + hfileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("ssss"), Bytes.toBytes("uuuu") }, + new byte[][] { Bytes.toBytes("vvv"), Bytes.toBytes("xxx") }, }; + numOfRows = 200; + + int[] newExpectedCounts = new int[] { hfileRanges.length * numOfRows + expectedCounts[0], + hfileRanges.length * numOfRows + expectedCounts[1], hfileRanges.length * numOfRows }; + + loadAndValidateHFileReplication("testHFileCyclicReplication_1", 0, new int[] { 1, 2 }, row, + famName, htables, hfileRanges, numOfRows, newExpectedCounts, true); + + } finally { + close(htables); + shutDownMiniClusters(); + } + } + + /** + * It tests the bulk loaded hfile replication scenario to only explicitly specified table column + * families. It does it by bulk loading a set of HFiles belonging to both the CFs of table and set + * only one CF data to replicate. 
+ */ + @Test(timeout = 300000) + public void testHFileReplicationForConfiguredTableCfs() throws Exception { + LOG.info("testHFileReplicationForConfiguredTableCfs"); + int numClusters = 2; + Table[] htables = null; + try { + startMiniClusters(numClusters); + createTableOnClusters(table); + + htables = getHTablesOnClusters(tableName); + // Test the replication scenarios only 'f' is configured for table data replication not 'f1' + addPeer("1", 0, 1, tableName.getNameAsString() + ":" + Bytes.toString(famName)); + + // Load 100 rows for each hfile range in cluster '0' for table CF 'f' + byte[][][] hfileRanges = + new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, + new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, }; + int numOfRows = 100; + int[] expectedCounts = + new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; + + loadAndValidateHFileReplication("load_f", 0, new int[] { 1 }, row, famName, htables, + hfileRanges, numOfRows, expectedCounts, true); + + // Load 100 rows for each hfile range in cluster '0' for table CF 'f1' + hfileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("gggg"), Bytes.toBytes("iiii") }, + new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, }; + numOfRows = 100; + + int[] newExpectedCounts = + new int[] { hfileRanges.length * numOfRows + expectedCounts[0], expectedCounts[1] }; + + loadAndValidateHFileReplication("load_f1", 0, new int[] { 1 }, row, famName1, htables, + hfileRanges, numOfRows, newExpectedCounts, false); + + // Validate data replication for CF 'f1' + + // Source cluster table should contain data for the families + wait(0, htables[0], hfileRanges.length * numOfRows + expectedCounts[0]); + + // Sleep for enough time so that the data is still not replicated for the CF which is not + // configured for replication + Thread.sleep((NB_RETRIES / 2) * SLEEP_TIME); + // Peer cluster should have only configured CF data + wait(1, htables[1], expectedCounts[1]); + } finally { + close(htables); + shutDownMiniClusters(); + } + } + /** * Tests cyclic replication scenario of 0 -> 1 -> 2 -> 1. 
*/ @@ -336,6 +514,17 @@ public class TestMasterReplication { close(replicationAdmin); } } + + private void addPeer(String id, int masterClusterNumber, int slaveClusterNumber, String tableCfs) + throws Exception { + ReplicationAdmin replicationAdmin = null; + try { + replicationAdmin = new ReplicationAdmin(configurations[masterClusterNumber]); + replicationAdmin.addPeer(id, utilities[slaveClusterNumber].getClusterKey(), tableCfs); + } finally { + close(replicationAdmin); + } + } private void disablePeer(String id, int masterClusterNumber) throws Exception { ReplicationAdmin replicationAdmin = null; @@ -413,8 +602,56 @@ public class TestMasterReplication { wait(row, target, false); } - private void wait(byte[] row, Table target, boolean isDeleted) - throws Exception { + private void loadAndValidateHFileReplication(String testName, int masterNumber, + int[] slaveNumbers, byte[] row, byte[] fam, Table[] tables, byte[][][] hfileRanges, + int numOfRows, int[] expectedCounts, boolean toValidate) throws Exception { + HBaseTestingUtility util = utilities[masterNumber]; + + Path dir = util.getDataTestDirOnTestFS(testName); + FileSystem fs = util.getTestFileSystem(); + dir = dir.makeQualified(fs); + Path familyDir = new Path(dir, Bytes.toString(fam)); + + int hfileIdx = 0; + for (byte[][] range : hfileRanges) { + byte[] from = range[0]; + byte[] to = range[1]; + HFileTestUtil.createHFile(util.getConfiguration(), fs, + new Path(familyDir, "hfile_" + hfileIdx++), fam, row, from, to, numOfRows); + } + + Table source = tables[masterNumber]; + final TableName tableName = source.getName(); + LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration()); + String[] args = { dir.toString(), tableName.toString() }; + loader.run(args); + + if (toValidate) { + for (int slaveClusterNumber : slaveNumbers) { + wait(slaveClusterNumber, tables[slaveClusterNumber], expectedCounts[slaveClusterNumber]); + } + } + } + + private void wait(int slaveNumber, Table target, int expectedCount) + throws IOException, InterruptedException { + int count = 0; + for (int i = 0; i < NB_RETRIES; i++) { + if (i == NB_RETRIES - 1) { + fail("Waited too much time for bulkloaded data replication. Current count=" + count + + ", expected count=" + expectedCount); + } + count = utilities[slaveNumber].countRows(target); + if (count != expectedCount) { + LOG.info("Waiting more time for bulkloaded data replication."); + Thread.sleep(SLEEP_TIME); + } else { + break; + } + } + } + + private void wait(byte[] row, Table target, boolean isDeleted) throws Exception { Get get = new Get(row); for (int i = 0; i < NB_RETRIES; i++) { if (i == NB_RETRIES - 1) { @@ -438,6 +675,47 @@ public class TestMasterReplication { } } + private void rollWALAndWait(final HBaseTestingUtility utility, final TableName table, + final byte[] row) throws IOException { + final Admin admin = utility.getHBaseAdmin(); + final MiniHBaseCluster cluster = utility.getMiniHBaseCluster(); + + // find the region that corresponds to the given row. 
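      // (HRegion.rowIsInRange treats the start key as inclusive and the end key as exclusive,
      // with empty keys meaning unbounded, so the first online region covering the row is used.)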
+ HRegion region = null; + for (HRegion candidate : cluster.getRegions(table)) { + if (HRegion.rowIsInRange(candidate.getRegionInfo(), row)) { + region = candidate; + break; + } + } + assertNotNull("Couldn't find the region for row '" + Arrays.toString(row) + "'", region); + + final CountDownLatch latch = new CountDownLatch(1); + + // listen for successful log rolls + final WALActionsListener listener = new WALActionsListener.Base() { + @Override + public void postLogRoll(final Path oldPath, final Path newPath) throws IOException { + latch.countDown(); + } + }; + region.getWAL().registerWALActionsListener(listener); + + // request a roll + admin.rollWALWriter(cluster.getServerHoldingRegion(region.getTableDesc().getTableName(), + region.getRegionInfo().getRegionName())); + + // wait + try { + latch.await(); + } catch (InterruptedException exception) { + LOG.warn("Interrupted while waiting for the wal of '" + region + "' to roll. If later " + + "replication tests fail, it's probably because we should still be waiting."); + Thread.currentThread().interrupt(); + } + region.getWAL().unregisterWALActionsListener(listener); + } + /** * Use a coprocessor to count puts and deletes. as KVs would be replicated back with the same * timestamp there is otherwise no way to count them. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java index 6656022674c..42a127f485d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java @@ -659,7 +659,8 @@ public class TestReplicationSmallTests extends TestReplicationBase { HRegionInfo hri = new HRegionInfo(htable1.getName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW); WALEdit edit = WALEdit.createCompaction(hri, compactionDescriptor); - Replication.scopeWALEdits(htable1.getTableDescriptor(), new WALKey(), edit); + Replication.scopeWALEdits(htable1.getTableDescriptor(), new WALKey(), edit, + htable1.getConfiguration(), null); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java index 696c130a988..41c32401194 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.replication; import static org.junit.Assert.*; +import java.util.ArrayList; import java.util.List; import java.util.SortedMap; import java.util.SortedSet; @@ -159,6 +160,62 @@ public abstract class TestReplicationStateBasic { assertEquals(0, rq2.getListOfReplicators().size()); } + @Test + public void testHfileRefsReplicationQueues() throws ReplicationException, KeeperException { + rp.init(); + rq1.init(server1); + rqc.init(); + + List files1 = new ArrayList(3); + files1.add("file_1"); + files1.add("file_2"); + files1.add("file_3"); + assertNull(rqc.getReplicableHFiles(ID_ONE)); + assertEquals(0, rqc.getAllPeersFromHFileRefsQueue().size()); + rp.addPeer(ID_ONE, new ReplicationPeerConfig().setClusterKey(KEY_ONE), null); + rq1.addHFileRefs(ID_ONE, files1); + assertEquals(1, rqc.getAllPeersFromHFileRefsQueue().size()); + assertEquals(3, 
rqc.getReplicableHFiles(ID_ONE).size()); + List files2 = new ArrayList<>(files1); + String removedString = files2.remove(0); + rq1.removeHFileRefs(ID_ONE, files2); + assertEquals(1, rqc.getReplicableHFiles(ID_ONE).size()); + files2 = new ArrayList<>(1); + files2.add(removedString); + rq1.removeHFileRefs(ID_ONE, files2); + assertEquals(0, rqc.getReplicableHFiles(ID_ONE).size()); + rp.removePeer(ID_ONE); + } + + @Test + public void testRemovePeerForHFileRefs() throws ReplicationException, KeeperException { + rq1.init(server1); + rqc.init(); + + rp.init(); + rp.addPeer(ID_ONE, new ReplicationPeerConfig().setClusterKey(KEY_ONE), null); + rp.addPeer(ID_TWO, new ReplicationPeerConfig().setClusterKey(KEY_TWO), null); + + List files1 = new ArrayList(3); + files1.add("file_1"); + files1.add("file_2"); + files1.add("file_3"); + rq1.addHFileRefs(ID_ONE, files1); + rq1.addHFileRefs(ID_TWO, files1); + assertEquals(2, rqc.getAllPeersFromHFileRefsQueue().size()); + assertEquals(3, rqc.getReplicableHFiles(ID_ONE).size()); + assertEquals(3, rqc.getReplicableHFiles(ID_TWO).size()); + + rp.removePeer(ID_ONE); + assertEquals(1, rqc.getAllPeersFromHFileRefsQueue().size()); + assertNull(rqc.getReplicableHFiles(ID_ONE)); + assertEquals(3, rqc.getReplicableHFiles(ID_TWO).size()); + + rp.removePeer(ID_TWO); + assertEquals(0, rqc.getAllPeersFromHFileRefsQueue().size()); + assertNull(rqc.getReplicableHFiles(ID_TWO)); + } + @Test public void testReplicationPeers() throws Exception { rp.init(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java index a9222ab36a6..d6bf4ea23ab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java @@ -64,6 +64,7 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic { utility = new HBaseTestingUtility(); utility.startMiniZKCluster(); conf = utility.getConfiguration(); + conf.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true); zkw = HBaseTestingUtility.getZooKeeperWatcher(utility); String replicationZNodeName = conf.get("zookeeper.znode.replication", "replication"); replicationZNode = ZKUtil.joinZNode(zkw.baseZNode, replicationZNodeName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java index 8399ccce3ce..fd02d1ae94b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java @@ -47,15 +47,15 @@ public class TestReplicationSyncUpTool extends TestReplicationBase { private static final TableName t1_su = TableName.valueOf("t1_syncup"); private static final TableName t2_su = TableName.valueOf("t2_syncup"); - private static final byte[] famName = Bytes.toBytes("cf1"); + protected static final byte[] famName = Bytes.toBytes("cf1"); private static final byte[] qualName = Bytes.toBytes("q1"); - private static final byte[] noRepfamName = Bytes.toBytes("norep"); + protected static final byte[] noRepfamName = Bytes.toBytes("norep"); private HTableDescriptor t1_syncupSource, t1_syncupTarget; private HTableDescriptor t2_syncupSource, t2_syncupTarget; - private Table 
ht1Source, ht2Source, ht1TargetAtPeer1, ht2TargetAtPeer1; + protected Table ht1Source, ht2Source, ht1TargetAtPeer1, ht2TargetAtPeer1; @Before public void setUp() throws Exception { @@ -174,7 +174,7 @@ public class TestReplicationSyncUpTool extends TestReplicationBase { } - private void setupReplication() throws Exception { + protected void setupReplication() throws Exception { ReplicationAdmin admin1 = new ReplicationAdmin(conf1); ReplicationAdmin admin2 = new ReplicationAdmin(conf2); @@ -410,7 +410,7 @@ public class TestReplicationSyncUpTool extends TestReplicationBase { } } - private void syncUp(HBaseTestingUtility ut) throws Exception { + protected void syncUp(HBaseTestingUtility ut) throws Exception { ReplicationSyncUp.setConfigure(ut.getConfiguration()); String[] arguments = new String[] { null }; new ReplicationSyncUp().run(arguments); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java new file mode 100644 index 00000000000..f54c6325d18 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java @@ -0,0 +1,235 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication; + +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.UUID; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles; +import org.apache.hadoop.hbase.replication.regionserver.TestSourceFSConfigurationProvider; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.HFileTestUtil; +import org.junit.BeforeClass; +import org.junit.experimental.categories.Category; + +@Category({ ReplicationTests.class, LargeTests.class }) +public class TestReplicationSyncUpToolWithBulkLoadedData extends TestReplicationSyncUpTool { + + private static final Log LOG = LogFactory + .getLog(TestReplicationSyncUpToolWithBulkLoadedData.class); + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + conf1.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true); + conf1.set(HConstants.REPLICATION_CLUSTER_ID, "12345"); + conf1.set("hbase.replication.source.fs.conf.provider", + TestSourceFSConfigurationProvider.class.getCanonicalName()); + String classes = conf1.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, ""); + if (!classes.contains("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint")) { + classes = classes + ",org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint"; + conf1.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, classes); + } + + TestReplicationBase.setUpBeforeClass(); + } + + @Override + public void testSyncUpTool() throws Exception { + /** + * Set up Replication: on Master and one Slave Table: t1_syncup and t2_syncup columnfamily: + * 'cf1' : replicated 'norep': not replicated + */ + setupReplication(); + + /** + * Prepare 16 random hfile ranges required for creating hfiles + */ + Iterator randomHFileRangeListIterator = null; + Set randomHFileRanges = new HashSet(16); + for (int i = 0; i < 16; i++) { + randomHFileRanges.add(UUID.randomUUID().toString()); + } + List randomHFileRangeList = new ArrayList<>(randomHFileRanges); + Collections.sort(randomHFileRangeList); + randomHFileRangeListIterator = randomHFileRangeList.iterator(); + + /** + * at Master: t1_syncup: Load 100 rows into cf1, and 3 rows into norep t2_syncup: Load 200 rows + * into cf1, and 3 rows into norep verify correctly replicated to slave + */ + loadAndReplicateHFiles(true, randomHFileRangeListIterator); + + /** + * Verify hfile load works step 1: stop hbase on Slave step 2: at Master: t1_syncup: Load + * another 100 rows into cf1 and 3 rows into norep t2_syncup: Load another 200 rows into cf1 and + * 3 rows into norep step 3: stop hbase on master, restart hbase on Slave step 4: verify Slave + * still has the rows before load t1_syncup: 100 rows from cf1 t2_syncup: 200 rows from cf1 step + * 5: run syncup tool on Master step 6: verify that hfiles show up on Slave and 'norep' does not + * t1_syncup: 200 rows from cf1 
t2_syncup: 400 rows from cf1 verify correctly replicated to + * Slave + */ + mimicSyncUpAfterBulkLoad(randomHFileRangeListIterator); + + } + + private void mimicSyncUpAfterBulkLoad(Iterator randomHFileRangeListIterator) + throws Exception { + LOG.debug("mimicSyncUpAfterBulkLoad"); + utility2.shutdownMiniHBaseCluster(); + + loadAndReplicateHFiles(false, randomHFileRangeListIterator); + + int rowCount_ht1Source = utility1.countRows(ht1Source); + assertEquals("t1_syncup has 206 rows on source, after bulk load of another 103 hfiles", 206, + rowCount_ht1Source); + + int rowCount_ht2Source = utility1.countRows(ht2Source); + assertEquals("t2_syncup has 406 rows on source, after bulk load of another 203 hfiles", 406, + rowCount_ht2Source); + + utility1.shutdownMiniHBaseCluster(); + utility2.restartHBaseCluster(1); + + Thread.sleep(SLEEP_TIME); + + // Before sync up + int rowCount_ht1TargetAtPeer1 = utility2.countRows(ht1TargetAtPeer1); + int rowCount_ht2TargetAtPeer1 = utility2.countRows(ht2TargetAtPeer1); + assertEquals("@Peer1 t1_syncup should still have 100 rows", 100, rowCount_ht1TargetAtPeer1); + assertEquals("@Peer1 t2_syncup should still have 200 rows", 200, rowCount_ht2TargetAtPeer1); + + // Run sync up tool + syncUp(utility1); + + // After syun up + for (int i = 0; i < NB_RETRIES; i++) { + syncUp(utility1); + rowCount_ht1TargetAtPeer1 = utility2.countRows(ht1TargetAtPeer1); + rowCount_ht2TargetAtPeer1 = utility2.countRows(ht2TargetAtPeer1); + if (i == NB_RETRIES - 1) { + if (rowCount_ht1TargetAtPeer1 != 200 || rowCount_ht2TargetAtPeer1 != 400) { + // syncUP still failed. Let's look at the source in case anything wrong there + utility1.restartHBaseCluster(1); + rowCount_ht1Source = utility1.countRows(ht1Source); + LOG.debug("t1_syncup should have 206 rows at source, and it is " + rowCount_ht1Source); + rowCount_ht2Source = utility1.countRows(ht2Source); + LOG.debug("t2_syncup should have 406 rows at source, and it is " + rowCount_ht2Source); + } + assertEquals("@Peer1 t1_syncup should be sync up and have 200 rows", 200, + rowCount_ht1TargetAtPeer1); + assertEquals("@Peer1 t2_syncup should be sync up and have 400 rows", 400, + rowCount_ht2TargetAtPeer1); + } + if (rowCount_ht1TargetAtPeer1 == 200 && rowCount_ht2TargetAtPeer1 == 400) { + LOG.info("SyncUpAfterBulkLoad succeeded at retry = " + i); + break; + } else { + LOG.debug("SyncUpAfterBulkLoad failed at retry = " + i + ", with rowCount_ht1TargetPeer1 =" + + rowCount_ht1TargetAtPeer1 + " and rowCount_ht2TargetAtPeer1 =" + + rowCount_ht2TargetAtPeer1); + } + Thread.sleep(SLEEP_TIME); + } + } + + private void loadAndReplicateHFiles(boolean verifyReplicationOnSlave, + Iterator randomHFileRangeListIterator) throws Exception { + LOG.debug("loadAndReplicateHFiles"); + + // Load 100 + 3 hfiles to t1_syncup. + byte[][][] hfileRanges = + new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()), + Bytes.toBytes(randomHFileRangeListIterator.next()) } }; + loadAndValidateHFileReplication("HFileReplication_1", row, famName, ht1Source, hfileRanges, + 100); + + hfileRanges = + new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()), + Bytes.toBytes(randomHFileRangeListIterator.next()) } }; + loadAndValidateHFileReplication("HFileReplication_1", row, noRepfamName, ht1Source, + hfileRanges, 3); + + // Load 200 + 3 hfiles to t2_syncup. 
+ hfileRanges = + new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()), + Bytes.toBytes(randomHFileRangeListIterator.next()) } }; + loadAndValidateHFileReplication("HFileReplication_1", row, famName, ht2Source, hfileRanges, + 200); + + hfileRanges = + new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()), + Bytes.toBytes(randomHFileRangeListIterator.next()) } }; + loadAndValidateHFileReplication("HFileReplication_1", row, noRepfamName, ht2Source, + hfileRanges, 3); + + if (verifyReplicationOnSlave) { + // ensure replication completed + wait(ht1TargetAtPeer1, utility1.countRows(ht1Source) - 3, + "t1_syncup has 103 rows on source, and 100 on slave1"); + + wait(ht2TargetAtPeer1, utility1.countRows(ht2Source) - 3, + "t2_syncup has 203 rows on source, and 200 on slave1"); + } + } + + private void loadAndValidateHFileReplication(String testName, byte[] row, byte[] fam, + Table source, byte[][][] hfileRanges, int numOfRows) throws Exception { + Path dir = utility1.getDataTestDirOnTestFS(testName); + FileSystem fs = utility1.getTestFileSystem(); + dir = dir.makeQualified(fs); + Path familyDir = new Path(dir, Bytes.toString(fam)); + + int hfileIdx = 0; + for (byte[][] range : hfileRanges) { + byte[] from = range[0]; + byte[] to = range[1]; + HFileTestUtil.createHFile(utility1.getConfiguration(), fs, new Path(familyDir, "hfile_" + + hfileIdx++), fam, row, from, to, numOfRows); + } + + final TableName tableName = source.getName(); + LoadIncrementalHFiles loader = new LoadIncrementalHFiles(utility1.getConfiguration()); + String[] args = { dir.toString(), tableName.toString() }; + loader.run(args); + } + + private void wait(Table target, int expectedCount, String msg) throws IOException, + InterruptedException { + for (int i = 0; i < NB_RETRIES; i++) { + int rowCount_ht2TargetAtPeer1 = utility2.countRows(target); + if (i == NB_RETRIES - 1) { + assertEquals(msg, expectedCount, rowCount_ht2TargetAtPeer1); + } + if (expectedCount == rowCount_ht2TargetAtPeer1) { + break; + } + Thread.sleep(SLEEP_TIME); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java index db58ccbdb7c..e148344c4f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java @@ -21,8 +21,16 @@ package org.apache.hadoop.hbase.replication.regionserver; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import java.security.SecureRandom; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.hbase.client.Table; @@ -30,22 +38,34 @@ import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseTestingUtility; import 
org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey; +import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.HFileTestUtil; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -57,21 +77,18 @@ public class TestReplicationSink { private static final Log LOG = LogFactory.getLog(TestReplicationSink.class); private static final int BATCH_SIZE = 10; - private final static HBaseTestingUtility TEST_UTIL = - new HBaseTestingUtility(); + protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static ReplicationSink SINK; + protected static ReplicationSink SINK; - private static final TableName TABLE_NAME1 = - TableName.valueOf("table1"); - private static final TableName TABLE_NAME2 = - TableName.valueOf("table2"); + protected static final TableName TABLE_NAME1 = TableName.valueOf("table1"); + protected static final TableName TABLE_NAME2 = TableName.valueOf("table2"); - private static final byte[] FAM_NAME1 = Bytes.toBytes("info1"); - private static final byte[] FAM_NAME2 = Bytes.toBytes("info2"); + protected static final byte[] FAM_NAME1 = Bytes.toBytes("info1"); + protected static final byte[] FAM_NAME2 = Bytes.toBytes("info2"); - private static Table table1; - private static Stoppable STOPPABLE = new Stoppable() { + protected static Table table1; + protected static Stoppable STOPPABLE = new Stoppable() { final AtomicBoolean stop = new AtomicBoolean(false); @Override @@ -84,10 +101,13 @@ public class TestReplicationSink { LOG.info("STOPPING BECAUSE: " + why); this.stop.set(true); } - + }; - private static Table table2; + protected static Table table2; + protected static String baseNamespaceDir; + protected static String hfileArchiveDir; + protected static String replicationClusterId; /** * @throws java.lang.Exception @@ -97,11 +117,18 @@ public class TestReplicationSink { TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true); TEST_UTIL.getConfiguration().setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT); + TEST_UTIL.getConfiguration().set("hbase.replication.source.fs.conf.provider", + TestSourceFSConfigurationProvider.class.getCanonicalName()); + TEST_UTIL.startMiniCluster(3); SINK = new ReplicationSink(new Configuration(TEST_UTIL.getConfiguration()), STOPPABLE); table1 = TEST_UTIL.createTable(TABLE_NAME1, FAM_NAME1); table2 = TEST_UTIL.createTable(TABLE_NAME2, FAM_NAME2); + Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration()); + baseNamespaceDir = new Path(rootDir, new 
Path(HConstants.BASE_NAMESPACE_DIR)).toString(); + hfileArchiveDir = new Path(rootDir, new Path(HConstants.HFILE_ARCHIVE_DIRECTORY)).toString(); + replicationClusterId = "12345"; } /** @@ -133,7 +160,8 @@ public class TestReplicationSink { for(int i = 0; i < BATCH_SIZE; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator())); + SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), + replicationClusterId, baseNamespaceDir, hfileArchiveDir); Scan scan = new Scan(); ResultScanner scanRes = table1.getScanner(scan); assertEquals(BATCH_SIZE, scanRes.next(BATCH_SIZE).length); @@ -150,7 +178,8 @@ public class TestReplicationSink { for(int i = 0; i < BATCH_SIZE/2; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells)); + SINK.replicateEntries(entries, CellUtil.createCellScanner(cells), replicationClusterId, + baseNamespaceDir, hfileArchiveDir); entries = new ArrayList(BATCH_SIZE); cells = new ArrayList(); @@ -159,7 +188,8 @@ public class TestReplicationSink { i % 2 != 0 ? KeyValue.Type.Put: KeyValue.Type.DeleteColumn, cells)); } - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator())); + SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), + replicationClusterId, baseNamespaceDir, hfileArchiveDir); Scan scan = new Scan(); ResultScanner scanRes = table1.getScanner(scan); assertEquals(BATCH_SIZE/2, scanRes.next(BATCH_SIZE).length); @@ -178,7 +208,8 @@ public class TestReplicationSink { i, KeyValue.Type.Put, cells)); } - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator())); + SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), + replicationClusterId, baseNamespaceDir, hfileArchiveDir); Scan scan = new Scan(); ResultScanner scanRes = table2.getScanner(scan); for(Result res : scanRes) { @@ -197,14 +228,16 @@ public class TestReplicationSink { for(int i = 0; i < 3; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator())); + SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), + replicationClusterId, baseNamespaceDir, hfileArchiveDir); entries = new ArrayList(3); cells = new ArrayList(); entries.add(createEntry(TABLE_NAME1, 0, KeyValue.Type.DeleteColumn, cells)); entries.add(createEntry(TABLE_NAME1, 1, KeyValue.Type.DeleteFamily, cells)); entries.add(createEntry(TABLE_NAME1, 2, KeyValue.Type.DeleteColumn, cells)); - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator())); + SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), + replicationClusterId, baseNamespaceDir, hfileArchiveDir); Scan scan = new Scan(); ResultScanner scanRes = table1.getScanner(scan); @@ -227,12 +260,96 @@ public class TestReplicationSink { for(int i = 3; i < 5; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } - SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator())); + SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), + replicationClusterId, baseNamespaceDir, hfileArchiveDir); Get get = new Get(Bytes.toBytes(1)); Result res = table1.get(get); assertEquals(0, res.size()); } + /** + * Test replicateEntries with a bulk load entry for 25 HFiles + */ + @Test + public void 
testReplicateEntriesForHFiles() throws Exception { + Path dir = TEST_UTIL.getDataTestDirOnTestFS("testReplicateEntries"); + Path familyDir = new Path(dir, Bytes.toString(FAM_NAME1)); + int numRows = 10; + + List p = new ArrayList<>(1); + + // 1. Generate 25 hfile ranges + Random rng = new SecureRandom(); + Set numbers = new HashSet<>(); + while (numbers.size() < 50) { + numbers.add(rng.nextInt(1000)); + } + List numberList = new ArrayList<>(numbers); + Collections.sort(numberList); + + // 2. Create 25 hfiles + Configuration conf = TEST_UTIL.getConfiguration(); + FileSystem fs = dir.getFileSystem(conf); + Iterator numbersItr = numberList.iterator(); + for (int i = 0; i < 25; i++) { + Path hfilePath = new Path(familyDir, "hfile_" + i); + HFileTestUtil.createHFile(conf, fs, hfilePath, FAM_NAME1, FAM_NAME1, + Bytes.toBytes(numbersItr.next()), Bytes.toBytes(numbersItr.next()), numRows); + p.add(hfilePath); + } + + // 3. Create a BulkLoadDescriptor and a WALEdit + Map> storeFiles = new HashMap<>(1); + storeFiles.put(FAM_NAME1, p); + WALEdit edit = null; + WALProtos.BulkLoadDescriptor loadDescriptor = null; + + try (Connection c = ConnectionFactory.createConnection(conf); + RegionLocator l = c.getRegionLocator(TABLE_NAME1)) { + HRegionInfo regionInfo = l.getAllRegionLocations().get(0).getRegionInfo(); + loadDescriptor = + ProtobufUtil.toBulkLoadDescriptor(TABLE_NAME1, + ByteStringer.wrap(regionInfo.getEncodedNameAsBytes()), storeFiles, 1); + edit = WALEdit.createBulkLoadEvent(regionInfo, loadDescriptor); + } + List entries = new ArrayList(1); + + // 4. Create a WALEntryBuilder + WALEntry.Builder builder = createWALEntryBuilder(TABLE_NAME1); + + // 5. Copy the hfile to the path as it is in reality + for (int i = 0; i < 25; i++) { + String pathToHfileFromNS = + new StringBuilder(100).append(TABLE_NAME1.getNamespaceAsString()).append(Path.SEPARATOR) + .append(Bytes.toString(TABLE_NAME1.getName())).append(Path.SEPARATOR) + .append(Bytes.toString(loadDescriptor.getEncodedRegionName().toByteArray())) + .append(Path.SEPARATOR).append(Bytes.toString(FAM_NAME1)).append(Path.SEPARATOR) + .append("hfile_" + i).toString(); + String dst = baseNamespaceDir + Path.SEPARATOR + pathToHfileFromNS; + + FileUtil.copy(fs, p.get(0), fs, new Path(dst), false, conf); + } + + entries.add(builder.build()); + ResultScanner scanRes = null; + try { + Scan scan = new Scan(); + scanRes = table1.getScanner(scan); + // 6. Assert no existing data in table + assertEquals(0, scanRes.next(numRows).length); + // 7. Replicate the bulk loaded entry + SINK.replicateEntries(entries, CellUtil.createCellScanner(edit.getCells().iterator()), + replicationClusterId, baseNamespaceDir, hfileArchiveDir); + scanRes = table1.getScanner(scan); + // 8. Assert data is replicated + assertEquals(numRows, scanRes.next(numRows).length); + } finally { + if (scanRes != null) { + scanRes.close(); + } + } + } + private WALEntry createEntry(TableName table, int row, KeyValue.Type type, List cells) { byte[] fam = table.equals(TABLE_NAME1) ? 
FAM_NAME1 : FAM_NAME2; byte[] rowBytes = Bytes.toBytes(row); @@ -255,6 +372,13 @@ public class TestReplicationSink { kv = new KeyValue(rowBytes, fam, null, now, KeyValue.Type.DeleteFamily); } + WALEntry.Builder builder = createWALEntryBuilder(table); + cells.add(kv); + + return builder.build(); + } + + private WALEntry.Builder createWALEntryBuilder(TableName table) { WALEntry.Builder builder = WALEntry.newBuilder(); builder.setAssociatedCellCount(1); WALKey.Builder keyBuilder = WALKey.newBuilder(); @@ -263,13 +387,10 @@ public class TestReplicationSink { uuidBuilder.setMostSigBits(HConstants.DEFAULT_CLUSTER_ID.getMostSignificantBits()); keyBuilder.setClusterId(uuidBuilder.build()); keyBuilder.setTableName(ByteStringer.wrap(table.getName())); - keyBuilder.setWriteTime(now); + keyBuilder.setWriteTime(System.currentTimeMillis()); keyBuilder.setEncodedRegionName(ByteStringer.wrap(HConstants.EMPTY_BYTE_ARRAY)); keyBuilder.setLogSequenceNumber(-1); builder.setKey(keyBuilder.build()); - cells.add(kv); - - return builder.build(); + return builder; } - } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java index 9f7f36d0de1..6b945d2ef86 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java @@ -19,13 +19,17 @@ package org.apache.hadoop.hbase.replication.regionserver; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import java.net.URLEncoder; import java.util.ArrayList; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.NavigableMap; import java.util.SortedMap; import java.util.SortedSet; import java.util.TreeSet; @@ -51,6 +55,8 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor; import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; @@ -63,6 +69,7 @@ import org.apache.hadoop.hbase.replication.ReplicationSourceDummy; import org.apache.hadoop.hbase.replication.ReplicationStateZKBase; import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.NodeFailoverWorker; import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.WAL; @@ -107,6 +114,8 @@ public class TestReplicationSourceManager { private static final byte[] f1 = Bytes.toBytes("f1"); + private static final byte[] f2 = Bytes.toBytes("f2"); + private static final TableName test = TableName.valueOf("test"); @@ -160,10 +169,10 @@ public class TestReplicationSourceManager { manager.addSource(slaveId); htd = new HTableDescriptor(test); - HColumnDescriptor col = new HColumnDescriptor("f1"); + 
HColumnDescriptor col = new HColumnDescriptor(f1); col.setScope(HConstants.REPLICATION_SCOPE_GLOBAL); htd.addFamily(col); - col = new HColumnDescriptor("f2"); + col = new HColumnDescriptor(f2); col.setScope(HConstants.REPLICATION_SCOPE_LOCAL); htd.addFamily(col); @@ -415,6 +424,63 @@ public class TestReplicationSourceManager { s0.abort("", null); } + @Test + public void testBulkLoadWALEditsWithoutBulkLoadReplicationEnabled() throws Exception { + // 1. Create wal key + WALKey logKey = new WALKey(); + // 2. Get the bulk load wal edit event + WALEdit logEdit = getBulkLoadWALEdit(); + + // 3. Get the scopes for the key + Replication.scopeWALEdits(htd, logKey, logEdit, conf, manager); + + // 4. Assert that no bulk load entry scopes are added if bulk load hfile replication is disabled + assertNull("No bulk load entries scope should be added if bulk load replication is diabled.", + logKey.getScopes()); + } + + @Test + public void testBulkLoadWALEdits() throws Exception { + // 1. Create wal key + WALKey logKey = new WALKey(); + // 2. Get the bulk load wal edit event + WALEdit logEdit = getBulkLoadWALEdit(); + // 3. Enable bulk load hfile replication + Configuration bulkLoadConf = HBaseConfiguration.create(conf); + bulkLoadConf.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true); + + // 4. Get the scopes for the key + Replication.scopeWALEdits(htd, logKey, logEdit, bulkLoadConf, manager); + + NavigableMap scopes = logKey.getScopes(); + // Assert family with replication scope global is present in the key scopes + assertTrue("This family scope is set to global, should be part of replication key scopes.", + scopes.containsKey(f1)); + // Assert family with replication scope local is not present in the key scopes + assertFalse("This family scope is set to local, should not be part of replication key scopes", + scopes.containsKey(f2)); + } + + private WALEdit getBulkLoadWALEdit() { + // 1. Create store files for the families + Map> storeFiles = new HashMap<>(1); + List p = new ArrayList<>(1); + p.add(new Path(Bytes.toString(f1))); + storeFiles.put(f1, p); + + p = new ArrayList<>(1); + p.add(new Path(Bytes.toString(f2))); + storeFiles.put(f2, p); + + // 2. Create bulk load descriptor + BulkLoadDescriptor desc = ProtobufUtil.toBulkLoadDescriptor(hri.getTable(), + ByteStringer.wrap(hri.getEncodedNameAsBytes()), storeFiles, 1); + + // 3. create bulk load wal edit event + WALEdit logEdit = WALEdit.createBulkLoadEvent(hri, desc); + return logEdit; + } + static class DummyNodeFailoverWorker extends Thread { private SortedMap> logZnodesMap; Server server; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSourceFSConfigurationProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSourceFSConfigurationProvider.java new file mode 100644 index 00000000000..a14c02bd762 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSourceFSConfigurationProvider.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. + */ +package org.apache.hadoop.hbase.replication.regionserver; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +@InterfaceAudience.Private +public class TestSourceFSConfigurationProvider implements SourceFSConfigurationProvider { + @Override + public Configuration getConf(Configuration sinkConf, String replicationClusterId) + throws IOException { + return sinkConf; + } +} From 2d74dcfadcb216a19b7502590f93cc2b350a7546 Mon Sep 17 00:00:00 2001 From: Matteo Bertozzi Date: Thu, 10 Dec 2015 13:22:48 -0800 Subject: [PATCH 07/72] HBASE-14941 locate_region shell command --- hbase-shell/src/main/ruby/hbase/admin.rb | 26 ++++++++--- hbase-shell/src/main/ruby/shell.rb | 1 + .../main/ruby/shell/commands/locate_region.rb | 44 +++++++++++++++++++ 3 files changed, 64 insertions(+), 7 deletions(-) create mode 100644 hbase-shell/src/main/ruby/shell/commands/locate_region.rb diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index 4b4ea24c7be..7765ef03c44 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -99,6 +99,15 @@ module Hbase end end + def locate_region(table_name, row_key) + locator = @connection.getRegionLocator(TableName.valueOf(table_name)) + begin + return locator.getRegionLocation(Bytes.toBytesBinary(row_key)) + ensure + locator.close() + end + end + #---------------------------------------------------------------------------------------------- # Requests a cluster balance # Returns true if balancer ran @@ -436,10 +445,13 @@ module Hbase def truncate_preserve(table_name, conf = @conf) h_table = @connection.getTable(TableName.valueOf(table_name)) locator = @connection.getRegionLocator(TableName.valueOf(table_name)) - splits = locator.getAllRegionLocations(). - map{|i| Bytes.toString(i.getRegionInfo().getStartKey)}. - delete_if{|k| k == ""}.to_java :String - locator.close() + begin + splits = locator.getAllRegionLocations(). + map{|i| Bytes.toString(i.getRegionInfo().getStartKey)}. + delete_if{|k| k == ""}.to_java :String + ensure + locator.close() + end table_description = @admin.getTableDescriptor(TableName.valueOf(table_name)) yield 'Disabling table...' if block_given? 
@@ -674,7 +686,7 @@ module Hbase end elsif format == "replication" #check whether replication is enabled or not - if (!@admin.getConfiguration().getBoolean(org.apache.hadoop.hbase.HConstants::REPLICATION_ENABLE_KEY, + if (!@admin.getConfiguration().getBoolean(org.apache.hadoop.hbase.HConstants::REPLICATION_ENABLE_KEY, org.apache.hadoop.hbase.HConstants::REPLICATION_ENABLE_DEFAULT)) puts("Please enable replication first.") else @@ -686,7 +698,7 @@ module Hbase rSourceString = " SOURCE:" rLoadSink = sl.getReplicationLoadSink() rSinkString << " AgeOfLastAppliedOp=" + rLoadSink.getAgeOfLastAppliedOp().to_s - rSinkString << ", TimeStampsOfLastAppliedOp=" + + rSinkString << ", TimeStampsOfLastAppliedOp=" + (java.util.Date.new(rLoadSink.getTimeStampsOfLastAppliedOp())).toString() rLoadSourceList = sl.getReplicationLoadSourceList() index = 0 @@ -695,7 +707,7 @@ module Hbase rSourceString << " PeerID=" + rLoadSource.getPeerID() rSourceString << ", AgeOfLastShippedOp=" + rLoadSource.getAgeOfLastShippedOp().to_s rSourceString << ", SizeOfLogQueue=" + rLoadSource.getSizeOfLogQueue().to_s - rSourceString << ", TimeStampsOfLastShippedOp=" + + rSourceString << ", TimeStampsOfLastShippedOp=" + (java.util.Date.new(rLoadSource.getTimeStampOfLastShippedOp())).toString() rSourceString << ", Replication Lag=" + rLoadSource.getReplicationLag().to_s index = index + 1 diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb index 7bcc60e5b6c..067111818f5 100644 --- a/hbase-shell/src/main/ruby/shell.rb +++ b/hbase-shell/src/main/ruby/shell.rb @@ -267,6 +267,7 @@ Shell.load_command_group( alter_status alter_async get_table + locate_region ], :aliases => { 'describe' => ['desc'] diff --git a/hbase-shell/src/main/ruby/shell/commands/locate_region.rb b/hbase-shell/src/main/ruby/shell/commands/locate_region.rb new file mode 100644 index 00000000000..b1e8c7bf485 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/locate_region.rb @@ -0,0 +1,44 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +module Shell + module Commands + class LocateRegion < Command + def help + return <<-EOF +Locate the region given a table name and a row-key + + hbase> locate_region 'tableName', 'key0' +EOF + end + + def command(table, row_key) + now = Time.now + + region_location = admin.locate_region(table, row_key) + hri = region_location.getRegionInfo() + + formatter.header([ "HOST", "REGION" ]) + formatter.row([region_location.getHostnamePort(), hri.toString()]) + formatter.footer(now, 1) + end + end + end +end From 8508dd07ff8038f6df192087c308b816baeac29d Mon Sep 17 00:00:00 2001 From: Elliott Clark Date: Mon, 7 Dec 2015 18:33:35 -0800 Subject: [PATCH 08/72] HBASE-14946 Don't allow multi's to over run the max result size. Summary: * Add VersionInfoUtil to determine if a client has a specified version or better * Add an exception type to say that the response should be chunked * Add on client knowledge of retry exceptions * Add on metrics for how often this happens Test Plan: Added a unit test Differential Revision: https://reviews.facebook.net/D51771 --- .../hbase/MultiActionResultTooLarge.java | 31 +++++ .../hbase/RetryImmediatelyException.java | 27 ++++ .../hadoop/hbase/client/AsyncProcess.java | 89 +++++++++---- .../hbase/client/ConnectionManager.java | 4 +- .../apache/hadoop/hbase/client/Result.java | 3 + .../hbase/ipc/MetricsHBaseServerSource.java | 8 +- .../ipc/MetricsHBaseServerSourceImpl.java | 9 ++ .../hadoop/hbase/client/VersionInfoUtil.java | 63 +++++++++ .../hadoop/hbase/ipc/MetricsHBaseServer.java | 3 + .../hadoop/hbase/ipc/RpcCallContext.java | 23 +++- .../apache/hadoop/hbase/ipc/RpcServer.java | 28 +++- .../procedure/ProcedurePrepareLatch.java | 23 +--- .../hbase/regionserver/RSRpcServices.java | 124 ++++++++++++------ .../hbase/client/TestMultiRespectsLimits.java | 102 ++++++++++++++ 14 files changed, 449 insertions(+), 88 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java new file mode 100644 index 00000000000..d06eea1b974 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase; + +/** + * Exception thrown when the result needs to be chunked on the server side. + * It signals that retries should happen right away and not count against the number of + * retries because some of the multi was a success. + */ +public class MultiActionResultTooLarge extends RetryImmediatelyException { + + public MultiActionResultTooLarge(String s) { + super(s); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java new file mode 100644 index 00000000000..1b3990477ac --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import java.io.IOException; + +public class RetryImmediatelyException extends IOException { + public RetryImmediatelyException(String s) { + super(s); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java index 3d55efc89d3..0d093b16e15 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java @@ -41,6 +41,7 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.RetryImmediatelyException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -126,19 +127,36 @@ class AsyncProcess { public void waitUntilDone() throws InterruptedIOException; } - /** Return value from a submit that didn't contain any requests. */ + /** + * Return value from a submit that didn't contain any requests. 
+ */ private static final AsyncRequestFuture NO_REQS_RESULT = new AsyncRequestFuture() { - public final Object[] result = new Object[0]; + + final Object[] result = new Object[0]; + @Override - public boolean hasError() { return false; } + public boolean hasError() { + return false; + } + @Override - public RetriesExhaustedWithDetailsException getErrors() { return null; } + public RetriesExhaustedWithDetailsException getErrors() { + return null; + } + @Override - public List getFailedOperations() { return null; } + public List getFailedOperations() { + return null; + } + @Override - public Object[] getResults() { return result; } + public Object[] getResults() { + return result; + } + @Override - public void waitUntilDone() throws InterruptedIOException {} + public void waitUntilDone() throws InterruptedIOException { + } }; /** Sync point for calls to multiple replicas for the same user request (Get). @@ -306,8 +324,12 @@ class AsyncProcess { * RuntimeException */ private ExecutorService getPool(ExecutorService pool) { - if (pool != null) return pool; - if (this.pool != null) return this.pool; + if (pool != null) { + return pool; + } + if (this.pool != null) { + return this.pool; + } throw new RuntimeException("Neither AsyncProcess nor request have ExecutorService"); } @@ -365,7 +387,9 @@ class AsyncProcess { Row r = it.next(); HRegionLocation loc; try { - if (r == null) throw new IllegalArgumentException("#" + id + ", row cannot be null"); + if (r == null) { + throw new IllegalArgumentException("#" + id + ", row cannot be null"); + } // Make sure we get 0-s replica. RegionLocations locs = connection.locateRegion( tableName, r.getRow(), true, true, RegionReplicaUtil.DEFAULT_REPLICA_ID); @@ -728,10 +752,10 @@ class AsyncProcess { // Normal case: we received an answer from the server, and it's not an exception. receiveMultiAction(multiAction, server, res, numAttempt); } catch (Throwable t) { - // Something really bad happened. We are on the send thread that will now die. - LOG.error("Internal AsyncProcess #" + id + " error for " - + tableName + " processing for " + server, t); - throw new RuntimeException(t); + // Something really bad happened. We are on the send thread that will now die. + LOG.error("Internal AsyncProcess #" + id + " error for " + + tableName + " processing for " + server, t); + throw new RuntimeException(t); } finally { decTaskCounters(multiAction.getRegions(), server); if (callsInProgress != null && callable != null) { @@ -750,19 +774,25 @@ class AsyncProcess { private final TableName tableName; private final AtomicLong actionsInProgress = new AtomicLong(-1); - /** The lock controls access to results. It is only held when populating results where + /** + * The lock controls access to results. It is only held when populating results where * there might be several callers (eventual consistency gets). For other requests, - * there's one unique call going on per result index. */ + * there's one unique call going on per result index. + */ private final Object replicaResultLock = new Object(); - /** Result array. Null if results are not needed. Otherwise, each index corresponds to + /** + * Result array. Null if results are not needed. Otherwise, each index corresponds to * the action index in initial actions submitted. For most request types, has null-s for * requests that are not done, and result/exception for those that are done. 
* For eventual-consistency gets, initially the same applies; at some point, replica calls * might be started, and ReplicaResultState is put at the corresponding indices. The * returning calls check the type to detect when this is the case. After all calls are done, - * ReplicaResultState-s are replaced with results for the user. */ + * ReplicaResultState-s are replaced with results for the user. + */ private final Object[] results; - /** Indices of replica gets in results. If null, all or no actions are replica-gets. */ + /** + * Indices of replica gets in results. If null, all or no actions are replica-gets. + */ private final int[] replicaGetIndices; private final boolean hasAnyReplicaGets; private final long nonceGroup; @@ -777,7 +807,9 @@ class AsyncProcess { this.actionsInProgress.set(actions.size()); if (results != null) { assert needResults; - if (results.length != actions.size()) throw new AssertionError("results.length"); + if (results.length != actions.size()) { + throw new AssertionError("results.length"); + } this.results = results; for (int i = 0; i != this.results.length; ++i) { results[i] = null; @@ -1177,9 +1209,13 @@ class AsyncProcess { // We have two contradicting needs here: // 1) We want to get the new location after having slept, as it may change. // 2) We want to take into account the location when calculating the sleep time. + // 3) If all this is just because the response needed to be chunked try again FAST. // It should be possible to have some heuristics to take the right decision. Short term, // we go for one. - long backOffTime = errorsByServer.calculateBackoffTime(oldServer, pause); + boolean retryImmediately = throwable instanceof RetryImmediatelyException; + int nextAttemptNumber = retryImmediately ? numAttempt : numAttempt + 1; + long backOffTime = retryImmediately ? 0 : + errorsByServer.calculateBackoffTime(oldServer, pause); if (numAttempt > startLogErrorsCnt) { // We use this value to have some logs when we have multiple failures, but not too many // logs, as errors are to be expected when a region moves, splits and so on @@ -1188,14 +1224,16 @@ class AsyncProcess { } try { - Thread.sleep(backOffTime); + if (backOffTime > 0) { + Thread.sleep(backOffTime); + } } catch (InterruptedException e) { LOG.warn("#" + id + ", not sent: " + toReplay.size() + " operations, " + oldServer, e); Thread.currentThread().interrupt(); return; } - groupAndSendMultiAction(toReplay, numAttempt + 1); + groupAndSendMultiAction(toReplay, nextAttemptNumber); } private void logNoResubmit(ServerName oldServer, int numAttempt, @@ -1255,6 +1293,7 @@ class AsyncProcess { // Failure: retry if it's make sense else update the errors lists if (result == null || result instanceof Throwable) { Row row = sentAction.getAction(); + throwable = ConnectionManager.findException(result); // Register corresponding failures once per server/once per region. if (!regionFailureRegistered) { regionFailureRegistered = true; @@ -1404,7 +1443,9 @@ class AsyncProcess { // will either see state with callCount 0 after locking it; or will not see state at all // we will replace it with the result. 
synchronized (state) { - if (state.callCount == 0) return; // someone already set the result + if (state.callCount == 0) { + return; // someone already set the result + } state.callCount = 0; } synchronized (replicaResultLock) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java index 0e4fb3aa1f5..256a2273e9f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java @@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.RegionTooBusyException; +import org.apache.hadoop.hbase.RetryImmediatelyException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; @@ -2714,7 +2715,8 @@ class ConnectionManager { Throwable cur = (Throwable) exception; while (cur != null) { if (cur instanceof RegionMovedException || cur instanceof RegionOpeningException - || cur instanceof RegionTooBusyException || cur instanceof ThrottlingException) { + || cur instanceof RegionTooBusyException || cur instanceof ThrottlingException + || cur instanceof RetryImmediatelyException) { return cur; } if (cur instanceof RemoteException) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java index d2959534254..e764c4eaae0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java @@ -913,6 +913,9 @@ public class Result implements CellScannable, CellScanner { */ public static long getTotalSizeOfCells(Result result) { long size = 0; + if (result.isEmpty()) { + return size; + } for (Cell c : result.rawCells()) { size += CellUtil.estimatedHeapSizeOf(c); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java index 5cf71f37eaa..061a67210b7 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java @@ -74,6 +74,9 @@ public interface MetricsHBaseServerSource extends BaseSource { String EXCEPTIONS_SANITY_NAME="exceptions.FailedSanityCheckException"; String EXCEPTIONS_MOVED_NAME="exceptions.RegionMovedException"; String EXCEPTIONS_NSRE_NAME="exceptions.NotServingRegionException"; + String EXCEPTIONS_MULTI_TOO_LARGE_NAME = "exceptions.multiResponseTooLarge"; + String EXCEPTIONS_MULTI_TOO_LARGE_DESC = "A response to a multi request was too large and the " + + "rest of the requests will have to be retried."; void authorizationSuccess(); @@ -96,6 +99,7 @@ public interface MetricsHBaseServerSource extends BaseSource { void notServingRegionException(); void unknownScannerException(); void tooBusyException(); + void multiActionTooLargeException(); void sentBytes(long count); @@ -110,4 +114,6 @@ public interface MetricsHBaseServerSource extends BaseSource { void processedCall(int processingTime); void queuedAndProcessedCall(int totalTime); - } + + +} diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java index 78b1c6688bb..dd2299579fb 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java @@ -31,6 +31,7 @@ import org.apache.hadoop.metrics2.lib.MutableHistogram; public class MetricsHBaseServerSourceImpl extends BaseSourceImpl implements MetricsHBaseServerSource { + private final MetricsHBaseServerWrapper wrapper; private final MutableCounterLong authorizationSuccesses; private final MutableCounterLong authorizationFailures; @@ -47,6 +48,7 @@ public class MetricsHBaseServerSourceImpl extends BaseSourceImpl private final MutableCounterLong exceptionsSanity; private final MutableCounterLong exceptionsNSRE; private final MutableCounterLong exceptionsMoved; + private final MutableCounterLong exceptionsMultiTooLarge; private MutableHistogram queueCallTime; @@ -81,6 +83,8 @@ public class MetricsHBaseServerSourceImpl extends BaseSourceImpl .newCounter(EXCEPTIONS_MOVED_NAME, EXCEPTIONS_TYPE_DESC, 0L); this.exceptionsNSRE = this.getMetricsRegistry() .newCounter(EXCEPTIONS_NSRE_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsMultiTooLarge = this.getMetricsRegistry() + .newCounter(EXCEPTIONS_MULTI_TOO_LARGE_NAME, EXCEPTIONS_MULTI_TOO_LARGE_DESC, 0L); this.authenticationSuccesses = this.getMetricsRegistry().newCounter( AUTHENTICATION_SUCCESSES_NAME, AUTHENTICATION_SUCCESSES_DESC, 0L); @@ -159,6 +163,11 @@ public class MetricsHBaseServerSourceImpl extends BaseSourceImpl exceptionsBusy.incr(); } + @Override + public void multiActionTooLargeException() { + exceptionsMultiTooLarge.incr(); + } + @Override public void authenticationSuccess() { authenticationSuccesses.incr(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java new file mode 100644 index 00000000000..c40551899ed --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.ipc.RpcCallContext; +import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; + + +/** + * Class to help with parsing the version info. 
+ */ +@InterfaceAudience.Private +public final class VersionInfoUtil { + + private VersionInfoUtil() { + /* UTIL CLASS ONLY */ + } + + public static boolean currentClientHasMinimumVersion(int major, int minor) { + RpcCallContext call = RpcServer.getCurrentCall(); + HBaseProtos.VersionInfo versionInfo = call != null ? call.getClientVersionInfo() : null; + return hasMinimumVersion(versionInfo, major, minor); + } + + public static boolean hasMinimumVersion(HBaseProtos.VersionInfo versionInfo, + int major, + int minor) { + if (versionInfo != null) { + try { + String[] components = versionInfo.getVersion().split("\\."); + + int clientMajor = components.length > 0 ? Integer.parseInt(components[0]) : 0; + if (clientMajor != major) { + return clientMajor > major; + } + + int clientMinor = components.length > 1 ? Integer.parseInt(components[1]) : 0; + return clientMinor >= minor; + } catch (NumberFormatException e) { + return false; + } + } + return false; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java index d276503ce4d..05bebb83c42 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.ipc; +import org.apache.hadoop.hbase.MultiActionResultTooLarge; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionTooBusyException; import org.apache.hadoop.hbase.UnknownScannerException; @@ -105,6 +106,8 @@ public class MetricsHBaseServer { source.notServingRegionException(); } else if (throwable instanceof FailedSanityCheckException) { source.failedSanityException(); + } else if (throwable instanceof MultiActionResultTooLarge) { + source.multiActionTooLargeException(); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java index 976b50873e8..3e38dbf24b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java @@ -19,10 +19,11 @@ package org.apache.hadoop.hbase.ipc; import java.net.InetAddress; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo; import org.apache.hadoop.hbase.security.User; - +@InterfaceAudience.Private public interface RpcCallContext extends Delayable { /** * Check if the caller who made this IPC call has disconnected. @@ -40,7 +41,7 @@ public interface RpcCallContext extends Delayable { * support cellblocks while fielding requests from clients that do not. * @return True if the client supports cellblocks, else return all content in pb */ - boolean isClientCellBlockSupport(); + boolean isClientCellBlockSupported(); /** * Returns the user credentials associated with the current RPC request or @@ -63,4 +64,22 @@ public interface RpcCallContext extends Delayable { * @return the client version info, or null if the information is not present */ VersionInfo getClientVersionInfo(); + + boolean isRetryImmediatelySupported(); + + /** + * The size of response cells that have been accumulated so far. 
+ * This along with the corresponding increment call is used to ensure that multi's or + * scans dont get too excessively large + */ + long getResponseCellSize(); + + /** + * Add on the given amount to the retained cell size. + * + * This is not thread safe and not synchronized at all. If this is used by more than one thread + * then everything will break. Since this is called for every row synchronization would be too + * onerous. + */ + void incrementResponseCellSize(long cellSize); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index 575503f6719..1a79e2ecd2c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -80,6 +80,7 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.NeedUnmanagedConnectionException; import org.apache.hadoop.hbase.client.Operation; +import org.apache.hadoop.hbase.client.VersionInfoUtil; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.hbase.exceptions.RegionMovedException; @@ -316,6 +317,9 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { private User user; private InetAddress remoteAddress; + private long responseCellSize = 0; + private boolean retryImmediatelySupported; + Call(int id, final BlockingService service, final MethodDescriptor md, RequestHeader header, Message param, CellScanner cellScanner, Connection connection, Responder responder, long size, TraceInfo tinfo, final InetAddress remoteAddress) { @@ -335,6 +339,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { this.tinfo = tinfo; this.user = connection.user; this.remoteAddress = remoteAddress; + this.retryImmediatelySupported = connection.retryImmediatelySupported; } /** @@ -511,7 +516,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { } @Override - public boolean isClientCellBlockSupport() { + public boolean isClientCellBlockSupported() { return this.connection != null && this.connection.codec != null; } @@ -528,6 +533,14 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { return this.size; } + public long getResponseCellSize() { + return responseCellSize; + } + + public void incrementResponseCellSize(long cellSize) { + responseCellSize += cellSize; + } + /** * If we have a response, and delay is not set, then respond * immediately. Otherwise, do not respond to client. This is @@ -563,6 +576,12 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { public VersionInfo getClientVersionInfo() { return connection.getVersionInfo(); } + + + @Override + public boolean isRetryImmediatelySupported() { + return retryImmediatelySupported; + } } /** Listens on the socket. 
Creates jobs for the handler threads*/ @@ -1248,6 +1267,8 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { // was authentication allowed with a fallback to simple auth private boolean authenticatedWithFallback; + private boolean retryImmediatelySupported = false; + public UserGroupInformation attemptingUser = null; // user name before auth protected User user = null; protected UserGroupInformation ugi = null; @@ -1704,6 +1725,9 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { } } if (connectionHeader.hasVersionInfo()) { + // see if this connection will support RetryImmediatelyException + retryImmediatelySupported = VersionInfoUtil.hasMinimumVersion(getVersionInfo(), 1, 2); + AUDITLOG.info("Connection from " + this.hostAddress + " port: " + this.remotePort + " with version info: " + TextFormat.shortDebugString(connectionHeader.getVersionInfo())); @@ -1711,6 +1735,8 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { AUDITLOG.info("Connection from " + this.hostAddress + " port: " + this.remotePort + " with unknown version info"); } + + } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java index 052386aa9ae..b13e44d3a09 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java @@ -24,10 +24,8 @@ import java.util.concurrent.CountDownLatch; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.ipc.RpcServer; -import org.apache.hadoop.hbase.ipc.RpcCallContext; +import org.apache.hadoop.hbase.client.VersionInfoUtil; import org.apache.hadoop.hbase.procedure2.Procedure; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo; /** * Latch used by the Master to have the prepare() sync behaviour for old @@ -44,24 +42,7 @@ public abstract class ProcedurePrepareLatch { } public static boolean hasProcedureSupport() { - return currentClientHasMinimumVersion(1, 1); - } - - private static boolean currentClientHasMinimumVersion(int major, int minor) { - RpcCallContext call = RpcServer.getCurrentCall(); - VersionInfo versionInfo = call != null ? call.getClientVersionInfo() : null; - if (versionInfo != null) { - String[] components = versionInfo.getVersion().split("\\."); - - int clientMajor = components.length > 0 ? Integer.parseInt(components[0]) : 0; - if (clientMajor != major) { - return clientMajor > major; - } - - int clientMinor = components.length > 1 ? 
Integer.parseInt(components[1]) : 0; - return clientMinor >= minor; - } - return false; + return VersionInfoUtil.currentClientHasMinimumVersion(1, 1); } protected abstract void countDown(final Procedure proc); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index ead2d25e9f4..8f85f512d3d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MultiActionResultTooLarge; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -367,7 +368,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, */ private boolean isClientCellBlockSupport() { RpcCallContext context = RpcServer.getCurrentCall(); - return context != null && context.isClientCellBlockSupport(); + return context != null && context.isClientCellBlockSupported(); } private void addResult(final MutateResponse.Builder builder, @@ -426,13 +427,13 @@ public class RSRpcServices implements HBaseRPCErrorHandler, rm = new RowMutations(action.getMutation().getRow().toByteArray()); } switch (type) { - case PUT: - rm.add(ProtobufUtil.toPut(action.getMutation(), cellScanner)); - break; - case DELETE: - rm.add(ProtobufUtil.toDelete(action.getMutation(), cellScanner)); - break; - default: + case PUT: + rm.add(ProtobufUtil.toPut(action.getMutation(), cellScanner)); + break; + case DELETE: + rm.add(ProtobufUtil.toDelete(action.getMutation(), cellScanner)); + break; + default: throw new DoNotRetryIOException("Atomic put and/or delete only, not " + type.name()); } } @@ -469,14 +470,14 @@ public class RSRpcServices implements HBaseRPCErrorHandler, rm = new RowMutations(action.getMutation().getRow().toByteArray()); } switch (type) { - case PUT: - rm.add(ProtobufUtil.toPut(action.getMutation(), cellScanner)); - break; - case DELETE: - rm.add(ProtobufUtil.toDelete(action.getMutation(), cellScanner)); - break; - default: - throw new DoNotRetryIOException("Atomic put and/or delete only, not " + type.name()); + case PUT: + rm.add(ProtobufUtil.toPut(action.getMutation(), cellScanner)); + break; + case DELETE: + rm.add(ProtobufUtil.toDelete(action.getMutation(), cellScanner)); + break; + default: + throw new DoNotRetryIOException("Atomic put and/or delete only, not " + type.name()); } } return region.checkAndRowMutate(row, family, qualifier, compareOp, comparator, rm, Boolean.TRUE); @@ -577,10 +578,43 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // ResultOrException instance that matches each Put or Delete is then added down in the // doBatchOp call. 
We should be staying aligned though the Put and Delete are deferred/batched List mutations = null; - for (ClientProtos.Action action: actions.getActionList()) { + long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable()); + RpcCallContext context = RpcServer.getCurrentCall(); + IOException sizeIOE = null; + for (ClientProtos.Action action : actions.getActionList()) { ClientProtos.ResultOrException.Builder resultOrExceptionBuilder = null; try { Result r = null; + + if (context != null + && context.isRetryImmediatelySupported() + && context.getResponseCellSize() > maxQuotaResultSize) { + + // We're storing the exception since the exception and reason string won't + // change after the response size limit is reached. + if (sizeIOE == null ) { + // We don't need the stack un-winding do don't throw the exception. + // Throwing will kill the JVM's JIT. + // + // Instead just create the exception and then store it. + sizeIOE = new MultiActionResultTooLarge("Max response size exceeded: " + + context.getResponseCellSize()); + + // Only report the exception once since there's only one request that + // caused the exception. Otherwise this number will dominate the exceptions count. + rpcServer.getMetrics().exception(sizeIOE); + } + + // Now that there's an exception is know to be created + // use it for the response. + // + // This will create a copy in the builder. + resultOrExceptionBuilder = ResultOrException.newBuilder(). + setException(ResponseConverter.buildException(sizeIOE)); + resultOrExceptionBuilder.setIndex(action.getIndex()); + builder.addResultOrException(resultOrExceptionBuilder.build()); + continue; + } if (action.hasGet()) { Get get = ProtobufUtil.toGet(action.getGet()); r = region.get(get); @@ -633,11 +667,16 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (isClientCellBlockSupport()) { pbResult = ProtobufUtil.toResultNoData(r); // Hard to guess the size here. Just make a rough guess. - if (cellsToReturn == null) cellsToReturn = new ArrayList(); + if (cellsToReturn == null) { + cellsToReturn = new ArrayList(); + } cellsToReturn.add(r); } else { pbResult = ProtobufUtil.toResult(r); } + if (context != null) { + context.incrementResponseCellSize(Result.getTotalSizeOfCells(r)); + } resultOrExceptionBuilder = ClientProtos.ResultOrException.newBuilder().setResult(pbResult); } @@ -719,8 +758,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler, case SUCCESS: builder.addResultOrException(getResultOrException( - ClientProtos.Result.getDefaultInstance(), index, - ((HRegion)region).getRegionStats())); + ClientProtos.Result.getDefaultInstance(), index, + ((HRegion) region).getRegionStats())); break; } } @@ -869,13 +908,13 @@ public class RSRpcServices implements HBaseRPCErrorHandler, ConnectionUtils.setServerSideHConnectionRetriesConfig(rs.conf, name, LOG); try { rpcServer = new RpcServer(rs, name, getServices(), - bindAddress, // use final bindAddress for this server. - rs.conf, - rpcSchedulerFactory.create(rs.conf, this, rs)); - } catch(BindException be) { + bindAddress, // use final bindAddress for this server. + rs.conf, + rpcSchedulerFactory.create(rs.conf, this, rs)); + } catch (BindException be) { String configName = (this instanceof MasterRpcServices) ? HConstants.MASTER_PORT : - HConstants.REGIONSERVER_PORT; - throw new IOException(be.getMessage() + ". To switch ports use the '" + configName + + HConstants.REGIONSERVER_PORT; + throw new IOException(be.getMessage() + ". 
To switch ports use the '" + configName + "' configuration property.", be.getCause() != null ? be.getCause() : be); } @@ -2006,7 +2045,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // It is also the conduit via which we pass back data. PayloadCarryingRpcController controller = (PayloadCarryingRpcController)rpcc; CellScanner cellScanner = controller != null ? controller.cellScanner(): null; - if (controller != null) controller.setCellScanner(null); + if (controller != null) { + controller.setCellScanner(null); + } long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE; @@ -2072,7 +2113,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (cellsToReturn != null && !cellsToReturn.isEmpty() && controller != null) { controller.setCellScanner(CellUtil.createCellScanner(cellsToReturn)); } - if (processed != null) responseBuilder.setProcessed(processed); + if (processed != null) { + responseBuilder.setProcessed(processed); + } return responseBuilder.build(); } @@ -2089,10 +2132,12 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // rpc controller is how we bring in data via the back door; it is unprotobuf'ed data. // It is also the conduit via which we pass back data. PayloadCarryingRpcController controller = (PayloadCarryingRpcController)rpcc; - CellScanner cellScanner = controller != null? controller.cellScanner(): null; + CellScanner cellScanner = controller != null ? controller.cellScanner() : null; OperationQuota quota = null; // Clear scanner so we are not holding on to reference across call. - if (controller != null) controller.setCellScanner(null); + if (controller != null) { + controller.setCellScanner(null); + } try { checkOpen(); requestCount.increment(); @@ -2245,6 +2290,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, boolean moreResults = true; boolean closeScanner = false; boolean isSmallScan = false; + RpcCallContext context = RpcServer.getCurrentCall(); ScanResponse.Builder builder = ScanResponse.newBuilder(); if (request.hasCloseScanner()) { closeScanner = request.getCloseScanner(); @@ -2325,8 +2371,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // where processing of request takes > lease expiration time. lease = regionServer.leases.removeLease(scannerName); List results = new ArrayList(); - long totalCellSize = 0; - long currentScanResultSize = 0; boolean done = false; // Call coprocessor. Get region info from scanner. @@ -2336,8 +2380,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (!results.isEmpty()) { for (Result r : results) { for (Cell cell : r.rawCells()) { - totalCellSize += CellUtil.estimatedSerializedSizeOf(cell); - currentScanResultSize += CellUtil.estimatedHeapSizeOfWithoutTags(cell); + if (context != null) { + context.incrementResponseCellSize(CellUtil.estimatedSerializedSizeOf(cell)); + } } } } @@ -2370,7 +2415,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // If the coprocessor host is adding to the result list, we cannot guarantee the // correct ordering of partial results and so we prevent partial results from being // formed. 
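The hunks around this point retire the method-local totalCellSize and currentScanResultSize counters: multi() now charges each returned Result against the shared RpcCallContext via Result.getTotalSizeOfCells(), scan() charges each cell via CellUtil.estimatedSerializedSizeOf(), and the line that follows switches the partial-results check to results.isEmpty() because the local byte counter it used to consult is gone. A rough, self-contained sketch of the accounting pattern, using toy names rather than the real HBase classes:

// Toy sketch of the shared response-size budget; names here are illustrative only.
import java.util.ArrayList;
import java.util.List;

final class SizeBudgetSketch {
  /** Stand-in for RpcCallContext's response-size accounting. */
  static final class CallContext {
    private long responseCellSize = 0;
    void incrementResponseCellSize(long sz) { responseCellSize += sz; }
    long getResponseCellSize() { return responseCellSize; }
  }

  /** Returns the results that fit; everything past the cutoff would be retried by the client. */
  static List<String> runActions(List<String> actions, CallContext ctx, long maxResultSize) {
    List<String> results = new ArrayList<>();
    for (String action : actions) {
      if (ctx.getResponseCellSize() > maxResultSize) {
        // The real code answers each remaining action with MultiActionResultTooLarge instead.
        break;
      }
      String result = "value-of-" + action;           // pretend region.get(...)
      ctx.incrementResponseCellSize(result.length()); // pretend estimatedSerializedSizeOf(cell)
      results.add(result);
    }
    return results;
  }

  public static void main(String[] args) {
    CallContext ctx = new CallContext();
    List<String> actions = new ArrayList<>();
    for (int i = 0; i < 100; i++) {
      actions.add("row-" + i);
    }
    System.out.println(runActions(actions, ctx, 200).size() + " actions answered before the cap");
  }
}

In the actual patch the early cut-off in multi() only fires when context.isRetryImmediatelySupported() is true, i.e. the connection header reported a 1.2+ client (VersionInfoUtil.hasMinimumVersion), so older clients are never handed a MultiActionResultTooLarge they would not know to retry immediately.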
- boolean serverGuaranteesOrderOfPartials = currentScanResultSize == 0; + boolean serverGuaranteesOrderOfPartials = results.isEmpty(); boolean allowPartialResults = clientHandlesPartials && serverGuaranteesOrderOfPartials && !isSmallScan; boolean moreRows = false; @@ -2437,7 +2482,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (!values.isEmpty()) { for (Cell cell : values) { - totalCellSize += CellUtil.estimatedSerializedSizeOf(cell); + if (context != null) { + context.incrementResponseCellSize(CellUtil.estimatedSerializedSizeOf(cell)); + } } final boolean partial = scannerContext.partialResultFormed(); results.add(Result.create(values, null, stale, partial)); @@ -2492,9 +2539,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } } region.updateReadRequestsCount(i); - region.getMetrics().updateScanNext(totalCellSize); + long responseCellSize = context != null ? context.getResponseCellSize() : 0; + region.getMetrics().updateScanNext(responseCellSize); if (regionServer.metricsRegionServer != null) { - regionServer.metricsRegionServer.updateScannerNext(totalCellSize); + regionServer.metricsRegionServer.updateScannerNext(responseCellSize); } } finally { region.closeRegionOperation(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java new file mode 100644 index 00000000000..47dd7beccfd --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java @@ -0,0 +1,102 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.CompatibilityFactory; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.ipc.RpcServerInterface; +import org.apache.hadoop.hbase.metrics.BaseSource; +import org.apache.hadoop.hbase.test.MetricsAssertHelper; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.util.ArrayList; +import java.util.List; + +import static junit.framework.TestCase.assertEquals; + +/** + * This test sets the multi size WAAAAAY low and then checks to make sure that gets will still make + * progress. 
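+ * The cap is HConstants.HBASE_SERVER_SCANNER_MAX_RESULT_SIZE_KEY (500 bytes here), the
+ * table is split so the batch of gets spans more than one region, and the RPC metrics
+ * source is checked for the resulting multiResponseTooLarge exceptions.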
+ */ +@Category({MediumTests.class, ClientTests.class}) +public class TestMultiRespectsLimits { + private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final MetricsAssertHelper METRICS_ASSERT = + CompatibilityFactory.getInstance(MetricsAssertHelper.class); + private final static byte[] FAMILY = Bytes.toBytes("D"); + public static final int MAX_SIZE = 500; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.getConfiguration().setLong( + HConstants.HBASE_SERVER_SCANNER_MAX_RESULT_SIZE_KEY, + MAX_SIZE); + + // Only start on regionserver so that all regions are on the same server. + TEST_UTIL.startMiniCluster(1); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testMultiLimits() throws Exception { + final TableName name = TableName.valueOf("testMultiLimits"); + Table t = TEST_UTIL.createTable(name, FAMILY); + TEST_UTIL.loadTable(t, FAMILY, false); + + // Split the table to make sure that the chunking happens accross regions. + try (final Admin admin = TEST_UTIL.getHBaseAdmin()) { + admin.split(name); + TEST_UTIL.waitFor(60000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return admin.getTableRegions(name).size() > 1; + } + }); + } + List gets = new ArrayList<>(MAX_SIZE); + + for (int i = 0; i < MAX_SIZE; i++) { + gets.add(new Get(HBaseTestingUtility.ROWS[i])); + } + Result[] results = t.get(gets); + assertEquals(MAX_SIZE, results.length); + RpcServerInterface rpcServer = TEST_UTIL.getHBaseCluster().getRegionServer(0).getRpcServer(); + BaseSource s = rpcServer.getMetrics().getMetricsSource(); + + // Cells from TEST_UTIL.loadTable have a length of 27. + // Multiplying by less than that gives an easy lower bound on size. + // However in reality each kv is being reported as much higher than that. + METRICS_ASSERT.assertCounterGt("exceptions", (MAX_SIZE * 25) / MAX_SIZE, s); + METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge", + (MAX_SIZE * 25) / MAX_SIZE, s); + } +} From 6163fb965d4b50f09c21992ec22fd3745ffb9d3f Mon Sep 17 00:00:00 2001 From: Elliott Clark Date: Wed, 4 Nov 2015 13:40:46 -0800 Subject: [PATCH 09/72] HBASE-14745 Shade the last few dependencies in hbase-shaded-client --- hbase-shaded/pom.xml | 28 ++++++++++++++++++++++++++++ pom.xml | 3 +++ 2 files changed, 31 insertions(+) diff --git a/hbase-shaded/pom.xml b/hbase-shaded/pom.xml index 76bb68c19a5..7e5378f0299 100644 --- a/hbase-shaded/pom.xml +++ b/hbase-shaded/pom.xml @@ -294,12 +294,40 @@ org.apache.commons.el org.apache.hadoop.hbase.shaded.org.apache.commons.el + + org.apache.commons.httpclient + org.apache.hadoop.hbase.shaded.org.apache.commons.httpclient + + + org.apache.commons.compress + org.apache.hadoop.hbase.shaded.org.apache.commons.compress + + + org.apache.commons.digester + org.apache.hadoop.hbase.shaded.org.apache.commons.digester + + + org.apache.commons.codec + org.apache.hadoop.hbase.shaded.org.apache.commons.codec + net.iharder org.apache.hadoop.hbase.shaded.net.iharder + + + + junit + org.apache.hadoop.hbase.shaded.junit + + + org.junit + org.apache.hadoop.hbase.shaded.org.junit + + + - - - hbase.regionserver.msginterval - 1000 - Interval between messages from the RegionServer to HMaster - in milliseconds. Default is 15. Set this value low if you want unit - tests to be responsive. 
- - - - hbase.defaults.for.version.skip - true - - - hbase.server.thread.wakefrequency - 1000 - Time to sleep in between searches for work (in milliseconds). - Used as sleep interval by service threads such as hbase:meta scanner and log roller. - - - - hbase.master.event.waiting.time - 50 - Time to sleep between checks to see if a table event took place. - - - - hbase.regionserver.handler.count - 5 - - - hbase.regionserver.metahandler.count - 5 - - - hbase.ipc.server.read.threadpool.size - 3 - - - hbase.master.info.port - -1 - The port for the hbase master web UI - Set to -1 if you do not want the info server to run. - - - - hbase.master.port - 0 - Always have masters and regionservers come up on port '0' so we don't clash over - default ports. - - - - hbase.regionserver.port - 0 - Always have masters and regionservers come up on port '0' so we don't clash over - default ports. - - - - hbase.ipc.client.fallback-to-simple-auth-allowed - true - - - - hbase.regionserver.info.port - -1 - The port for the hbase regionserver web UI - Set to -1 if you do not want the info server to run. - - - - hbase.regionserver.info.port.auto - true - Info server auto port bind. Enables automatic port - search if hbase.regionserver.info.port is already in use. - Enabled for testing to run multiple tests on one machine. - - - - hbase.master.lease.thread.wakefrequency - 3000 - The interval between checks for expired region server leases. - This value has been reduced due to the other reduced values above so that - the master will notice a dead region server sooner. The default is 15 seconds. - - - - hbase.regionserver.safemode - false - - Turn on/off safe mode in region server. Always on for production, always off - for tests. - - - - hbase.hregion.max.filesize - 67108864 - - Maximum desired file size for an HRegion. If filesize exceeds - value + (value / 2), the HRegion is split in two. Default: 256M. - - Keep the maximum filesize small so we split more often in tests. - - - - hadoop.log.dir - ${user.dir}/../logs - - - hbase.zookeeper.property.clientPort - 21818 - Property from ZooKeeper's config zoo.cfg. - The port at which the clients will connect. - - - - hbase.defaults.for.version.skip - true - - Set to true to skip the 'hbase.defaults.for.version'. - Setting this to true can be useful in contexts other than - the other side of a maven generation; i.e. running in an - ide. You'll want to set this boolean to true to avoid - seeing the RuntimException complaint: "hbase-default.xml file - seems to be for and old version of HBase (@@@VERSION@@@), this - version is X.X.X-SNAPSHOT" - - - - hbase.table.sanity.checks - false - Skip sanity checks in tests - - - - hbase.procedure.fail.on.corruption - true - - Enable replay sanity checks on procedure tests. - - - diff --git a/hbase-spark/src/test/resources/log4j.properties b/hbase-spark/src/test/resources/log4j.properties deleted file mode 100644 index 4eeeb2c2dee..00000000000 --- a/hbase-spark/src/test/resources/log4j.properties +++ /dev/null @@ -1,66 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Define some default values that can be overridden by system properties -hbase.root.logger=INFO,console -hbase.log.dir=. -hbase.log.file=hbase.log - -# Define the root logger to the system property "hbase.root.logger". -log4j.rootLogger=${hbase.root.logger} - -# Logging Threshold -log4j.threshold=ALL - -# -# Daily Rolling File Appender -# -log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender -log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file} - -# Rollver at midnight -log4j.appender.DRFA.DatePattern=.yyyy-MM-dd - -# 30-day backup -#log4j.appender.DRFA.MaxBackupIndex=30 -log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout -# Debugging Pattern format -log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n - - -# -# console -# Add "console" to rootlogger above if you want to use this -# -log4j.appender.console=org.apache.log4j.ConsoleAppender -log4j.appender.console.target=System.err -log4j.appender.console.layout=org.apache.log4j.PatternLayout -log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n - -# Custom Logging levels - -#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG - -log4j.logger.org.apache.hadoop=WARN -log4j.logger.org.apache.zookeeper=ERROR -log4j.logger.org.apache.hadoop.hbase=DEBUG - -#These two settings are workarounds against spurious logs from the minicluster. -#See HBASE-4709 -log4j.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=ERROR -log4j.org.apache.hadoop.metrics2.util.MBeans=ERROR -# Enable this to get detailed connection error/retry logging. -# log4j.logger.org.apache.hadoop.hbase.client.ConnectionImplementation=TRACE From 2f7d5e6354ca2ca5cbecae7bdd5df79d50848551 Mon Sep 17 00:00:00 2001 From: stack Date: Thu, 10 Dec 2015 21:25:21 -0800 Subject: [PATCH 12/72] HBASE-14946 Don't allow multi's to over run the max result size. --- .../org/apache/hadoop/hbase/MultiActionResultTooLarge.java | 6 +++++- .../org/apache/hadoop/hbase/RetryImmediatelyException.java | 4 ++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java index d06eea1b974..fdff5544cfd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java @@ -18,13 +18,17 @@ package org.apache.hadoop.hbase; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + /** * Exception thrown when the result needs to be chunked on the server side. * It signals that retries should happen right away and not count against the number of * retries because some of the multi was a success. 
*/ +@InterfaceAudience.Public +@InterfaceStability.Evolving public class MultiActionResultTooLarge extends RetryImmediatelyException { - public MultiActionResultTooLarge(String s) { super(s); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java index 1b3990477ac..e0b90fd04e5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java @@ -19,7 +19,11 @@ package org.apache.hadoop.hbase; import java.io.IOException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +@InterfaceAudience.Public +@InterfaceStability.Evolving public class RetryImmediatelyException extends IOException { public RetryImmediatelyException(String s) { super(s); From 9e6acffca07b0d0206c8a5686347687050223de9 Mon Sep 17 00:00:00 2001 From: Ashu Pachauri Date: Tue, 8 Dec 2015 14:25:41 -0800 Subject: [PATCH 13/72] HBASE-14953 Replication: retry on RejectedExecutionException In HBaseInterClusterReplicationEndpoint, we fail the whole batch in case of a RejectedExecutionException on an individual sub-batch. We should let the submitted sub-batches finish and retry only for the remaining ones. Signed-off-by: Elliott Clark --- .../regionserver/HBaseInterClusterReplicationEndpoint.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index 22646dbe666..6bc8c6ad3c9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -29,7 +29,7 @@ import java.util.concurrent.CompletionService; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.Future; -import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -114,8 +114,9 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi // per sink thread pool this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT); - this.exec = new ThreadPoolExecutor(1, maxThreads, 60, TimeUnit.SECONDS, - new SynchronousQueue()); + this.exec = new ThreadPoolExecutor(maxThreads, maxThreads, 60, TimeUnit.SECONDS, + new LinkedBlockingQueue()); + this.exec.allowCoreThreadTimeOut(true); this.replicationBulkLoadDataEnabled = conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, From 3b9b8cc667d3a7ffb3473ac8f181f27cff8c1a4e Mon Sep 17 00:00:00 2001 From: chenheng Date: Mon, 14 Dec 2015 17:20:50 +0800 Subject: [PATCH 14/72] HBASE-14936 CombinedBlockCache should overwrite CacheStats#rollMetricsPeriod() (Jianwei Cui) --- .../hadoop/hbase/io/hfile/CacheStats.java | 7 +- .../hbase/io/hfile/CombinedBlockCache.java | 42 +++++--- .../io/hfile/TestCombinedBlockCache.java | 98 +++++++++++++++++++ 3 files changed, 131 insertions(+), 16 
deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java index 2dae66fc30b..fff6585aea5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java @@ -260,13 +260,14 @@ public class CacheStats { } public double getHitRatioPastNPeriods() { - double ratio = ((double)sum(hitCounts)/(double)sum(requestCounts)); + double ratio = ((double)getSumHitCountsPastNPeriods() / + (double)getSumRequestCountsPastNPeriods()); return Double.isNaN(ratio) ? 0 : ratio; } public double getHitCachingRatioPastNPeriods() { - double ratio = - ((double)sum(hitCachingCounts)/(double)sum(requestCachingCounts)); + double ratio = ((double)getSumHitCachingCountsPastNPeriods() / + (double)getSumRequestCachingCountsPastNPeriods()); return Double.isNaN(ratio) ? 0 : ratio; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index fbc19a06c43..ed6a4bbf2b5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -201,22 +201,38 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { } @Override - public double getHitRatioPastNPeriods() { - double ratio = ((double) (lruCacheStats.getSumHitCountsPastNPeriods() + bucketCacheStats - .getSumHitCountsPastNPeriods()) / (double) (lruCacheStats - .getSumRequestCountsPastNPeriods() + bucketCacheStats - .getSumRequestCountsPastNPeriods())); - return Double.isNaN(ratio) ? 0 : ratio; + public void rollMetricsPeriod() { + lruCacheStats.rollMetricsPeriod(); + bucketCacheStats.rollMetricsPeriod(); + } + + @Override + public long getFailedInserts() { + return lruCacheStats.getFailedInserts() + bucketCacheStats.getFailedInserts(); } @Override - public double getHitCachingRatioPastNPeriods() { - double ratio = ((double) (lruCacheStats - .getSumHitCachingCountsPastNPeriods() + bucketCacheStats - .getSumHitCachingCountsPastNPeriods()) / (double) (lruCacheStats - .getSumRequestCachingCountsPastNPeriods() + bucketCacheStats - .getSumRequestCachingCountsPastNPeriods())); - return Double.isNaN(ratio) ? 
0 : ratio; + public long getSumHitCountsPastNPeriods() { + return lruCacheStats.getSumHitCountsPastNPeriods() + + bucketCacheStats.getSumHitCountsPastNPeriods(); + } + + @Override + public long getSumRequestCountsPastNPeriods() { + return lruCacheStats.getSumRequestCountsPastNPeriods() + + bucketCacheStats.getSumRequestCountsPastNPeriods(); + } + + @Override + public long getSumHitCachingCountsPastNPeriods() { + return lruCacheStats.getSumHitCachingCountsPastNPeriods() + + bucketCacheStats.getSumHitCachingCountsPastNPeriods(); + } + + @Override + public long getSumRequestCachingCountsPastNPeriods() { + return lruCacheStats.getSumRequestCachingCountsPastNPeriods() + + bucketCacheStats.getSumRequestCachingCountsPastNPeriods(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java new file mode 100644 index 00000000000..50bf33132ce --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java @@ -0,0 +1,98 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.io.hfile; + +import static org.junit.Assert.assertEquals; +import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache.CombinedCacheStats; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({SmallTests.class}) +public class TestCombinedBlockCache { + @Test + public void testCombinedCacheStats() { + CacheStats lruCacheStats = new CacheStats("lruCacheStats", 2); + CacheStats bucketCacheStats = new CacheStats("bucketCacheStats", 2); + CombinedCacheStats stats = + new CombinedCacheStats(lruCacheStats, bucketCacheStats); + + double delta = 0.01; + + // period 1: + // lru cache: 1 hit caching, 1 miss caching + // bucket cache: 2 hit non-caching,1 miss non-caching/primary,1 fail insert + lruCacheStats.hit(true); + lruCacheStats.miss(true, false); + bucketCacheStats.hit(false); + bucketCacheStats.hit(false); + bucketCacheStats.miss(false, true); + + assertEquals(5, stats.getRequestCount()); + assertEquals(2, stats.getRequestCachingCount()); + assertEquals(2, stats.getMissCount()); + assertEquals(1, stats.getPrimaryMissCount()); + assertEquals(1, stats.getMissCachingCount()); + assertEquals(3, stats.getHitCount()); + assertEquals(3, stats.getPrimaryHitCount()); + assertEquals(1, stats.getHitCachingCount()); + assertEquals(0.6, stats.getHitRatio(), delta); + assertEquals(0.5, stats.getHitCachingRatio(), delta); + assertEquals(0.4, stats.getMissRatio(), delta); + assertEquals(0.5, stats.getMissCachingRatio(), delta); + + + // lru cache: 2 evicted, 1 evict + // bucket cache: 1 evict + lruCacheStats.evicted(1000, true); + lruCacheStats.evicted(1000, false); + lruCacheStats.evict(); + bucketCacheStats.evict(); + assertEquals(2, stats.getEvictionCount()); + assertEquals(2, stats.getEvictedCount()); + assertEquals(1, stats.getPrimaryEvictedCount()); + assertEquals(1.0, stats.evictedPerEviction(), delta); + + // lru cache: 1 fail insert + lruCacheStats.failInsert(); + assertEquals(1, stats.getFailedInserts()); + + // rollMetricsPeriod + stats.rollMetricsPeriod(); + assertEquals(3, stats.getSumHitCountsPastNPeriods()); + assertEquals(5, stats.getSumRequestCountsPastNPeriods()); + assertEquals(1, stats.getSumHitCachingCountsPastNPeriods()); + assertEquals(2, stats.getSumRequestCachingCountsPastNPeriods()); + assertEquals(0.6, stats.getHitRatioPastNPeriods(), delta); + assertEquals(0.5, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 2: + // lru cache: 3 hit caching + lruCacheStats.hit(true); + lruCacheStats.hit(true); + lruCacheStats.hit(true); + stats.rollMetricsPeriod(); + assertEquals(6, stats.getSumHitCountsPastNPeriods()); + assertEquals(8, stats.getSumRequestCountsPastNPeriods()); + assertEquals(4, stats.getSumHitCachingCountsPastNPeriods()); + assertEquals(5, stats.getSumRequestCachingCountsPastNPeriods()); + assertEquals(0.75, stats.getHitRatioPastNPeriods(), delta); + assertEquals(0.8, stats.getHitCachingRatioPastNPeriods(), delta); + } +} From 7d3af71db5f53fa39bbf612a6927acb875cea4ce Mon Sep 17 00:00:00 2001 From: Jonathan M Hsieh Date: Mon, 14 Dec 2015 16:07:05 -0800 Subject: [PATCH 15/72] HBASE-14929 There is a space missing from table "foo" is not currently available (Carlos A. 
Morillo) --- .../apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java index d6bcd660940..a2b8b924618 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java @@ -348,7 +348,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { RegionLocator regionLocator) throws TableNotFoundException, IOException { if (!admin.isTableAvailable(regionLocator.getName())) { - throw new TableNotFoundException("Table " + table.getName() + "is not currently available."); + throw new TableNotFoundException("Table " + table.getName() + " is not currently available."); } ExecutorService pool = createExecutorService(); From a290a5d978a5eb730d9708bc2c0384d22fc37fb6 Mon Sep 17 00:00:00 2001 From: Elliott Clark Date: Mon, 14 Dec 2015 18:35:40 -0800 Subject: [PATCH 16/72] HBASE-14979 Update to the newest Zookeeper release --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 95996502384..12ab9ebe064 100644 --- a/pom.xml +++ b/pom.xml @@ -1144,7 +1144,7 @@ 2.5.0 thrift 0.9.3 - 3.4.6 + 3.4.7 1.7.7 4.0.3 2.4.1 From fbad4d2466441d09f45d6fd42486b5e6dc00a893 Mon Sep 17 00:00:00 2001 From: Enis Soztutar Date: Tue, 15 Dec 2015 11:24:24 -0800 Subject: [PATCH 17/72] HBASE-14968 ConcurrentModificationException in region close resulting in the region staying in closing state Conflicts: hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java --- .../hbase/coprocessor/CoprocessorHost.java | 24 +++++++---- .../hadoop/hbase/executor/EventHandler.java | 7 ++- .../handler/RegionReplicaFlushHandler.java | 4 +- .../hbase/executor/TestExecutorService.java | 43 ++++++++++++++++--- 4 files changed, 60 insertions(+), 18 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java index d692fb860df..30051d1dd14 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java @@ -104,8 +104,11 @@ public abstract class CoprocessorHost { */ private static Set coprocessorNames = Collections.synchronizedSet(new HashSet()); + public static Set getLoadedCoprocessors() { - return coprocessorNames; + synchronized (coprocessorNames) { + return new HashSet(coprocessorNames); + } } /** @@ -349,6 +352,7 @@ public abstract class CoprocessorHost { */ static class EnvironmentPriorityComparator implements Comparator { + @Override public int compare(final CoprocessorEnvironment env1, final CoprocessorEnvironment env2) { if (env1.getPriority() < env2.getPriority()) { @@ -437,14 +441,16 @@ public abstract class CoprocessorHost { LOG.warn("Not stopping coprocessor "+impl.getClass().getName()+ " because not active (state="+state.toString()+")"); } - // clean up any table references - for (HTableInterface table: openTables) { - try { - ((HTableWrapper)table).internalClose(); - } catch (IOException e) { - // nothing can be done here - LOG.warn("Failed to close " + - Bytes.toStringBinary(table.getTableName()), e); + synchronized (openTables) 
{ + // clean up any table references + for (HTableInterface table: openTables) { + try { + ((HTableWrapper)table).internalClose(); + } catch (IOException e) { + // nothing can be done here + LOG.warn("Failed to close " + + Bytes.toStringBinary(table.getTableName()), e); + } } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java index 541089a0c86..73fd7f28908 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java @@ -121,6 +121,7 @@ public abstract class EventHandler implements Runnable, Comparable { return this; } + @Override public void run() { TraceScope chunk = Trace.startSpan(this.getClass().getSimpleName(), parent); try { @@ -223,6 +224,10 @@ public abstract class EventHandler implements Runnable, Comparable { * @param t Throwable object */ protected void handleException(Throwable t) { - LOG.error("Caught throwable while processing event " + eventType, t); + String msg = "Caught throwable while processing event " + eventType; + LOG.error(msg, t); + if (server != null) { + server.abort(msg, t); + } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java index e0921b09256..94cab85df87 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java @@ -80,10 +80,8 @@ public class RegionReplicaFlushHandler extends EventHandler { @Override protected void handleException(Throwable t) { - super.handleException(t); - if (t instanceof InterruptedIOException || t instanceof InterruptedException) { - // ignore + LOG.error("Caught throwable while processing event " + eventType, t); } else if (t instanceof RuntimeException) { server.abort("ServerAborting because a runtime exception was thrown", t); } else { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/executor/TestExecutorService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/executor/TestExecutorService.java index acb7ecf4ea2..25496ed5313 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/executor/TestExecutorService.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/executor/TestExecutorService.java @@ -19,8 +19,6 @@ package org.apache.hadoop.hbase.executor; import static org.junit.Assert.*; -import static org.junit.Assert.assertEquals; - import java.io.IOException; import java.io.StringWriter; import java.util.concurrent.ThreadPoolExecutor; @@ -29,7 +27,9 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.Waiter.Predicate; import org.apache.hadoop.hbase.executor.ExecutorService.Executor; import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorStatus; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -89,8 +89,8 @@ public class TestExecutorService { assertTrue(status.queuedEvents.isEmpty()); assertEquals(5, status.running.size()); checkStatusDump(status); - - + + // Now interrupt the running Executor synchronized 
(lock) { lock.set(false); @@ -139,7 +139,7 @@ public class TestExecutorService { status.dumpTo(sw, ""); String dump = sw.toString(); LOG.info("Got status dump:\n" + dump); - + assertTrue(dump.contains("Waiting on java.util.concurrent.atomic.AtomicBoolean")); } @@ -172,5 +172,38 @@ public class TestExecutorService { } } + @Test + public void testAborting() throws Exception { + final Configuration conf = HBaseConfiguration.create(); + final Server server = mock(Server.class); + when(server.getConfiguration()).thenReturn(conf); + + ExecutorService executorService = new ExecutorService("unit_test"); + executorService.startExecutorService( + ExecutorType.MASTER_SERVER_OPERATIONS, 1); + + + executorService.submit(new EventHandler(server, EventType.M_SERVER_SHUTDOWN) { + @Override + public void process() throws IOException { + throw new RuntimeException("Should cause abort"); + } + }); + + Waiter.waitFor(conf, 30000, new Predicate() { + @Override + public boolean evaluate() throws Exception { + try { + verify(server, times(1)).abort(anyString(), (Throwable) anyObject()); + return true; + } catch (Throwable t) { + return false; + } + } + }); + + executorService.shutdown(); + } + } From 31d73a4bdec59ba2ca6bdc04d881f2bc3726e903 Mon Sep 17 00:00:00 2001 From: Enis Soztutar Date: Tue, 15 Dec 2015 11:57:28 -0800 Subject: [PATCH 18/72] HBASE-14977 ChoreService.shutdown may result in ConcurrentModificationException (Vladimir Rodionov) --- .../src/main/java/org/apache/hadoop/hbase/ChoreService.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java index 091d8541388..5c3d2158538 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java @@ -317,7 +317,7 @@ public class ChoreService implements ChoreServicer { * in the middle of execution will be interrupted and shutdown. This service will be unusable * after this method has been called (i.e. future scheduling attempts will fail). 
*/ - public void shutdown() { + public synchronized void shutdown() { scheduler.shutdownNow(); if (LOG.isInfoEnabled()) { LOG.info("Chore service for: " + coreThreadPoolPrefix + " had " + scheduledChores.keySet() From bf7c36fccac5477d08e296ad93671d2c30ae2dc8 Mon Sep 17 00:00:00 2001 From: Matteo Bertozzi Date: Tue, 15 Dec 2015 10:15:18 -0800 Subject: [PATCH 19/72] HBASE-14947 WALProcedureStore improvements --- .../store/ProcedureStoreTracker.java | 21 +- .../store/wal/WALProcedureStore.java | 294 +++++++++--------- .../procedure2/TestProcedureRecovery.java | 4 +- .../store/TestProcedureStoreTracker.java | 35 +-- .../store/wal/TestWALProcedureStore.java | 51 ++- 5 files changed, 203 insertions(+), 202 deletions(-) diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java index 8516f612760..6823288a3cd 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java @@ -27,7 +27,6 @@ import java.util.TreeMap; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos; /** @@ -356,25 +355,19 @@ public class ProcedureStoreTracker { } } - public void insert(final Procedure proc, final Procedure[] subprocs) { - insert(proc.getProcId()); - if (subprocs != null) { - for (int i = 0; i < subprocs.length; ++i) { - insert(subprocs[i].getProcId()); - } - } - } - - public void update(final Procedure proc) { - update(proc.getProcId()); - } - public void insert(long procId) { BitSetNode node = getOrCreateNode(procId); node.update(procId); trackProcIds(procId); } + public void insert(final long procId, final long[] subProcIds) { + update(procId); + for (int i = 0; i < subProcIds.length; ++i) { + insert(subProcIds[i]); + } + } + public void update(long procId) { Map.Entry entry = map.floorEntry(procId); assert entry != null : "expected node to update procId=" + procId; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java index ec42d6a156c..20709a91142 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java @@ -100,7 +100,6 @@ public class WALProcedureStore extends ProcedureStoreBase { private final LinkedList logs = new LinkedList(); private final ProcedureStoreTracker storeTracker = new ProcedureStoreTracker(); - private final AtomicLong inactiveLogsMaxId = new AtomicLong(0); private final ReentrantLock lock = new ReentrantLock(); private final Condition waitCond = lock.newCondition(); private final Condition slotCond = lock.newCondition(); @@ -191,19 +190,16 @@ public class WALProcedureStore extends ProcedureStoreBase { } LOG.info("Stopping the WAL Procedure Store"); - if (lock.tryLock()) { - try { - waitCond.signalAll(); - syncCond.signalAll(); - } finally { - lock.unlock(); - } - } + sendStopSignal(); if (!abort) { try { - syncThread.join(); + while (syncThread.isAlive()) { + sendStopSignal(); + 
syncThread.join(250); + } } catch (InterruptedException e) { + LOG.warn("join interrupted", e); Thread.currentThread().interrupt(); } } @@ -220,6 +216,17 @@ public class WALProcedureStore extends ProcedureStoreBase { logs.clear(); } + private void sendStopSignal() { + if (lock.tryLock()) { + try { + waitCond.signalAll(); + syncCond.signalAll(); + } finally { + lock.unlock(); + } + } + } + @Override public int getNumThreads() { return slots == null ? 0 : slots.length; @@ -239,31 +246,36 @@ public class WALProcedureStore extends ProcedureStoreBase { @Override public void recoverLease() throws IOException { - LOG.info("Starting WAL Procedure Store lease recovery"); - FileStatus[] oldLogs = getLogFiles(); - while (isRunning()) { - // Get Log-MaxID and recover lease on old logs - flushLogId = initOldLogs(oldLogs); + lock.lock(); + try { + LOG.info("Starting WAL Procedure Store lease recovery"); + FileStatus[] oldLogs = getLogFiles(); + while (isRunning()) { + // Get Log-MaxID and recover lease on old logs + flushLogId = initOldLogs(oldLogs); - // Create new state-log - if (!rollWriter(flushLogId + 1)) { - // someone else has already created this log - LOG.debug("someone else has already created log " + flushLogId); - continue; - } - - // We have the lease on the log - oldLogs = getLogFiles(); - if (getMaxLogId(oldLogs) > flushLogId) { - if (LOG.isDebugEnabled()) { - LOG.debug("Someone else created new logs. Expected maxLogId < " + flushLogId); + // Create new state-log + if (!rollWriter(flushLogId + 1)) { + // someone else has already created this log + LOG.debug("someone else has already created log " + flushLogId); + continue; } - logs.getLast().removeFile(); - continue; - } - LOG.info("Lease acquired for flushLogId: " + flushLogId); - break; + // We have the lease on the log + oldLogs = getLogFiles(); + if (getMaxLogId(oldLogs) > flushLogId) { + if (LOG.isDebugEnabled()) { + LOG.debug("Someone else created new logs. Expected maxLogId < " + flushLogId); + } + logs.getLast().removeFile(); + continue; + } + + LOG.info("Lease acquired for flushLogId: " + flushLogId); + break; + } + } finally { + lock.unlock(); } } @@ -335,18 +347,22 @@ public class WALProcedureStore extends ProcedureStoreBase { } ByteSlot slot = acquireSlot(); - long logId = -1; try { // Serialize the insert + long[] subProcIds = null; if (subprocs != null) { ProcedureWALFormat.writeInsert(slot, proc, subprocs); + subProcIds = new long[subprocs.length]; + for (int i = 0; i < subprocs.length; ++i) { + subProcIds[i] = subprocs[i].getProcId(); + } } else { assert !proc.hasParent(); ProcedureWALFormat.writeInsert(slot, proc); } // Push the transaction data and wait until it is persisted - pushData(slot); + pushData(PushType.INSERT, slot, proc.getProcId(), subProcIds); } catch (IOException e) { // We are not able to serialize the procedure. // this is a code error, and we are not able to go on. 
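The hunks that follow remove the old per-caller bookkeeping: insert(), update() and delete() no longer maintain the tracker in their own synchronized (storeTracker) blocks, because pushData() now records the change (keyed by the new PushType) while it already holds the sync lock. A minimal, self-contained sketch of that shape, with toy names standing in for the real classes:

// Toy sketch: one push path that updates the tracker under the sync lock.
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.locks.ReentrantLock;

final class WalPushSketch {
  enum PushType { INSERT, UPDATE, DELETE }

  private final ReentrantLock lock = new ReentrantLock();
  private final Set<Long> activeProcs = new HashSet<>();  // stand-in for ProcedureStoreTracker

  void push(PushType type, long procId, long[] subProcIds) {
    lock.lock();
    try {
      switch (type) {
        case INSERT:
          activeProcs.add(procId);
          if (subProcIds != null) {
            for (long sub : subProcIds) {
              activeProcs.add(sub);
            }
          }
          break;
        case UPDATE:
          activeProcs.add(procId);
          break;
        case DELETE:
          activeProcs.remove(procId);
          break;
      }
      // ...hand the serialized slot to the sync thread here...
    } finally {
      lock.unlock();
    }
  }

  boolean isEmpty() {
    lock.lock();
    try {
      return activeProcs.isEmpty();  // what periodicRoll() inspects in the real store
    } finally {
      lock.unlock();
    }
  }

  public static void main(String[] args) {
    WalPushSketch store = new WalPushSketch();
    store.push(PushType.INSERT, 1L, new long[] { 2L, 3L });
    store.push(PushType.DELETE, 1L, null);
    System.out.println("still tracking sub-procedures: " + !store.isEmpty());
  }
}

Since the tracker is only touched under that single lock, periodicRoll() running on the sync thread can read it directly, which is what allows the later hunks to drop the remaining synchronized (storeTracker) sections as well.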
@@ -356,14 +372,6 @@ public class WALProcedureStore extends ProcedureStoreBase { } finally { releaseSlot(slot); } - - // Update the store tracker - synchronized (storeTracker) { - storeTracker.insert(proc, subprocs); - if (logId == flushLogId) { - checkAndTryRoll(); - } - } } @Override @@ -373,13 +381,12 @@ public class WALProcedureStore extends ProcedureStoreBase { } ByteSlot slot = acquireSlot(); - long logId = -1; try { // Serialize the update ProcedureWALFormat.writeUpdate(slot, proc); // Push the transaction data and wait until it is persisted - logId = pushData(slot); + pushData(PushType.UPDATE, slot, proc.getProcId(), null); } catch (IOException e) { // We are not able to serialize the procedure. // this is a code error, and we are not able to go on. @@ -388,20 +395,6 @@ public class WALProcedureStore extends ProcedureStoreBase { } finally { releaseSlot(slot); } - - // Update the store tracker - boolean removeOldLogs = false; - synchronized (storeTracker) { - storeTracker.update(proc); - if (logId == flushLogId) { - removeOldLogs = storeTracker.isUpdated(); - checkAndTryRoll(); - } - } - - if (removeOldLogs) { - setInactiveLogsMaxId(logId - 1); - } } @Override @@ -411,13 +404,12 @@ public class WALProcedureStore extends ProcedureStoreBase { } ByteSlot slot = acquireSlot(); - long logId = -1; try { // Serialize the delete ProcedureWALFormat.writeDelete(slot, procId); // Push the transaction data and wait until it is persisted - logId = pushData(slot); + pushData(PushType.DELETE, slot, procId, null); } catch (IOException e) { // We are not able to serialize the procedure. // this is a code error, and we are not able to go on. @@ -426,22 +418,6 @@ public class WALProcedureStore extends ProcedureStoreBase { } finally { releaseSlot(slot); } - - boolean removeOldLogs = false; - synchronized (storeTracker) { - storeTracker.delete(procId); - if (logId == flushLogId) { - if (storeTracker.isEmpty() || storeTracker.isUpdated()) { - removeOldLogs = checkAndTryRoll(); - } else { - checkAndTryRoll(); - } - } - } - - if (removeOldLogs) { - setInactiveLogsMaxId(logId); - } } private ByteSlot acquireSlot() { @@ -454,7 +430,10 @@ public class WALProcedureStore extends ProcedureStoreBase { slotsCache.offer(slot); } - private long pushData(final ByteSlot slot) { + private enum PushType { INSERT, UPDATE, DELETE }; + + private long pushData(final PushType type, final ByteSlot slot, + final long procId, final long[] subProcIds) { if (!isRunning()) { throw new RuntimeException("the store must be running before inserting data"); } @@ -481,6 +460,7 @@ public class WALProcedureStore extends ProcedureStoreBase { } } + updateStoreTracker(type, procId, subProcIds); slots[slotIndex++] = slot; logId = flushLogId; @@ -509,20 +489,29 @@ public class WALProcedureStore extends ProcedureStoreBase { return logId; } - private boolean isSyncAborted() { - return syncException.get() != null; + private void updateStoreTracker(final PushType type, + final long procId, final long[] subProcIds) { + switch (type) { + case INSERT: + if (subProcIds == null) { + storeTracker.insert(procId); + } else { + storeTracker.insert(procId, subProcIds); + } + break; + case UPDATE: + storeTracker.update(procId); + break; + case DELETE: + storeTracker.delete(procId); + break; + default: + throw new RuntimeException("invalid push type " + type); + } } - protected void periodicRoll() throws IOException { - long logId; - boolean removeOldLogs; - synchronized (storeTracker) { - logId = flushLogId; - removeOldLogs = storeTracker.isEmpty(); - } - if 
(checkAndTryRoll() && removeOldLogs) { - setInactiveLogsMaxId(logId); - } + private boolean isSyncAborted() { + return syncException.get() != null; } private void syncLoop() throws Throwable { @@ -534,7 +523,7 @@ public class WALProcedureStore extends ProcedureStoreBase { // Wait until new data is available if (slotIndex == 0) { if (!loading.get()) { - removeInactiveLogs(); + periodicRoll(); } if (LOG.isTraceEnabled()) { @@ -547,7 +536,6 @@ public class WALProcedureStore extends ProcedureStoreBase { waitCond.await(getMillisToNextPeriodicRoll(), TimeUnit.MILLISECONDS); if (slotIndex == 0) { // no data.. probably a stop() or a periodic roll - periodicRoll(); continue; } } @@ -560,13 +548,12 @@ public class WALProcedureStore extends ProcedureStoreBase { long syncWaitMs = System.currentTimeMillis() - syncWaitSt; if (LOG.isTraceEnabled() && (syncWaitMs > 10 || slotIndex < slots.length)) { float rollSec = getMillisFromLastRoll() / 1000.0f; - LOG.trace(String.format("Sync wait %s, slotIndex=%s , totalSynced=%s/sec", + LOG.trace(String.format("Sync wait %s, slotIndex=%s , totalSynced=%s (%s/sec)", StringUtils.humanTimeDiff(syncWaitMs), slotIndex, StringUtils.humanSize(totalSynced.get()), StringUtils.humanSize(totalSynced.get() / rollSec))); } - inSync.set(true); totalSynced.addAndGet(syncSlots()); slotIndex = 0; @@ -639,8 +626,7 @@ public class WALProcedureStore extends ProcedureStoreBase { return totalSynced; } - @VisibleForTesting - public boolean rollWriterOrDie() { + private boolean rollWriterOrDie() { for (int i = 1; i <= rollRetries; ++i) { try { if (rollWriter()) { @@ -656,17 +642,13 @@ public class WALProcedureStore extends ProcedureStoreBase { throw new RuntimeException("unable to roll the log"); } - protected boolean checkAndTryRoll() { - if (!isRunning()) return false; - - if (totalSynced.get() > rollThreshold || getMillisToNextPeriodicRoll() <= 0) { - try { - return rollWriter(); - } catch (IOException e) { - LOG.warn("Unable to roll the log", e); - } + private boolean tryRollWriter() { + try { + return rollWriter(); + } catch (IOException e) { + LOG.warn("Unable to roll the log", e); + return false; } - return false; } private long getMillisToNextPeriodicRoll() { @@ -680,7 +662,52 @@ public class WALProcedureStore extends ProcedureStoreBase { return (System.currentTimeMillis() - lastRollTs.get()); } - protected boolean rollWriter() throws IOException { + @VisibleForTesting + protected void periodicRollForTesting() throws IOException { + lock.lock(); + try { + periodicRoll(); + } finally { + lock.unlock(); + } + } + + @VisibleForTesting + protected boolean rollWriterForTesting() throws IOException { + lock.lock(); + try { + return rollWriter(); + } finally { + lock.unlock(); + } + } + + private void periodicRoll() throws IOException { + if (storeTracker.isEmpty()) { + if (LOG.isTraceEnabled()) { + LOG.trace("no active procedures"); + } + tryRollWriter(); + removeAllLogs(flushLogId - 1); + } else { + if (storeTracker.isUpdated()) { + if (LOG.isTraceEnabled()) { + LOG.trace("all the active procedures are in the latest log"); + } + removeAllLogs(flushLogId - 1); + } + + // if the log size has exceeded the roll threshold + // or the periodic roll timeout is expired, try to roll the wal. 
+ if (totalSynced.get() > rollThreshold || getMillisToNextPeriodicRoll() <= 0) { + tryRollWriter(); + } + + removeInactiveLogs(); + } + } + + private boolean rollWriter() throws IOException { // Create new state-log if (!rollWriter(flushLogId + 1)) { LOG.warn("someone else has already created log " + flushLogId); @@ -701,6 +728,7 @@ public class WALProcedureStore extends ProcedureStoreBase { private boolean rollWriter(final long logId) throws IOException { assert logId > flushLogId : "logId=" + logId + " flushLogId=" + flushLogId; + assert lock.isHeldByCurrentThread() : "expected to be the lock owner. " + lock.isLocked(); ProcedureWALHeader header = ProcedureWALHeader.newBuilder() .setVersion(ProcedureWALFormat.HEADER_VERSION) @@ -730,20 +758,16 @@ public class WALProcedureStore extends ProcedureStoreBase { newStream.close(); return false; } - lock.lock(); - try { - closeStream(); - synchronized (storeTracker) { - storeTracker.resetUpdates(); - } - stream = newStream; - flushLogId = logId; - totalSynced.set(0); - lastRollTs.set(System.currentTimeMillis()); - logs.add(new ProcedureWALFile(fs, newLogFile, header, startPos)); - } finally { - lock.unlock(); - } + + closeStream(); + + storeTracker.resetUpdates(); + stream = newStream; + flushLogId = logId; + totalSynced.set(0); + lastRollTs.set(System.currentTimeMillis()); + logs.add(new ProcedureWALFile(fs, newLogFile, header, startPos)); + if (LOG.isDebugEnabled()) { LOG.debug("Roll new state log: " + logId); } @@ -754,11 +778,9 @@ public class WALProcedureStore extends ProcedureStoreBase { try { if (stream != null) { try { - synchronized (storeTracker) { - ProcedureWALFile log = logs.getLast(); - log.setProcIds(storeTracker.getUpdatedMinProcId(), storeTracker.getUpdatedMaxProcId()); - ProcedureWALFormat.writeTrailer(stream, storeTracker); - } + ProcedureWALFile log = logs.getLast(); + log.setProcIds(storeTracker.getUpdatedMinProcId(), storeTracker.getUpdatedMaxProcId()); + ProcedureWALFormat.writeTrailer(stream, storeTracker); } catch (IOException e) { LOG.warn("Unable to write the trailer: " + e.getMessage()); } @@ -774,30 +796,12 @@ public class WALProcedureStore extends ProcedureStoreBase { // ========================================================================== // Log Files cleaner helpers // ========================================================================== - private void setInactiveLogsMaxId(long logId) { - long expect = 0; - while (!inactiveLogsMaxId.compareAndSet(expect, logId)) { - expect = inactiveLogsMaxId.get(); - if (expect >= logId) { - break; - } - } - } - private void removeInactiveLogs() { - long lastLogId = inactiveLogsMaxId.get(); - if (lastLogId != 0) { - removeAllLogs(lastLogId); - inactiveLogsMaxId.compareAndSet(lastLogId, 0); - } - // Verify if the ProcId of the first oldest is still active. if not remove the file. 
while (logs.size() > 1) { ProcedureWALFile log = logs.getFirst(); - synchronized (storeTracker) { - if (storeTracker.isTracking(log.getMinProcId(), log.getMaxProcId())) { - break; - } + if (storeTracker.isTracking(log.getMinProcId(), log.getMaxProcId())) { + break; } removeLogFile(log); } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java index 0cb12919eaf..9e01fcfb70a 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java @@ -312,7 +312,7 @@ public class TestProcedureRecovery { public void testRunningProcWithSameNonce() throws Exception { final long nonceGroup = 456; final long nonce = 33333; - Procedure proc = new TestMultiStepProcedure(); + Procedure proc = new TestSingleStepProcedure(); long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc, nonceGroup, nonce); // Restart (use a latch to prevent the step execution until we submitted proc2) @@ -320,7 +320,7 @@ public class TestProcedureRecovery { procEnv.setWaitLatch(latch); restart(); // Submit a procedure with the same nonce and expect the same procedure would return. - Procedure proc2 = new TestMultiStepProcedure(); + Procedure proc2 = new TestSingleStepProcedure(); long procId2 = procExecutor.submitProcedure(proc2, nonceGroup, nonce); latch.countDown(); procEnv.setWaitLatch(null); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java index 0dc9d928e1d..26a94d4c398 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java @@ -25,7 +25,6 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.Assert; @@ -41,27 +40,6 @@ import static org.junit.Assert.fail; public class TestProcedureStoreTracker { private static final Log LOG = LogFactory.getLog(TestProcedureStoreTracker.class); - static class TestProcedure extends Procedure { - public TestProcedure(long procId) { - setProcId(procId); - } - - @Override - protected Procedure[] execute(Void env) { return null; } - - @Override - protected void rollback(Void env) { /* no-op */ } - - @Override - protected boolean abort(Void env) { return false; } - - @Override - protected void serializeStateData(final OutputStream stream) { /* no-op */ } - - @Override - protected void deserializeStateData(final InputStream stream) { /* no-op */ } - } - @Test public void testSeqInsertAndDelete() { ProcedureStoreTracker tracker = new ProcedureStoreTracker(); @@ -161,13 +139,10 @@ public class TestProcedureStoreTracker { ProcedureStoreTracker tracker = new ProcedureStoreTracker(); assertTrue(tracker.isEmpty()); - Procedure[] procs = new TestProcedure[] { - new TestProcedure(1), new TestProcedure(2), new TestProcedure(3), - new TestProcedure(4), new TestProcedure(5), new TestProcedure(6), - }; + long[] procs = new long[] { 1, 2, 3, 4, 5, 6 }; - tracker.insert(procs[0], 
null); - tracker.insert(procs[1], new Procedure[] { procs[2], procs[3], procs[4] }); + tracker.insert(procs[0]); + tracker.insert(procs[1], new long[] { procs[2], procs[3], procs[4] }); assertFalse(tracker.isEmpty()); assertTrue(tracker.isUpdated()); @@ -189,11 +164,11 @@ public class TestProcedureStoreTracker { assertTrue(tracker.isUpdated()); for (int i = 0; i < 5; ++i) { - tracker.delete(procs[i].getProcId()); + tracker.delete(procs[i]); assertFalse(tracker.isEmpty()); assertTrue(tracker.isUpdated()); } - tracker.delete(procs[5].getProcId()); + tracker.delete(procs[5]); assertTrue(tracker.isEmpty()); } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java index 1265f3fab3e..18ee05bf6c5 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java @@ -103,7 +103,7 @@ public class TestWALProcedureStore { @Test public void testEmptyRoll() throws Exception { for (int i = 0; i < 10; ++i) { - procStore.periodicRoll(); + procStore.periodicRollForTesting(); } FileStatus[] status = fs.listStatus(logDir); assertEquals(1, status.length); @@ -215,14 +215,14 @@ public class TestWALProcedureStore { procStore.update(rootProcs[i-1]); } // insert root-child txn - procStore.rollWriter(); + procStore.rollWriterForTesting(); for (int i = 1; i <= rootProcs.length; i++) { TestProcedure b = new TestProcedure(rootProcs.length + i, i); rootProcs[i-1].addStackId(1); procStore.insert(rootProcs[i-1], new Procedure[] { b }); } // insert child updates - procStore.rollWriter(); + procStore.rollWriterForTesting(); for (int i = 1; i <= rootProcs.length; i++) { procStore.update(new TestProcedure(rootProcs.length + i, i)); } @@ -230,9 +230,10 @@ public class TestWALProcedureStore { // Stop the store procStore.stop(false); - // Remove 4 byte from the trailer + // the first log was removed, + // we have insert-txn and updates in the others so everything is fine FileStatus[] logs = fs.listStatus(logDir); - assertEquals(3, logs.length); + assertEquals(Arrays.toString(logs), 2, logs.length); Arrays.sort(logs, new Comparator() { @Override public int compare(FileStatus o1, FileStatus o2) { @@ -240,15 +241,13 @@ public class TestWALProcedureStore { } }); - // Remove the first log, we have insert-txn and updates in the others so everything is fine. 
- fs.delete(logs[0].getPath(), false); LoadCounter loader = new LoadCounter(); storeRestart(loader); assertEquals(rootProcs.length * 2, loader.getLoadedCount()); assertEquals(0, loader.getCorruptedCount()); - // Remove the second log, we have lost any root/parent references - fs.delete(logs[1].getPath(), false); + // Remove the second log, we have lost all the root/parent references + fs.delete(logs[0].getPath(), false); loader.reset(); storeRestart(loader); assertEquals(0, loader.getLoadedCount()); @@ -277,7 +276,7 @@ public class TestWALProcedureStore { b.addStackId(1); procStore.update(b); - procStore.rollWriter(); + procStore.rollWriterForTesting(); a.addStackId(2); procStore.update(a); @@ -326,7 +325,7 @@ public class TestWALProcedureStore { b.addStackId(2); procStore.update(b); - procStore.rollWriter(); + procStore.rollWriterForTesting(); b.addStackId(3); procStore.update(b); @@ -427,6 +426,36 @@ public class TestWALProcedureStore { assertEquals(1, procStore.getActiveLogs().size()); } + @Test + public void testRollAndRemove() throws IOException { + // Insert something in the log + Procedure proc1 = new TestSequentialProcedure(); + procStore.insert(proc1, null); + + Procedure proc2 = new TestSequentialProcedure(); + procStore.insert(proc2, null); + + // roll the log, now we have 2 + procStore.rollWriterForTesting(); + assertEquals(2, procStore.getActiveLogs().size()); + + // everything will be up to date in the second log + // so we can remove the first one + procStore.update(proc1); + procStore.update(proc2); + assertEquals(1, procStore.getActiveLogs().size()); + + // roll the log, now we have 2 + procStore.rollWriterForTesting(); + assertEquals(2, procStore.getActiveLogs().size()); + + // remove everything active + // so we can remove all the logs + procStore.delete(proc1.getProcId()); + procStore.delete(proc2.getProcId()); + assertEquals(1, procStore.getActiveLogs().size()); + } + private void corruptLog(final FileStatus logFile, final long dropBytes) throws IOException { assertTrue(logFile.getLen() > dropBytes); From 00805b399f0b953efe510ff3ef8584c39171019b Mon Sep 17 00:00:00 2001 From: Josh Elser Date: Mon, 7 Dec 2015 12:51:44 -0500 Subject: [PATCH 20/72] HBASE-14838 Add clarification docs to SimpleRegionNormalizer. Signed-off-by: Sean Busbey --- .../hbase/master/normalizer/SimpleRegionNormalizer.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index d843bd18c49..659b3dc02d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -45,6 +45,10 @@ import java.util.List; * to merge, if R1 + R1 < S, and normalization stops *
  • Otherwise, no action is performed * + *
    + * Region sizes are coarse and approximate on the order of megabytes. Additionally, + * "empty" regions (less than 1MB, with the previous note) are not merged away. This + * is by design to prevent normalization from undoing the pre-splitting of a table. */ @InterfaceAudience.Private public class SimpleRegionNormalizer implements RegionNormalizer { From 25f7c33c2fc521ac977e7a533a30d2b5f91b6fa8 Mon Sep 17 00:00:00 2001 From: Sean Busbey Date: Tue, 8 Dec 2015 13:43:48 -0800 Subject: [PATCH 21/72] HBASE-14952 ensure correct components in source assembly. * hbase-external-blockcache * hbase-spark * remove duplicate inclusion of hbase-shaded-{client,server} Signed-off-by: stack --- hbase-assembly/src/main/assembly/src.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-assembly/src/main/assembly/src.xml b/hbase-assembly/src/main/assembly/src.xml index 8858308b358..cf73493d4b2 100644 --- a/hbase-assembly/src/main/assembly/src.xml +++ b/hbase-assembly/src/main/assembly/src.xml @@ -37,6 +37,7 @@ org.apache.hbase:hbase-client org.apache.hbase:hbase-common org.apache.hbase:hbase-examples + org.apache.hbase:hbase-external-blockcache org.apache.hbase:hbase-hadoop2-compat org.apache.hbase:hbase-hadoop-compat org.apache.hbase:hbase-it @@ -47,9 +48,8 @@ org.apache.hbase:hbase-resource-bundle org.apache.hbase:hbase-server org.apache.hbase:hbase-shaded - org.apache.hbase:hbase-shaded-client - org.apache.hbase:hbase-shaded-server org.apache.hbase:hbase-shell + org.apache.hbase:hbase-spark org.apache.hbase:hbase-testing-util org.apache.hbase:hbase-thrift From 69b96a666cae859e39eaba280d259427bc892fad Mon Sep 17 00:00:00 2001 From: Sean Busbey Date: Wed, 16 Dec 2015 12:51:20 -0600 Subject: [PATCH 22/72] HBASE-14952. ADDENDUM hbase-spark not present in branch-1 yet. 
--- hbase-assembly/src/main/assembly/src.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/hbase-assembly/src/main/assembly/src.xml b/hbase-assembly/src/main/assembly/src.xml index cf73493d4b2..16b22eeeafc 100644 --- a/hbase-assembly/src/main/assembly/src.xml +++ b/hbase-assembly/src/main/assembly/src.xml @@ -49,7 +49,6 @@ org.apache.hbase:hbase-server org.apache.hbase:hbase-shaded org.apache.hbase:hbase-shell - org.apache.hbase:hbase-spark org.apache.hbase:hbase-testing-util org.apache.hbase:hbase-thrift From d2fb7e61459ea7bf068631822cb670401db15554 Mon Sep 17 00:00:00 2001 From: Elliott Clark Date: Tue, 15 Dec 2015 13:24:32 -0800 Subject: [PATCH 23/72] HBASE-14984 Allow memcached block cache to set optimze to false --- .../hadoop/hbase/io/hfile/MemcachedBlockCache.java | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java index ba75542df2c..87da90bda3c 100644 --- a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java +++ b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java @@ -64,7 +64,9 @@ public class MemcachedBlockCache implements BlockCache { public static final String MEMCACHED_CONFIG_KEY = "hbase.cache.memcached.servers"; public static final String MEMCACHED_TIMEOUT_KEY = "hbase.cache.memcached.timeout"; public static final String MEMCACHED_OPTIMEOUT_KEY = "hbase.cache.memcached.optimeout"; + public static final String MEMCACHED_OPTIMIZE_KEY = "hbase.cache.memcached.spy.optimze"; public static final long MEMCACHED_DEFAULT_TIMEOUT = 500; + public static final boolean MEMCACHED_OPTIMIZE_DEFAULT = false; private final MemcachedClient client; private final HFileBlockTranscoder tc = new HFileBlockTranscoder(); @@ -75,18 +77,16 @@ public class MemcachedBlockCache implements BlockCache { long opTimeout = c.getLong(MEMCACHED_OPTIMEOUT_KEY, MEMCACHED_DEFAULT_TIMEOUT); long queueTimeout = c.getLong(MEMCACHED_TIMEOUT_KEY, opTimeout + MEMCACHED_DEFAULT_TIMEOUT); + boolean optimize = c.getBoolean(MEMCACHED_OPTIMIZE_KEY, MEMCACHED_OPTIMIZE_DEFAULT); ConnectionFactoryBuilder builder = new ConnectionFactoryBuilder() .setOpTimeout(opTimeout) .setOpQueueMaxBlockTime(queueTimeout) // Cap the max time before anything times out .setFailureMode(FailureMode.Redistribute) - .setShouldOptimize(true) // When regions move lots of reads happen together - // So combining them into single requests is nice. + .setShouldOptimize(optimize) .setDaemon(true) // Don't keep threads around past the end of days. .setUseNagleAlgorithm(false) // Ain't nobody got time for that - .setReadBufferSize(HConstants.DEFAULT_BLOCKSIZE * 4 * 1024); // 4 times larger than the - // default block just in case - + .setReadBufferSize(HConstants.DEFAULT_BLOCKSIZE * 4 * 1024); // Much larger just in case // Assume only the localhost is serving memecached. // A la mcrouter or co-locating memcached with split regionservers. 
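A brief usage note on HBASE-14984 above, with a minimal sketch that assumes nothing beyond the key and default the patch itself introduces (MEMCACHED_OPTIMIZE_KEY = "hbase.cache.memcached.spy.optimze", default false): deployments that relied on the old always-on optimization can re-enable it explicitly.

// Hypothetical snippet, not part of any patch in this series; the class name is invented.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemcachedOptimizeExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // true restores the pre-patch behavior of combining reads in the spymemcached client
    conf.setBoolean("hbase.cache.memcached.spy.optimze", true);
    System.out.println(conf.getBoolean("hbase.cache.memcached.spy.optimze", false));
  }
}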
From 42518fa646374b1ac7f34663e4a57328e64a142e Mon Sep 17 00:00:00 2001 From: Mikhail Antonov Date: Wed, 16 Dec 2015 14:08:30 -0800 Subject: [PATCH 24/72] HBASE-14974 Total number of Regions in Transition number on UI incorrect --- .../hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon index 08ed672376d..a5e4b1a3e85 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon @@ -51,6 +51,7 @@ for (Map.Entry e : rit.entrySet()) { } } +int totalRITs = rit.size(); int toRemove = rit.size() - limit; int removed = 0; if (toRemove > 0) { @@ -90,7 +91,7 @@ if (toRemove > 0) { Total number of Regions in Transition for more than <% ritThreshold %> milliseconds <% numOfRITOverThreshold %> - Total number of Regions in Transition<% rit.size() %> + Total number of Regions in Transition<% totalRITs %> <%if removed > 0 %> (<% removed %> more regions in transition not shown) From 8bd0d9dacd5c9d600615f65787323683850afeb2 Mon Sep 17 00:00:00 2001 From: Enis Soztutar Date: Wed, 16 Dec 2015 14:57:59 -0800 Subject: [PATCH 25/72] HBASE-14951 Make hbase.regionserver.maxlogs obsolete --- .../hadoop/hbase/regionserver/wal/FSHLog.java | 21 +++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 6e39517fe4e..4329ce595c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -23,6 +23,8 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.InterruptedIOException; import java.io.OutputStream; +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryUsage; import java.lang.reflect.InvocationTargetException; import java.net.URLEncoder; import java.util.ArrayList; @@ -62,6 +64,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.io.util.HeapMemorySizeUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.DrainBarrier; @@ -505,8 +508,16 @@ public class FSHLog implements WAL { FSUtils.getDefaultBlockSize(this.fs, this.fullPathLogDir)); this.logrollsize = (long)(blocksize * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.95f)); - - this.maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32); + + float memstoreRatio = conf.getFloat(HeapMemorySizeUtil.MEMSTORE_SIZE_KEY, + conf.getFloat(HeapMemorySizeUtil.MEMSTORE_SIZE_OLD_KEY, + HeapMemorySizeUtil.DEFAULT_MEMSTORE_SIZE)); + boolean maxLogsDefined = conf.get("hbase.regionserver.maxlogs") != null; + if(maxLogsDefined){ + LOG.warn("'hbase.regionserver.maxlogs' was deprecated."); + } + this.maxLogs = conf.getInt("hbase.regionserver.maxlogs", + Math.max(32, calculateMaxLogFiles(memstoreRatio, logrollsize))); 
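// Illustrative sketch, not part of HBASE-14951 itself: how the new default behaves
// numerically. The figures below are assumptions (16 GB heap, the 0.4 default for
// hbase.regionserver.global.memstore.size, a 128 MB log roll size); only the formula
// round(heapMax * memstoreRatio * 2 / logRollSize) comes from calculateMaxLogFiles() below.
long heapMax = 16L * 1024 * 1024 * 1024;        // assumed -Xmx
float memstoreRatio = 0.4f;                     // HeapMemorySizeUtil.DEFAULT_MEMSTORE_SIZE
long logRollSize = 128L * 1024 * 1024;          // assumed roll size
int computed = Math.round(heapMax * memstoreRatio * 2 / logRollSize);   // ~102
int effectiveMaxLogs = Math.max(32, computed);  // 102 here; small heaps still floor at 32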
this.minTolerableReplication = conf.getInt("hbase.regionserver.hlog.tolerable.lowreplication", FSUtils.getDefaultReplication(fs, this.fullPathLogDir)); this.lowReplicationRollLimit = @@ -556,6 +567,12 @@ public class FSHLog implements WAL { this.disruptor.start(); } + private int calculateMaxLogFiles(float memstoreSizeRatio, long logRollSize) { + MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); + int maxLogs = Math.round(mu.getMax() * memstoreSizeRatio * 2 / logRollSize); + return maxLogs; + } + /** * Get the backing files associated with this WAL. * @return may be null if there are no files. From b9070b2a452804ab5694f609485fb88b091a0fed Mon Sep 17 00:00:00 2001 From: tedyu Date: Wed, 16 Dec 2015 18:17:28 -0800 Subject: [PATCH 26/72] HBASE-14995 Optimize setting tagsPresent in DefaultMemStore.java (huaxiang sun) --- .../org/apache/hadoop/hbase/regionserver/DefaultMemStore.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index a3d4dfdc923..4893ad68404 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -240,7 +240,7 @@ public class DefaultMemStore implements MemStore { // When we use ACL CP or Visibility CP which deals with Tags during // mutation, the TagRewriteCell.getTagsLength() is a cheaper call. We do not // parse the byte[] to identify the tags length. - if(e.getTagsLength() > 0) { + if (!tagsPresent && (e.getTagsLength() > 0)) { tagsPresent = true; } setOldestEditTimeToNow(); From 988ab048ee3e327c03da3de1c65224c589d0567e Mon Sep 17 00:00:00 2001 From: tedyu Date: Wed, 16 Dec 2015 21:55:55 -0800 Subject: [PATCH 27/72] HBASE-14995 Revert according to Ram's feedback --- .../org/apache/hadoop/hbase/regionserver/DefaultMemStore.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index 4893ad68404..a3d4dfdc923 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -240,7 +240,7 @@ public class DefaultMemStore implements MemStore { // When we use ACL CP or Visibility CP which deals with Tags during // mutation, the TagRewriteCell.getTagsLength() is a cheaper call. We do not // parse the byte[] to identify the tags length. 
- if (!tagsPresent && (e.getTagsLength() > 0)) { + if(e.getTagsLength() > 0) { tagsPresent = true; } setOldestEditTimeToNow(); From 6ec92281ed1f9683baad9800f9c3a6fe531ee827 Mon Sep 17 00:00:00 2001 From: chenheng Date: Thu, 17 Dec 2015 18:33:37 +0800 Subject: [PATCH 28/72] HBASE-14955 OOME: cannot create native thread is back --- .../org/apache/hadoop/hbase/HBaseTestingUtility.java | 10 ++++++++++ .../mapreduce/TestImportTSVWithVisibilityLabels.java | 4 +--- .../hbase/mapreduce/TestTableInputFormatScanBase.java | 4 +--- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index b0abf8df60a..44a27ba0bb0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -161,6 +161,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { public static final String PRESPLIT_TEST_TABLE_KEY = "hbase.test.pre-split-table"; public static final boolean PRESPLIT_TEST_TABLE = true; + + public static final String USE_LOCAL_FILESYSTEM = "hbase.test.local.fileSystem"; /** * Set if we were passed a zkCluster. If so, we won't shutdown zk as * part of general shutdown. @@ -396,6 +398,11 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { return testPath; } + public void setJobWithoutMRCluster() throws IOException { + conf.set("hbase.fs.tmp.dir", getDataTestDirOnTestFS("hbase-staging").toString()); + conf.setBoolean(HBaseTestingUtility.USE_LOCAL_FILESYSTEM, true); + } + private void createSubDirAndSystemProperty( String propertyName, Path parent, String subDirName){ @@ -632,6 +639,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } FileSystem fs = this.dfsCluster.getFileSystem(); FSUtils.setFsDefault(this.conf, new Path(fs.getUri())); + if (this.conf.getBoolean(USE_LOCAL_FILESYSTEM, false)) { + FSUtils.setFsDefault(this.conf, new Path("file:///")); + } } public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[]) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java index 50207162f7f..6426ec9afdc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java @@ -122,12 +122,11 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { conf.set("hbase.coprocessor.region.classes", VisibilityController.class.getName()); conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class, ScanLabelGenerator.class); + util.setJobWithoutMRCluster(); util.startMiniCluster(); // Wait for the labels table to become available util.waitTableEnabled(VisibilityConstants.LABELS_TABLE_NAME.getName(), 50000); createLabels(); - Admin admin = new HBaseAdmin(util.getConfiguration()); - util.startMiniMapReduceCluster(); } private static void createLabels() throws IOException, InterruptedException { @@ -151,7 +150,6 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { @AfterClass public static void releaseCluster() throws Exception { - util.shutdownMiniMapReduceCluster(); 
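// Illustrative sketch, not part of HBASE-14955 itself: the lifecycle the patched tests
// converge on. The class name is invented; the utility calls are the ones these hunks add or keep.
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class HypotheticalMRTest {
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  public static void setUpBeforeClass() throws Exception {
    UTIL.setJobWithoutMRCluster();  // new helper: run MR jobs against the local filesystem
    UTIL.startMiniCluster();        // no startMiniMapReduceCluster() any more
  }

  public static void tearDownAfterClass() throws Exception {
    UTIL.shutdownMiniCluster();     // and no shutdownMiniMapReduceCluster() either
  }
}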
util.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java index b41263ca518..8e451cd1bd2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java @@ -81,18 +81,16 @@ public abstract class TestTableInputFormatScanBase { // switch TIF to log at DEBUG level TEST_UTIL.enableDebug(TableInputFormat.class); TEST_UTIL.enableDebug(TableInputFormatBase.class); + TEST_UTIL.setJobWithoutMRCluster(); // start mini hbase cluster TEST_UTIL.startMiniCluster(3); // create and fill table table = TEST_UTIL.createMultiRegionTable(TableName.valueOf(TABLE_NAME), INPUT_FAMILY); TEST_UTIL.loadTable(table, INPUT_FAMILY, false); - // start MR cluster - TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass public static void tearDownAfterClass() throws Exception { - TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } From 3919f38e0d9b702d35535a20801f91bb78627412 Mon Sep 17 00:00:00 2001 From: anoopsjohn Date: Thu, 17 Dec 2015 22:49:49 +0530 Subject: [PATCH 29/72] HBASE-15000 Fix javadoc warn in LoadIncrementalHFiles. (Ashish) --- .../apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java index a2b8b924618..9ac71b9404f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java @@ -452,11 +452,12 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * @param hfilesDir directory containing list of hfiles to be loaded into the table * @param table table to which hfiles should be loaded * @param queue queue which needs to be loaded into the table + * @param validateHFile if true hfiles will be validated for its format * @throws IOException If any I/O or network error occurred */ - public void prepareHFileQueue(Path hfofDir, Table table, Deque queue, + public void prepareHFileQueue(Path hfilesDir, Table table, Deque queue, boolean validateHFile) throws IOException { - discoverLoadQueue(queue, hfofDir, validateHFile); + discoverLoadQueue(queue, hfilesDir, validateHFile); validateFamiliesInHFiles(table, queue); } From a93c0e822ef4e2526d23e5cd9e0879806a409124 Mon Sep 17 00:00:00 2001 From: anoopsjohn Date: Thu, 17 Dec 2015 23:33:39 +0530 Subject: [PATCH 30/72] HBASE-14999 Remove ref to org.mortbay.log.Log. 
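The hunks that follow repeat one mechanical substitution; a minimal sketch of the idiom being adopted, with an invented class name, is:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class SomeHBaseClass {
  // One private static logger per class replaces static calls to Jetty's org.mortbay.log.Log.
  private static final Log LOG = LogFactory.getLog(SomeHBaseClass.class);

  void doWork() {
    LOG.info("same message, now routed through commons-logging");
  }
}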
--- .../hbase/client/FlushRegionCallable.java | 7 +++++-- .../apache/hadoop/hbase/TestClassFinder.java | 18 +++++++++------- .../hbase/client/ClientSideRegionScanner.java | 9 +++++--- .../hbase/client/TestHBaseAdminNoCluster.java | 10 ++++++--- .../io/encoding/TestDataBlockEncoders.java | 16 ++++++++------ .../regionserver/TestHRegionOnCluster.java | 21 +++++++++++-------- .../TestRegionServerNoMaster.java | 7 +++++-- .../regionserver/TestSplitWalDataLoss.java | 7 +++++-- 8 files changed, 61 insertions(+), 34 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java index b2c4a57c0c9..73bdb740ca1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java @@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -31,7 +33,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.mortbay.log.Log; import com.google.protobuf.ServiceException; @@ -41,6 +42,8 @@ import com.google.protobuf.ServiceException; @InterfaceAudience.Private public class FlushRegionCallable extends RegionAdminServiceCallable { + private static final Log LOG = LogFactory.getLog(FlushRegionCallable.class); + private final byte[] regionName; private final boolean writeFlushWalMarker; private boolean reload; @@ -78,7 +81,7 @@ public class FlushRegionCallable extends RegionAdminServiceCallable> allClasses = allClassesFinder.findClasses(pkgName, false); @@ -246,7 +250,7 @@ public class TestClassFinder { final long counter = testCounter.incrementAndGet(); final String classNamePrefix = name.getMethodName(); String pkgNameSuffix = name.getMethodName(); - Log.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter)); + LOG.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter)); final String classNameToFilterOut = classNamePrefix + counter; final ClassFinder.FileNameFilter notThisFilter = new ClassFinder.FileNameFilter() { @Override @@ -271,7 +275,7 @@ public class TestClassFinder { final long counter = testCounter.incrementAndGet(); final String classNamePrefix = name.getMethodName(); String pkgNameSuffix = name.getMethodName(); - Log.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter)); + LOG.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter)); final Class clazz = makeClass(pkgNameSuffix, classNamePrefix, counter); final ClassFinder.ClassFilter notThisFilter = new ClassFinder.ClassFilter() { @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java index 1ec085f31a3..dde2f100120 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java @@ -22,6 +22,8 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -33,7 +35,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.RegionScanner; -import org.mortbay.log.Log; /** * A client scanner for a region opened for read-only on the client side. Assumes region data @@ -42,6 +43,8 @@ import org.mortbay.log.Log; @InterfaceAudience.Private public class ClientSideRegionScanner extends AbstractClientScanner { + private static final Log LOG = LogFactory.getLog(ClientSideRegionScanner.class); + private HRegion region; RegionScanner scanner; List values; @@ -96,7 +99,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner { this.scanner.close(); this.scanner = null; } catch (IOException ex) { - Log.warn("Exception while closing scanner", ex); + LOG.warn("Exception while closing scanner", ex); } } if (this.region != null) { @@ -105,7 +108,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner { this.region.close(true); this.region = null; } catch (IOException ex) { - Log.warn("Exception while closing region", ex); + LOG.warn("Exception while closing region", ex); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java index 19774d74fa8..c4a7ef81b61 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java @@ -24,6 +24,8 @@ import static org.mockito.Mockito.when; import java.io.IOException; import java.util.ArrayList; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -52,13 +54,15 @@ import org.mockito.Matchers; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import org.mortbay.log.Log; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; @Category(SmallTests.class) public class TestHBaseAdminNoCluster { + + private static final Log LOG = LogFactory.getLog(TestHBaseAdminNoCluster.class); + /** * Verify that PleaseHoldException gets retried. * HBASE-8764 @@ -98,7 +102,7 @@ public class TestHBaseAdminNoCluster { admin.createTable(htd, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE); fail(); } catch (RetriesExhaustedException e) { - Log.info("Expected fail", e); + LOG.info("Expected fail", e); } // Assert we were called 'count' times. Mockito.verify(masterAdmin, Mockito.atLeast(count)).createTable((RpcController)Mockito.any(), @@ -316,7 +320,7 @@ public class TestHBaseAdminNoCluster { caller.call(admin); // invoke the HBaseAdmin method fail(); } catch (RetriesExhaustedException e) { - Log.info("Expected fail", e); + LOG.info("Expected fail", e); } // Assert we were called 'count' times. 
caller.verify(masterAdmin, count); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java index c9107e34881..1995aa4d693 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java @@ -30,6 +30,8 @@ import java.util.Collection; import java.util.List; import java.util.Random; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; @@ -51,7 +53,6 @@ import org.junit.rules.TestRule; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import org.mortbay.log.Log; /** * Test all of the data block encoding algorithms for correctness. Most of the @@ -60,6 +61,9 @@ import org.mortbay.log.Log; @Category(LargeTests.class) @RunWith(Parameterized.class) public class TestDataBlockEncoders { + + private static final Log LOG = LogFactory.getLog(TestDataBlockEncoders.class); + @Rule public final TestRule timeout = CategoryBasedTimeout.builder(). withTimeout(this.getClass()).withLookingForStuckThread(true).build(); @@ -178,7 +182,7 @@ public class TestDataBlockEncoders { List encodedSeekers = new ArrayList(); for (DataBlockEncoding encoding : DataBlockEncoding.values()) { - Log.info("Encoding: " + encoding); + LOG.info("Encoding: " + encoding); // Off heap block data support not added for PREFIX_TREE DBE yet. // TODO remove this once support is added. HBASE-12298 if (encoding == DataBlockEncoding.PREFIX_TREE) continue; @@ -186,7 +190,7 @@ public class TestDataBlockEncoders { if (encoder == null) { continue; } - Log.info("Encoder: " + encoder); + LOG.info("Encoder: " + encoder); ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv, getEncodingContext(Compression.Algorithm.NONE, encoding)); HFileContext meta = new HFileContextBuilder() @@ -200,7 +204,7 @@ public class TestDataBlockEncoders { seeker.setCurrentBuffer(encodedBuffer); encodedSeekers.add(seeker); } - Log.info("Testing it!"); + LOG.info("Testing it!"); // test it! 
// try a few random seeks for (boolean seekBefore : new boolean[] { false, true }) { @@ -218,7 +222,7 @@ public class TestDataBlockEncoders { } // check edge cases - Log.info("Checking edge cases"); + LOG.info("Checking edge cases"); checkSeekingConsistency(encodedSeekers, false, sampleKv.get(0)); for (boolean seekBefore : new boolean[] { false, true }) { checkSeekingConsistency(encodedSeekers, seekBefore, sampleKv.get(sampleKv.size() - 1)); @@ -226,7 +230,7 @@ public class TestDataBlockEncoders { KeyValue lastMidKv =KeyValueUtil.createLastOnRowCol(midKv); checkSeekingConsistency(encodedSeekers, seekBefore, lastMidKv); } - Log.info("Done"); + LOG.info("Done"); } static ByteBuffer encodeKeyValues(DataBlockEncoding encoding, List kvs, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java index 3676f934db5..05e4d705cd2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java @@ -23,6 +23,8 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -41,7 +43,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.mortbay.log.Log; /** * Tests that need to spin up a cluster testing an {@link HRegion}. 
Use @@ -50,6 +51,8 @@ import org.mortbay.log.Log; */ @Category(MediumTests.class) public class TestHRegionOnCluster { + + private static final Log LOG = LogFactory.getLog(TestHRegionOnCluster.class); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @Test (timeout=300000) @@ -74,7 +77,7 @@ public class TestHRegionOnCluster { assertTrue(hbaseAdmin.isTableAvailable(TABLENAME)); // Put data: r1->v1 - Log.info("Loading r1 to v1 into " + TABLENAME); + LOG.info("Loading r1 to v1 into " + TABLENAME); HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLENAME); putDataAndVerify(table, "r1", FAMILY, "v1", 1); @@ -88,7 +91,7 @@ public class TestHRegionOnCluster { assertFalse(originServer.equals(targetServer)); TEST_UTIL.waitUntilAllRegionsAssigned(table.getName()); - Log.info("Moving " + regionInfo.getEncodedName() + " to " + targetServer.getServerName()); + LOG.info("Moving " + regionInfo.getEncodedName() + " to " + targetServer.getServerName()); hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(), Bytes.toBytes(targetServer.getServerName().getServerName())); do { @@ -96,12 +99,12 @@ public class TestHRegionOnCluster { } while (cluster.getServerWith(regionInfo.getRegionName()) == originServerNum); // Put data: r2->v2 - Log.info("Loading r2 to v2 into " + TABLENAME); + LOG.info("Loading r2 to v2 into " + TABLENAME); putDataAndVerify(table, "r2", FAMILY, "v2", 2); TEST_UTIL.waitUntilAllRegionsAssigned(table.getName()); // Move region to origin server - Log.info("Moving " + regionInfo.getEncodedName() + " to " + originServer.getServerName()); + LOG.info("Moving " + regionInfo.getEncodedName() + " to " + originServer.getServerName()); hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(), Bytes.toBytes(originServer.getServerName().getServerName())); do { @@ -109,11 +112,11 @@ public class TestHRegionOnCluster { } while (cluster.getServerWith(regionInfo.getRegionName()) == targetServerNum); // Put data: r3->v3 - Log.info("Loading r3 to v3 into " + TABLENAME); + LOG.info("Loading r3 to v3 into " + TABLENAME); putDataAndVerify(table, "r3", FAMILY, "v3", 3); // Kill target server - Log.info("Killing target server " + targetServer.getServerName()); + LOG.info("Killing target server " + targetServer.getServerName()); targetServer.kill(); cluster.getRegionServerThreads().get(targetServerNum).join(); // Wait until finish processing of shutdown @@ -121,12 +124,12 @@ public class TestHRegionOnCluster { Thread.sleep(5); } // Kill origin server - Log.info("Killing origin server " + targetServer.getServerName()); + LOG.info("Killing origin server " + targetServer.getServerName()); originServer.kill(); cluster.getRegionServerThreads().get(originServerNum).join(); // Put data: r4->v4 - Log.info("Loading r4 to v4 into " + TABLENAME); + LOG.info("Loading r4 to v4 into " + TABLENAME); putDataAndVerify(table, "r4", FAMILY, "v4", 4); } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java index d295170191a..a419b78aa1c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java @@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import 
org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -52,7 +54,6 @@ import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.mortbay.log.Log; import com.google.protobuf.ServiceException; @@ -63,6 +64,8 @@ import com.google.protobuf.ServiceException; @Category(MediumTests.class) public class TestRegionServerNoMaster { + private static final Log LOG = LogFactory.getLog(TestRegionServerNoMaster.class); + private static final int NB_SERVERS = 1; private static HTable table; private static final byte[] row = "ee".getBytes(); @@ -99,7 +102,7 @@ public class TestRegionServerNoMaster { ServerName masterAddr = master.getServerName(); master.stopMaster(); - Log.info("Waiting until master thread exits"); + LOG.info("Waiting until master thread exits"); while (masterThread != null && masterThread.isAlive()) { Threads.sleep(100); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWalDataLoss.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWalDataLoss.java index 92e0558f0a2..102e7bc4d9e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWalDataLoss.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWalDataLoss.java @@ -26,6 +26,8 @@ import java.io.IOException; import java.util.Collection; import org.apache.commons.lang.mutable.MutableBoolean; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.DroppedSnapshotException; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -53,7 +55,6 @@ import org.junit.experimental.categories.Category; import org.mockito.Matchers; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import org.mortbay.log.Log; /** * Testcase for https://issues.apache.org/jira/browse/HBASE-13811 @@ -61,6 +62,8 @@ import org.mortbay.log.Log; @Category({ MediumTests.class }) public class TestSplitWalDataLoss { + private static final Log LOG = LogFactory.getLog(TestSplitWalDataLoss.class); + private final HBaseTestingUtility testUtil = new HBaseTestingUtility(); private NamespaceDescriptor namespace = NamespaceDescriptor.create(getClass().getSimpleName()) @@ -121,7 +124,7 @@ public class TestSplitWalDataLoss { table.put(new Put(Bytes.toBytes("row0")).addColumn(family, qualifier, Bytes.toBytes("val0"))); } long oldestSeqIdOfStore = region.getOldestSeqIdOfStore(family); - Log.info("CHANGE OLDEST " + oldestSeqIdOfStore); + LOG.info("CHANGE OLDEST " + oldestSeqIdOfStore); assertTrue(oldestSeqIdOfStore > HConstants.NO_SEQNUM); rs.cacheFlusher.requestFlush(spiedRegion, false); synchronized (flushed) { From f3ebeeb3fced9cc1cc382e55d00e4fcb0b0390ab Mon Sep 17 00:00:00 2001 From: Enis Soztutar Date: Thu, 17 Dec 2015 11:48:14 -0800 Subject: [PATCH 31/72] HBASE-14989 Implementation of Mutation.getWriteToWAL() is backwards --- .../src/main/java/org/apache/hadoop/hbase/client/Mutation.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java index dbc13178e3d..2b88ffcf85c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java @@ -238,7 +238,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C */ @Deprecated public boolean getWriteToWAL() { - return this.durability == Durability.SKIP_WAL; + return this.durability != Durability.SKIP_WAL; } /** From c825a26ad1e0ee9c099e509453b69298f277880a Mon Sep 17 00:00:00 2001 From: Elliott Clark Date: Thu, 17 Dec 2015 00:51:37 -0800 Subject: [PATCH 32/72] HBASE-14978 Don't allow Multi to retain too many blocks --- .../hadoop/hbase/ipc/RpcCallContext.java | 3 + .../apache/hadoop/hbase/ipc/RpcServer.java | 11 +++ .../hbase/regionserver/RSRpcServices.java | 58 +++++++++----- .../hbase/client/TestMultiRespectsLimits.java | 75 +++++++++++++++++-- 4 files changed, 124 insertions(+), 23 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java index 3e38dbf24b3..eb76748e7f1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java @@ -82,4 +82,7 @@ public interface RpcCallContext extends Delayable { * onerous. */ void incrementResponseCellSize(long cellSize); + + long getResponseBlockSize(); + void incrementResponseBlockSize(long blockSize); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index 1a79e2ecd2c..0011c2e4989 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -318,6 +318,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { private InetAddress remoteAddress; private long responseCellSize = 0; + private long responseBlockSize = 0; private boolean retryImmediatelySupported; Call(int id, final BlockingService service, final MethodDescriptor md, RequestHeader header, @@ -541,6 +542,16 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { responseCellSize += cellSize; } + @Override + public long getResponseBlockSize() { + return responseBlockSize; + } + + @Override + public void incrementResponseBlockSize(long blockSize) { + responseBlockSize += blockSize; + } + /** * If we have a response, and delay is not set, then respond * immediately. Otherwise, do not respond to client. 
This is diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 8f85f512d3d..f3b4b168f74 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -581,6 +581,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable()); RpcCallContext context = RpcServer.getCurrentCall(); IOException sizeIOE = null; + Object lastBlock = null; for (ClientProtos.Action action : actions.getActionList()) { ClientProtos.ResultOrException.Builder resultOrExceptionBuilder = null; try { @@ -588,7 +589,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (context != null && context.isRetryImmediatelySupported() - && context.getResponseCellSize() > maxQuotaResultSize) { + && (context.getResponseCellSize() > maxQuotaResultSize + || context.getResponseBlockSize() > maxQuotaResultSize)) { // We're storing the exception since the exception and reason string won't // change after the response size limit is reached. @@ -597,15 +599,16 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // Throwing will kill the JVM's JIT. // // Instead just create the exception and then store it. - sizeIOE = new MultiActionResultTooLarge("Max response size exceeded: " - + context.getResponseCellSize()); + sizeIOE = new MultiActionResultTooLarge("Max size exceeded" + + " CellSize: " + context.getResponseCellSize() + + " BlockSize: " + context.getResponseBlockSize()); // Only report the exception once since there's only one request that // caused the exception. Otherwise this number will dominate the exceptions count. rpcServer.getMetrics().exception(sizeIOE); } - // Now that there's an exception is know to be created + // Now that there's an exception is known to be created // use it for the response. // // This will create a copy in the builder. @@ -674,9 +677,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } else { pbResult = ProtobufUtil.toResult(r); } - if (context != null) { - context.incrementResponseCellSize(Result.getTotalSizeOfCells(r)); - } + lastBlock = addSize(context, r, lastBlock); resultOrExceptionBuilder = ClientProtos.ResultOrException.newBuilder().setResult(pbResult); } @@ -1002,6 +1003,32 @@ public class RSRpcServices implements HBaseRPCErrorHandler, return scannerId; } + /** + * Method to account for the size of retained cells and retained data blocks. + * @return an object that represents the last referenced block from this response. + */ + Object addSize(RpcCallContext context, Result r, Object lastBlock) { + if (context != null && !r.isEmpty()) { + for (Cell c : r.rawCells()) { + context.incrementResponseCellSize(CellUtil.estimatedHeapSizeOf(c)); + + // We're using the last block being the same as the current block as + // a proxy for pointing to a new block. This won't be exact. + // If there are multiple gets that bounce back and forth + // Then it's possible that this will over count the size of + // referenced blocks. However it's better to over count and + // use two rpcs than to OOME the regionserver. 
+ byte[] rowArray = c.getRowArray(); + if (rowArray != lastBlock) { + context.incrementResponseBlockSize(rowArray.length); + lastBlock = rowArray; + } + } + } + return lastBlock; + } + + /** * Find the HRegion based on a region specifier * @@ -2291,6 +2318,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler, boolean closeScanner = false; boolean isSmallScan = false; RpcCallContext context = RpcServer.getCurrentCall(); + Object lastBlock = null; + ScanResponse.Builder builder = ScanResponse.newBuilder(); if (request.hasCloseScanner()) { closeScanner = request.getCloseScanner(); @@ -2379,11 +2408,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, scanner, results, rows); if (!results.isEmpty()) { for (Result r : results) { - for (Cell cell : r.rawCells()) { - if (context != null) { - context.incrementResponseCellSize(CellUtil.estimatedSerializedSizeOf(cell)); - } - } + lastBlock = addSize(context, r, lastBlock); } } if (bypass != null && bypass.booleanValue()) { @@ -2481,13 +2506,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler, moreRows = scanner.nextRaw(values, scannerContext); if (!values.isEmpty()) { - for (Cell cell : values) { - if (context != null) { - context.incrementResponseCellSize(CellUtil.estimatedSerializedSizeOf(cell)); - } - } final boolean partial = scannerContext.partialResultFormed(); - results.add(Result.create(values, null, stale, partial)); + Result r = Result.create(values, null, stale, partial); + lastBlock = addSize(context, r, lastBlock); + results.add(r); i++; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java index 47dd7beccfd..28e1855b70d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.metrics.BaseSource; +import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.test.MetricsAssertHelper; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -36,6 +37,7 @@ import org.junit.experimental.categories.Category; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.ThreadLocalRandom; import static junit.framework.TestCase.assertEquals; @@ -73,7 +75,7 @@ public class TestMultiRespectsLimits { TEST_UTIL.loadTable(t, FAMILY, false); // Split the table to make sure that the chunking happens accross regions. 
- try (final Admin admin = TEST_UTIL.getHBaseAdmin()) { + try (final Admin admin = TEST_UTIL.getAdmin()) { admin.split(name); TEST_UTIL.waitFor(60000, new Waiter.Predicate() { @Override @@ -87,16 +89,79 @@ public class TestMultiRespectsLimits { for (int i = 0; i < MAX_SIZE; i++) { gets.add(new Get(HBaseTestingUtility.ROWS[i])); } - Result[] results = t.get(gets); - assertEquals(MAX_SIZE, results.length); + RpcServerInterface rpcServer = TEST_UTIL.getHBaseCluster().getRegionServer(0).getRpcServer(); BaseSource s = rpcServer.getMetrics().getMetricsSource(); + long startingExceptions = METRICS_ASSERT.getCounter("exceptions", s); + long startingMultiExceptions = METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", s); + + Result[] results = t.get(gets); + assertEquals(MAX_SIZE, results.length); // Cells from TEST_UTIL.loadTable have a length of 27. // Multiplying by less than that gives an easy lower bound on size. // However in reality each kv is being reported as much higher than that. - METRICS_ASSERT.assertCounterGt("exceptions", (MAX_SIZE * 25) / MAX_SIZE, s); + METRICS_ASSERT.assertCounterGt("exceptions", + startingExceptions + ((MAX_SIZE * 25) / MAX_SIZE), s); METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge", - (MAX_SIZE * 25) / MAX_SIZE, s); + startingMultiExceptions + ((MAX_SIZE * 25) / MAX_SIZE), s); + } + + @Test + public void testBlockMultiLimits() throws Exception { + final TableName name = TableName.valueOf("testBlockMultiLimits"); + Table t = TEST_UTIL.createTable(name, FAMILY); + + final HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0); + RpcServerInterface rpcServer = regionServer.getRpcServer(); + BaseSource s = rpcServer.getMetrics().getMetricsSource(); + long startingExceptions = METRICS_ASSERT.getCounter("exceptions", s); + long startingMultiExceptions = METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", s); + + byte[] row = Bytes.toBytes("TEST"); + byte[][] cols = new byte[][]{ + Bytes.toBytes("0"), // Get this + Bytes.toBytes("1"), // Buffer + Bytes.toBytes("2"), // Get This + Bytes.toBytes("3"), // Buffer + }; + + // Set the value size so that one result will be less than the MAX_SIE + // however the block being reference will be larger than MAX_SIZE. + // This should cause the regionserver to try and send a result immediately. 
+ byte[] value = new byte[MAX_SIZE - 200]; + ThreadLocalRandom.current().nextBytes(value); + + for (byte[] col:cols) { + Put p = new Put(row); + p.addImmutable(FAMILY, col, value); + t.put(p); + } + + // Make sure that a flush happens + try (final Admin admin = TEST_UTIL.getAdmin()) { + admin.flush(name); + TEST_UTIL.waitFor(60000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return regionServer.getOnlineRegions(name).get(0).getMaxFlushedSeqId() > 3; + } + }); + } + + List gets = new ArrayList<>(2); + Get g0 = new Get(row); + g0.addColumn(FAMILY, cols[0]); + gets.add(g0); + + Get g2 = new Get(row); + g2.addColumn(FAMILY, cols[2]); + gets.add(g2); + + Result[] results = t.get(gets); + assertEquals(2, results.length); + METRICS_ASSERT.assertCounterGt("exceptions", startingExceptions, s); + METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge", + startingMultiExceptions, s); } } From 345d008edeb927914e55a4a6802ffc06992ff066 Mon Sep 17 00:00:00 2001 From: Elliott Clark Date: Thu, 17 Dec 2015 17:54:52 -0800 Subject: [PATCH 33/72] HBASE-14978 Don't allow Multi to retain too many blocks -- ADD --- .../apache/hadoop/hbase/client/TestMultiRespectsLimits.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java index 28e1855b70d..1febadf507d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java @@ -75,7 +75,7 @@ public class TestMultiRespectsLimits { TEST_UTIL.loadTable(t, FAMILY, false); // Split the table to make sure that the chunking happens accross regions. - try (final Admin admin = TEST_UTIL.getAdmin()) { + try (final Admin admin = TEST_UTIL.getHBaseAdmin()) { admin.split(name); TEST_UTIL.waitFor(60000, new Waiter.Predicate() { @Override @@ -139,7 +139,7 @@ public class TestMultiRespectsLimits { } // Make sure that a flush happens - try (final Admin admin = TEST_UTIL.getAdmin()) { + try (final Admin admin = TEST_UTIL.getHBaseAdmin()) { admin.flush(name); TEST_UTIL.waitFor(60000, new Waiter.Predicate() { @Override From 736dd2ed6df76249ff8b11a8212353e1db6b334e Mon Sep 17 00:00:00 2001 From: Elliott Clark Date: Thu, 17 Dec 2015 23:36:43 -0800 Subject: [PATCH 34/72] HBASE-15005 Use value array in computing block length for 1.2 and 1.3 --- .../hbase/regionserver/RSRpcServices.java | 11 +++++----- .../hbase/client/TestMultiRespectsLimits.java | 20 ++++++++++++++----- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index f3b4b168f74..f9f0e22f918 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -1011,17 +1011,16 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (context != null && !r.isEmpty()) { for (Cell c : r.rawCells()) { context.incrementResponseCellSize(CellUtil.estimatedHeapSizeOf(c)); - // We're using the last block being the same as the current block as // a proxy for pointing to a new block. This won't be exact. 
// If there are multiple gets that bounce back and forth // Then it's possible that this will over count the size of // referenced blocks. However it's better to over count and - // use two rpcs than to OOME the regionserver. - byte[] rowArray = c.getRowArray(); - if (rowArray != lastBlock) { - context.incrementResponseBlockSize(rowArray.length); - lastBlock = rowArray; + // use two RPC's than to OOME the RegionServer. + byte[] valueArray = c.getValueArray(); + if (valueArray != lastBlock) { + context.incrementResponseBlockSize(valueArray.length); + lastBlock = valueArray; } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java index 1febadf507d..687da22580d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java @@ -20,9 +20,12 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.CompatibilityFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.hadoop.hbase.regionserver.HRegionServer; @@ -110,7 +113,12 @@ public class TestMultiRespectsLimits { @Test public void testBlockMultiLimits() throws Exception { final TableName name = TableName.valueOf("testBlockMultiLimits"); - Table t = TEST_UTIL.createTable(name, FAMILY); + HTableDescriptor desc = new HTableDescriptor(name); + HColumnDescriptor hcd = new HColumnDescriptor(FAMILY); + hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF); + desc.addFamily(hcd); + TEST_UTIL.getHBaseAdmin().createTable(desc); + Table t = TEST_UTIL.getConnection().getTable(name); final HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0); RpcServerInterface rpcServer = regionServer.getRpcServer(); @@ -122,14 +130,16 @@ public class TestMultiRespectsLimits { byte[][] cols = new byte[][]{ Bytes.toBytes("0"), // Get this Bytes.toBytes("1"), // Buffer - Bytes.toBytes("2"), // Get This - Bytes.toBytes("3"), // Buffer + Bytes.toBytes("2"), // Buffer + Bytes.toBytes("3"), // Get This + Bytes.toBytes("4"), // Buffer + Bytes.toBytes("5"), // Buffer }; // Set the value size so that one result will be less than the MAX_SIE // however the block being reference will be larger than MAX_SIZE. // This should cause the regionserver to try and send a result immediately. 
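The hunk above switches the block proxy from getRowArray() to getValueArray(), and the surrounding comment spells out the trade-off: counting a backing array once per run of cells that reference it may over-count when references alternate, but over-counting (and paying for an extra RPC) beats letting one request pin enough blocks to OOME the RegionServer. A minimal standalone sketch of that reference-identity bookkeeping follows; FakeCell and estimateResponseBlockSize are illustrative stand-ins, not HBase API.

import java.util.Arrays;
import java.util.List;

public class BlockSizeAccountingSketch {
  // Stand-in for a Cell; the real code calls Cell#getValueArray() inside addSize().
  static final class FakeCell {
    final byte[] valueArray;
    FakeCell(byte[] valueArray) { this.valueArray = valueArray; }
  }

  static long estimateResponseBlockSize(List<FakeCell> cells) {
    long blockSize = 0;
    byte[] lastBlock = null;
    for (FakeCell c : cells) {
      if (c.valueArray != lastBlock) { // reference identity, not equals(): a new backing array means a new block
        blockSize += c.valueArray.length;
        lastBlock = c.valueArray;
      }
    }
    return blockSize;
  }

  public static void main(String[] args) {
    byte[] blockA = new byte[64 * 1024];
    byte[] blockB = new byte[64 * 1024];
    List<FakeCell> cells = Arrays.asList(
        new FakeCell(blockA), new FakeCell(blockA),   // same block: counted once
        new FakeCell(blockB));                        // different block: counted again
    System.out.println(estimateResponseBlockSize(cells)); // prints 131072, not 196608
  }
}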
- byte[] value = new byte[MAX_SIZE - 200]; + byte[] value = new byte[MAX_SIZE - 100]; ThreadLocalRandom.current().nextBytes(value); for (byte[] col:cols) { @@ -155,7 +165,7 @@ public class TestMultiRespectsLimits { gets.add(g0); Get g2 = new Get(row); - g2.addColumn(FAMILY, cols[2]); + g2.addColumn(FAMILY, cols[3]); gets.add(g2); Result[] results = t.get(gets); From 299ade435a871009e879964dd19ff718ce20fffc Mon Sep 17 00:00:00 2001 From: stack Date: Fri, 18 Dec 2015 09:31:22 -0800 Subject: [PATCH 35/72] HBASE-15009 Update test-patch.sh on branches; to fix curtailed build report --- dev-support/findHangingTest.sh | 40 ---- dev-support/findHangingTests.py | 82 ++++++++ dev-support/jenkinsEnv.sh | 2 +- dev-support/test-patch.sh | 349 +++++++++++++++++++++----------- dev-support/zombie-detector.sh | 166 +++++++++++++++ 5 files changed, 478 insertions(+), 161 deletions(-) delete mode 100755 dev-support/findHangingTest.sh create mode 100644 dev-support/findHangingTests.py create mode 100644 dev-support/zombie-detector.sh diff --git a/dev-support/findHangingTest.sh b/dev-support/findHangingTest.sh deleted file mode 100755 index f7ebe47f093..00000000000 --- a/dev-support/findHangingTest.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -## -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -## -# script to find hanging test from Jenkins build output -# usage: ./findHangingTest.sh -# -`curl -k -o jenkins.out "$1"` -expecting=Running -cat jenkins.out | while read line; do - if [[ "$line" =~ "Running org.apache.hadoop" ]]; then - if [[ "$expecting" =~ "Running" ]]; then - expecting=Tests - else - echo "Hanging test: $prevLine" - fi - fi - if [[ "$line" =~ "Tests run" ]]; then - expecting=Running - fi - if [[ "$line" =~ "Forking command line" ]]; then - a=$line - else - prevLine=$line - fi -done diff --git a/dev-support/findHangingTests.py b/dev-support/findHangingTests.py new file mode 100644 index 00000000000..deccc8bd13b --- /dev/null +++ b/dev-support/findHangingTests.py @@ -0,0 +1,82 @@ +#!/usr/bin/python +## +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +## +# script to find hanging test from Jenkins build output +# usage: ./findHangingTests.py +# +import urllib2 +import sys +import string +if len(sys.argv) != 2 : + print "ERROR : Provide the jenkins job console URL as the only argument." + exit(1) +print "Fetching " + sys.argv[1] +response = urllib2.urlopen(sys.argv[1]) +i = 0; +tests = {} +failed_tests = {} +summary = 0 +host = False +patch = False +branch = False +while True: + n = response.readline() + if n == "" : + break + if not host and n.find("Building remotely on") >= 0: + host = True + print n.strip() + continue + if not patch and n.find("Testing patch for ") >= 0: + patch = True + print n.strip() + continue + if not branch and n.find("Testing patch on branch ") >= 0: + branch = True + print n.strip() + continue + if n.find("PATCH APPLICATION FAILED") >= 0: + print "PATCH APPLICATION FAILED" + sys.exit(1) + if summary == 0 and n.find("Running tests.") >= 0: + summary = summary + 1 + continue + if summary == 1 and n.find("[INFO] Reactor Summary:") >= 0: + summary = summary + 1 + continue + if summary == 2 and n.find("[INFO] Apache HBase ") >= 0: + sys.stdout.write(n) + continue + if n.find("org.apache.hadoop.hbase") < 0: + continue + test_name = string.strip(n[n.find("org.apache.hadoop.hbase"):len(n)]) + if n.find("Running org.apache.hadoop.hbase") > -1 : + tests[test_name] = False + if n.find("Tests run:") > -1 : + if n.find("FAILURE") > -1 or n.find("ERROR") > -1: + failed_tests[test_name] = True + tests[test_name] = True +response.close() + +print "Printing hanging tests" +for key, value in tests.iteritems(): + if value == False: + print "Hanging test : " + key +print "Printing Failing tests" +for key, value in failed_tests.iteritems(): + print "Failing test : " + key diff --git a/dev-support/jenkinsEnv.sh b/dev-support/jenkinsEnv.sh index a9919ff1631..6961437fd45 100755 --- a/dev-support/jenkinsEnv.sh +++ b/dev-support/jenkinsEnv.sh @@ -30,7 +30,7 @@ export CLOVER_HOME=/home/jenkins/tools/clover/latest export MAVEN_HOME=/home/jenkins/tools/maven/latest export PATH=$PATH:$JAVA_HOME/bin:$ANT_HOME/bin: -export MAVEN_OPTS="-Xmx3100M -XX:-UsePerfData" +export MAVEN_OPTS="${MAVEN_OPTS:-"-Xmx3100M -XX:-UsePerfData -XX:MaxPermSize=256m"}" ulimit -n diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index bb26e959177..93663193255 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -20,7 +20,7 @@ #set -x ### Setup some variables. -### SVN_REVISION and BUILD_URL are set by Hudson if it is run by patch process +### GIT_COMMIT and BUILD_URL are set by Hudson if it is run by patch process ### Read variables from properties file bindir=$(dirname $0) @@ -31,15 +31,20 @@ else MVN=$MAVEN_HOME/bin/mvn fi +NEWLINE=$'\n' + PROJECT_NAME=HBase JENKINS=false +MOVE_PATCH_DIR=true PATCH_DIR=/tmp BASEDIR=$(pwd) +BRANCH_NAME="master" + +. 
$BASEDIR/dev-support/test-patch.properties PS=${PS:-ps} AWK=${AWK:-awk} WGET=${WGET:-wget} -SVN=${SVN:-svn} GREP=${GREP:-grep} EGREP=${EGREP:-egrep} PATCH=${PATCH:-patch} @@ -47,6 +52,7 @@ JIRACLI=${JIRA:-jira} FINDBUGS_HOME=${FINDBUGS_HOME} FORREST_HOME=${FORREST_HOME} ECLIPSE_HOME=${ECLIPSE_HOME} +GIT=${GIT:-git} ############################################################################### printUsage() { @@ -62,12 +68,12 @@ printUsage() { echo "--mvn-cmd= The 'mvn' command to use (default \$MAVEN_HOME/bin/mvn, or 'mvn')" echo "--ps-cmd= The 'ps' command to use (default 'ps')" echo "--awk-cmd= The 'awk' command to use (default 'awk')" - echo "--svn-cmd= The 'svn' command to use (default 'svn')" echo "--grep-cmd= The 'grep' command to use (default 'grep')" echo "--patch-cmd= The 'patch' command to use (default 'patch')" echo "--findbugs-home= Findbugs home directory (default FINDBUGS_HOME environment variable)" echo "--forrest-home= Forrest home directory (default FORREST_HOME environment variable)" - echo "--dirty-workspace Allow the local SVN workspace to have uncommitted changes" + echo "--dirty-workspace Allow the local workspace to have uncommitted changes" + echo "--git-cmd= The 'git' command to use (default 'git')" echo echo "Jenkins-only options:" echo "--jenkins Run by Jenkins (runs tests and posts results to JIRA)" @@ -85,6 +91,9 @@ parseArgs() { --jenkins) JENKINS=true ;; + --no-move-patch-dir) + MOVE_PATCH_DIR=false + ;; --patch-dir=*) PATCH_DIR=${i#*=} ;; @@ -103,9 +112,6 @@ parseArgs() { --wget-cmd=*) WGET=${i#*=} ;; - --svn-cmd=*) - SVN=${i#*=} - ;; --grep-cmd=*) GREP=${i#*=} ;; @@ -130,6 +136,9 @@ parseArgs() { --dirty-workspace) DIRTY_WORKSPACE=true ;; + --git-cmd=*) + GIT=${i#*=} + ;; *) PATCH_OR_DEFECT=$i ;; @@ -180,23 +189,92 @@ checkout () { echo "" ### When run by a developer, if the workspace contains modifications, do not continue ### unless the --dirty-workspace option was set - status=`$SVN stat --ignore-externals | sed -e '/^X[ ]*/D'` if [[ $JENKINS == "false" ]] ; then - if [[ "$status" != "" && -z $DIRTY_WORKSPACE ]] ; then - echo "ERROR: can't run in a workspace that contains the following modifications" - echo "$status" - cleanupAndExit 1 + if [[ -z $DIRTY_WORKSPACE ]] ; then + # Ref http://stackoverflow.com/a/2659808 for details on checking dirty status + ${GIT} diff-index --quiet HEAD + if [[ $? -ne 0 ]] ; then + uncommitted=`${GIT} diff --name-only HEAD` + uncommitted="You have the following files with uncommitted changes:${NEWLINE}${uncommitted}" + fi + untracked="$(${GIT} ls-files --exclude-standard --others)" && test -z "${untracked}" + if [[ $? -ne 0 ]] ; then + untracked="You have untracked and unignored files:${NEWLINE}${untracked}" + fi + if [[ $uncommitted || $untracked ]] ; then + echo "ERROR: can't run in a workspace that contains modifications." + echo "Pass the '--dirty-workspace' flag to bypass." + echo "" + echo "${uncommitted}" + echo "" + echo "${untracked}" + cleanupAndExit 1 + fi fi echo - else - cd $BASEDIR - $SVN revert -R . - rm -rf `$SVN status --no-ignore` - $SVN update fi return $? 
} +findBranchNameFromPatchName() { + local patchName=$1 + for LOCAL_BRANCH_NAME in $BRANCH_NAMES; do + if [[ $patchName =~ /jira/secure/attachment/[0-9]*/.*$LOCAL_BRANCH_NAME ]]; then + BRANCH_NAME=$LOCAL_BRANCH_NAME + break + fi + done + return 0 +} + +checkoutBranch() { + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Testing patch on branch ${BRANCH_NAME}." + echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + if [[ $JENKINS == "true" ]] ; then + if [[ "$BRANCH_NAME" != "master" ]]; then + echo "origin/${BRANCH_NAME} HEAD is commit `${GIT} rev-list origin/${BRANCH_NAME} -1`" + echo "${GIT} checkout -f `${GIT} rev-list origin/${BRANCH_NAME} -1`" + ${GIT} checkout -f `${GIT} rev-list origin/${BRANCH_NAME} -1` + echo "${GIT} status" + ${GIT} status + fi + fi +} + +############################################################################### +### Collect findbugs reports +collectFindbugsReports() { + name=$1 + basedir=$2 + patch_dir=$3 + for file in $(find $basedir -name findbugsXml.xml) + do + relative_file=${file#$basedir/} # strip leading $basedir prefix + if [ ! $relative_file == "target/findbugsXml.xml" ]; then + module_suffix=${relative_file%/target/findbugsXml.xml} # strip trailing path + module_suffix=`basename ${module_suffix}` + fi + + cp $file $patch_dir/${name}FindbugsWarnings${module_suffix}.xml + $FINDBUGS_HOME/bin/setBugDatabaseInfo -name $name \ + $patch_dir/${name}FindbugsWarnings${module_suffix}.xml \ + $patch_dir/${name}FindbugsWarnings${module_suffix}.xml + done + xml_file=$patch_dir/${name}FindbugsWarnings.xml + html_file=$patch_dir/${name}FindbugsWarnings.html + $FINDBUGS_HOME/bin/unionBugs -withMessages \ + -output $xml_file $patch_dir/${name}FindbugsWarnings*.xml + $FINDBUGS_HOME/bin/convertXmlToText -html $xml_file $html_file + file $xml_file $html_file +} + ############################################################################### setup () { ### Download latest patch file (ignoring .htm and .html) when run from patch process @@ -219,10 +297,12 @@ setup () { echo "$defect patch is being downloaded at `date` from" echo "$patchURL" $WGET -q -O $PATCH_DIR/patch $patchURL - VERSION=${SVN_REVISION}_${defect}_PATCH-${patchNum} + VERSION=${GIT_COMMIT}_${defect}_PATCH-${patchNum} + findBranchNameFromPatchName ${relativePatchURL} + checkoutBranch JIRA_COMMENT="Here are the results of testing the latest attachment $patchURL - against trunk revision ${SVN_REVISION}. + against ${BRANCH_NAME} branch at commit ${GIT_COMMIT}. ATTACHMENT ID: ${ATTACHMENT_ID}" ### Copy the patch file to $PATCH_DIR @@ -236,11 +316,9 @@ setup () { cleanupAndExit 0 fi fi - . 
$BASEDIR/dev-support/test-patch.properties ### exit if warnings are NOT defined in the properties file - if [ -z "$OK_FINDBUGS_WARNINGS" ] || [[ -z "$OK_JAVADOC_WARNINGS" ]] || [[ -z $OK_RELEASEAUDIT_WARNINGS ]] ; then + if [[ -z "$OK_JAVADOC_WARNINGS" ]] || [[ -z $OK_RELEASEAUDIT_WARNINGS ]] ; then echo "Please define the following properties in test-patch.properties file" - echo "OK_FINDBUGS_WARNINGS" echo "OK_RELEASEAUDIT_WARNINGS" echo "OK_JAVADOC_WARNINGS" cleanupAndExit 1 @@ -249,22 +327,28 @@ setup () { echo "" echo "======================================================================" echo "======================================================================" - echo " Pre-build trunk to verify trunk stability and javac warnings" + echo " Pre-build master to verify stability and javac warnings" echo "======================================================================" echo "======================================================================" echo "" echo "" - echo "$MVN clean package checkstyle:checkstyle-aggregate -DskipTests -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavacWarnings.txt 2>&1" + echo "$MVN clean package checkstyle:checkstyle-aggregate findbugs:findbugs -DskipTests \ + -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavacWarnings.txt 2>&1" export MAVEN_OPTS="${MAVEN_OPTS}" # build core and tests - $MVN clean package checkstyle:checkstyle-aggregate -DskipTests -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavacWarnings.txt 2>&1 + $MVN clean package checkstyle:checkstyle-aggregate findbugs:findbugs -DskipTests \ + -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavacWarnings.txt 2>&1 if [[ $? != 0 ]] ; then + echo "mvn exit code was $?" ERR=`$GREP -A 5 'Compilation failure' $PATCH_DIR/trunkJavacWarnings.txt` - echo "Trunk compilation is broken? - {code}$ERR{code}" - cleanupAndExit 1 + if [[ ${#ERR} -ge 1 ]] ; then + echo "Trunk compilation is broken? + {code}$ERR{code}" + cleanupAndExit 1 + fi fi mv target/checkstyle-result.xml $PATCH_DIR/trunkCheckstyle.xml + collectFindbugsReports trunk $BASEDIR $PATCH_DIR } ############################################################################### @@ -318,6 +402,16 @@ checkTests () { return 0 fi fi + srcReferences=`${GREP} "diff --git" "${PATCH_DIR}/patch" | ${GREP} "src/main" | \ + ${GREP} -v "src/main/asciidoc" | ${GREP} -v "src/main/site" -c` + if [[ $srcReferences == 0 ]] ; then + echo "The patch doesn't appear to alter any code that requires tests." + JIRA_COMMENT="$JIRA_COMMENT + + {color:green}+0 tests included{color}. The patch appears to be a documentation, build, + or dev-support patch that doesn't require tests." + return 0 + fi JIRA_COMMENT="$JIRA_COMMENT {color:red}-1 tests included{color}. The patch doesn't appear to include any new or modified tests. @@ -335,21 +429,26 @@ checkTests () { ### Check there are no compilation errors, passing a file to be parsed. checkCompilationErrors() { local file=$1 + hadoopVersion="" + if [ "$#" -ne 1 ]; then + hadoopVersion="with Hadoop version $2" + fi COMPILATION_ERROR=false eval $(awk '/ERROR/ {print "COMPILATION_ERROR=true"}' $file) if $COMPILATION_ERROR ; then ERRORS=$($AWK '/ERROR/ { print $0 }' $file) echo "======================================================================" - echo "There are compilation errors." + echo "There are compilation errors $hadoopVersion." echo "======================================================================" echo "$ERRORS" JIRA_COMMENT="$JIRA_COMMENT - {color:red}-1 javac{color}. 
The patch appears to cause mvn compile goal to fail. + {color:red}-1 javac{color}. The patch appears to cause mvn compile goal to fail $hadoopVersion. Compilation errors resume: $ERRORS " + submitJiraComment 1 cleanupAndExit 1 fi } @@ -418,9 +517,8 @@ checkAntiPatterns () { if [[ $warnings != "" ]]; then JIRA_COMMENT="$JIRA_COMMENT - {color:red}-1 Anti-pattern{color}. The patch appears to have anti-pattern where BYTES_COMPARATOR was omitted: - $warnings." - return 1 + {color:red}-1 Anti-pattern{color}. The patch appears to have anti-pattern where BYTES_COMPARATOR was omitted: $warnings." + return 1 fi return 0 } @@ -441,9 +539,8 @@ checkInterfaceAudience () { if [[ $warnings != "" ]]; then JIRA_COMMENT="$JIRA_COMMENT - {color:red}-1 InterfaceAudience{color}. The patch appears to contain InterfaceAudience from hadoop rather than hbase: - $warnings." - return 1 + {color:red}-1 InterfaceAudience{color}. The patch appears to contain InterfaceAudience from hadoop rather than hbase: $warnings." + return 1 fi return 0 } @@ -473,6 +570,9 @@ checkJavadocWarnings () { JIRA_COMMENT="$JIRA_COMMENT {color:red}-1 javadoc{color}. The javadoc tool appears to have generated `expr $(($javadocWarnings-$OK_JAVADOC_WARNINGS))` warning messages." + # Add javadoc output url + JIRA_COMMENT_FOOTER="Javadoc warnings: $BUILD_URL/artifact/patchprocess/patchJavadocWarnings.txt +$JIRA_COMMENT_FOOTER" return 1 fi JIRA_COMMENT="$JIRA_COMMENT @@ -481,6 +581,31 @@ checkJavadocWarnings () { return 0 } +checkBuildWithHadoopVersions() { + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Building with all supported Hadoop versions ." + echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + export MAVEN_OPTS="${MAVEN_OPTS}" + for HADOOP2_VERSION in $HADOOP2_VERSIONS ; do + echo "$MVN clean install -DskipTests -D${PROJECT_NAME}PatchProcess -Dhadoop-two.version=$HADOOP2_VERSION > $PATCH_DIR/patchJavacWithHadoop-$HADOOP2_VERSION.txt 2>&1" + $MVN clean install -DskipTests -D${PROJECT_NAME}PatchProcess -Dhadoop-two.version=$HADOOP2_VERSION > $PATCH_DIR/patchJavacWithHadoop-$HADOOP2_VERSION.txt 2>&1 + checkCompilationErrors $PATCH_DIR/patchJavacWithHadoop-$HADOOP2_VERSION.txt $HADOOP2_VERSION + done + + # TODO: add Hadoop3 versions and compilation here when we get the hadoop.profile=3.0 working + + JIRA_COMMENT="$JIRA_COMMENT + + {color:green}+1 hadoop versions{color}. The patch compiles with all supported hadoop versions ($HADOOP2_VERSIONS)" + return 0 +} + ############################################################################### ### Check there are no changes in the number of Javac warnings checkJavacWarnings () { @@ -506,7 +631,7 @@ checkJavacWarnings () { if [[ $patchJavacWarnings -gt $trunkJavacWarnings ]] ; then JIRA_COMMENT="$JIRA_COMMENT - {color:red}-1 javac{color}. The applied patch generated $patchJavacWarnings javac compiler warnings (more than the trunk's current $trunkJavacWarnings warnings)." + {color:red}-1 javac{color}. The applied patch generated $patchJavacWarnings javac compiler warnings (more than the master's current $trunkJavacWarnings warnings)." 
return 1 fi fi @@ -532,23 +657,25 @@ checkCheckstyleErrors() { mv target/checkstyle-result.xml $PATCH_DIR/patchCheckstyle.xml mv target/site/checkstyle-aggregate.html $PATCH_DIR mv target/site/checkstyle.css $PATCH_DIR - trunkCheckstyleErrors=`$GREP ' $PATCH_DIR/patchReleaseAuditProblems.txt echo "Lines that start with ????? in the release audit report indicate files that do not have an Apache license header." >> $PATCH_DIR/patchReleaseAuditProblems.txt JIRA_COMMENT_FOOTER="Release audit warnings: $BUILD_URL/artifact/patchprocess/patchReleaseAuditWarnings.txt @@ -638,41 +765,36 @@ checkFindbugsWarnings () { {color:red}-1 findbugs{color}. The patch appears to cause Findbugs (version ${findbugs_version}) to fail." return 1 fi - - findbugsWarnings=0 - for file in $(find $BASEDIR -name findbugsXml.xml) - do - relative_file=${file#$BASEDIR/} # strip leading $BASEDIR prefix - if [ ! $relative_file == "target/findbugsXml.xml" ]; then - module_suffix=${relative_file%/target/findbugsXml.xml} # strip trailing path - module_suffix=`basename ${module_suffix}` - fi - - cp $file $PATCH_DIR/patchFindbugsWarnings${module_suffix}.xml - $FINDBUGS_HOME/bin/setBugDatabaseInfo -timestamp "01/01/2000" \ - $PATCH_DIR/patchFindbugsWarnings${module_suffix}.xml \ - $PATCH_DIR/patchFindbugsWarnings${module_suffix}.xml - newFindbugsWarnings=`$FINDBUGS_HOME/bin/filterBugs -first "01/01/2000" $PATCH_DIR/patchFindbugsWarnings${module_suffix}.xml \ - $PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.xml | $AWK '{print $1}'` - echo "Found $newFindbugsWarnings Findbugs warnings ($file)" - findbugsWarnings=$((findbugsWarnings+newFindbugsWarnings)) - $FINDBUGS_HOME/bin/convertXmlToText -html \ - $PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.xml \ - $PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.html - JIRA_COMMENT_FOOTER="Findbugs warnings: $BUILD_URL/artifact/trunk/patchprocess/newPatchFindbugsWarnings${module_suffix}.html -$JIRA_COMMENT_FOOTER" - done - ### if current warnings greater than OK_FINDBUGS_WARNINGS - if [[ $findbugsWarnings -gt $OK_FINDBUGS_WARNINGS ]] ; then + collectFindbugsReports patch $BASEDIR $PATCH_DIR + #this files are generated by collectFindbugsReports() named with its first argument + patch_xml=$PATCH_DIR/patchFindbugsWarnings.xml + trunk_xml=$PATCH_DIR/trunkFindbugsWarnings.xml + # combine them to one database + combined_xml=$PATCH_DIR/combinedFindbugsWarnings.xml + new_xml=$PATCH_DIR/newFindbugsWarnings.xml + new_html=$PATCH_DIR/newFindbugsWarnings.html + $FINDBUGS_HOME/bin/computeBugHistory -useAnalysisTimes -withMessages \ + -output $combined_xml $trunk_xml $patch_xml + findbugsWarnings=$($FINDBUGS_HOME/bin/filterBugs -first patch $combined_xml $new_xml) + findbugsFixedWarnings=$($FINDBUGS_HOME/bin/filterBugs -fixed patch $combined_xml $new_xml) + $FINDBUGS_HOME/bin/convertXmlToText -html $new_xml $new_html + file $new_xml $new_html + JIRA_COMMENT_FOOTER="Release Findbugs (version ${findbugs_version}) \ + warnings: $BUILD_URL/artifact/patchprocess/newFindbugsWarnings.html +$JIRA_COMMENT_FOOTER" + ### if current warnings greater than 0, fail + if [[ $findbugsWarnings -gt 0 ]] ; then JIRA_COMMENT="$JIRA_COMMENT - {color:red}-1 findbugs{color}. The patch appears to introduce `expr $(($findbugsWarnings-$OK_FINDBUGS_WARNINGS))` new Findbugs (version ${findbugs_version}) warnings." + {color:red}-1 findbugs{color}. The patch appears to introduce $findbugsWarnings \ + new Findbugs (version ${findbugs_version}) warnings." 
return 1 fi JIRA_COMMENT="$JIRA_COMMENT - {color:green}+1 findbugs{color}. The patch does not introduce any new Findbugs (version ${findbugs_version}) warnings." + {color:green}+1 findbugs{color}. The patch does not introduce any \ + new Findbugs (version ${findbugs_version}) warnings." return 0 } @@ -691,7 +813,7 @@ checkLineLengths () { #see http://en.wikipedia.org/wiki/Diff#Unified_format MAX_LINE_LENGTH_PATCH=`expr $MAX_LINE_LENGTH + 1` - lines=`cat $PATCH_DIR/patch | grep "^+" | grep -v "^@@" | grep -v "^+++" | grep -v "import" | grep -v "org.apache.thrift." | grep -v "com.google.protobuf." | grep -v "hbase.protobuf.generated" | awk -v len="$MAX_LINE_LENGTH_PATCH" 'length ($0) > len' | head -n 10` + lines=`cat $PATCH_DIR/patch | grep "^+" | grep -v "^@@" | grep -v "^+++" | grep -v "import" | grep -v "org.apache.thrift." | grep -v "com.google.protobuf." | grep -v "protobuf.generated" | awk -v len="$MAX_LINE_LENGTH_PATCH" 'length ($0) > len' | head -n 10` ll=`echo "$lines" | wc -l` if [[ "$ll" -gt "1" ]]; then JIRA_COMMENT="$JIRA_COMMENT @@ -707,12 +829,6 @@ checkLineLengths () { return 0 } -zombieCount() { - # HBase tests have been flagged with an innocuous '-Dhbase.test' just so they can - # be identified as hbase in a process listing. - echo `jps -v | grep -e surefirebooter -e '-Dhbase.test' | wc -l` -} - ############################################################################### ### Run the tests runTests () { @@ -727,55 +843,29 @@ runTests () { echo "" failed_tests="" - ### Kill any rogue build processes from the last attempt - condemnedCount=`$PS auxwww | $GREP ${PROJECT_NAME}PatchProcess | $AWK '{print $2}' | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'` - echo "WARNING: $condemnedCount rogue build processes detected, terminating." - $PS auxwww | $GREP ${PROJECT_NAME}PatchProcess | $AWK '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null - echo "$MVN clean test -P runAllTests -D${PROJECT_NAME}PatchProcess" + echo "$MVN clean test -Dsurefire.rerunFailingTestsCount=2 -P runAllTests -D${PROJECT_NAME}PatchProcess" export MAVEN_OPTS="${MAVEN_OPTS}" ulimit -a - $MVN clean test -P runAllTests -D${PROJECT_NAME}PatchProcess + $MVN clean test -Dsurefire.rerunFailingTestsCount=2 -P runAllTests -D${PROJECT_NAME}PatchProcess + # Need to export this so the zombie subshell picks up current content + export JIRA_COMMENT if [[ $? != 0 ]] ; then ### Find and format names of failed tests failed_tests=`find . -name 'TEST*.xml' | xargs $GREP -l -E " $PATCH_DIR/patchSiteOutput.txt 2>&1" + echo "$MVN package post-site -DskipTests -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchSiteOutput.txt 2>&1" export MAVEN_OPTS="${MAVEN_OPTS}" - $MVN package site -DskipTests -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchSiteOutput.txt 2>&1 + $MVN package post-site -DskipTests -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchSiteOutput.txt 2>&1 if [[ $? != 0 ]] ; then JIRA_COMMENT="$JIRA_COMMENT - {color:red}-1 site{color}. The patch appears to cause mvn site goal to fail." + {color:red}-1 site{color}. The patch appears to cause mvn post-site goal to fail." return 1 fi JIRA_COMMENT="$JIRA_COMMENT - {color:green}+1 site{color}. The mvn site goal succeeds with this patch." + {color:green}+1 site{color}. The mvn post-site goal succeeds with this patch." 
return 0 } @@ -883,8 +973,9 @@ $comment" ### Cleanup files cleanupAndExit () { local result=$1 - if [[ $JENKINS == "true" ]] ; then + if [[ ${JENKINS} == "true" && ${MOVE_PATCH_DIR} == "true" ]] ; then if [ -e "$PATCH_DIR" ] ; then + echo "Relocating patch dir into ${BASEDIR}" mv $PATCH_DIR $BASEDIR fi fi @@ -913,8 +1004,10 @@ This message is automatically generated." parseArgs $@ cd $BASEDIR +echo "Version of this script: Wed Oct 14 00:29:04 PDT 2015" checkout RESULT=$? +echo "RESULT = " $RESULT if [[ $JENKINS == "true" ]] ; then if [[ $RESULT != 0 ]] ; then exit 100 @@ -923,8 +1016,10 @@ fi setup checkAuthor RESULT=$? +echo "RESULT = " $RESULT checkTests (( RESULT = RESULT + $? )) +echo "RESULT = " $RESULT applyPatch if [[ $? != 0 ]] ; then submitJiraComment 1 @@ -933,28 +1028,42 @@ fi checkAntiPatterns (( RESULT = RESULT + $? )) +echo "RESULT = " $RESULT +checkBuildWithHadoopVersions +(( RESULT = RESULT + $? )) +echo "RESULT = " $RESULT checkJavacWarnings (( RESULT = RESULT + $? )) +echo "RESULT = " $RESULT checkProtocErrors (( RESULT = RESULT + $? )) +echo "RESULT = " $RESULT checkJavadocWarnings (( RESULT = RESULT + $? )) +echo "RESULT = " $RESULT checkCheckstyleErrors (( RESULT = RESULT + $? )) +echo "RESULT = " $RESULT checkInterfaceAudience (( RESULT = RESULT + $? )) +echo "RESULT = " $RESULT checkFindbugsWarnings (( RESULT = RESULT + $? )) +echo "RESULT = " $RESULT checkReleaseAuditWarnings (( RESULT = RESULT + $? )) +echo "RESULT = " $RESULT checkLineLengths (( RESULT = RESULT + $? )) +echo "RESULT = " $RESULT checkSiteXml (( RESULT = RESULT + $?)) -### Do not call these when run by a developer +echo "RESULT = " $RESULT +### Do not call these when run by a developer if [[ $JENKINS == "true" ]] ; then runTests (( RESULT = RESULT + $? )) + echo "RESULT = " $RESULT JIRA_COMMENT_FOOTER="Test results: $BUILD_URL/testReport/ $JIRA_COMMENT_FOOTER" fi diff --git a/dev-support/zombie-detector.sh b/dev-support/zombie-detector.sh new file mode 100644 index 00000000000..df4c197ce4d --- /dev/null +++ b/dev-support/zombie-detector.sh @@ -0,0 +1,166 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Looks for any running zombies left over from old build runs. +# Will report and try to do stack trace on stale processes so can +# figure how they are hung. Echos state as the script runs +# on STDERR but prints final output on STDOUT formatted so it +# will fold into the test result formatting done by test-patch.sh. +# This script is called from test-patch.sh but also after tests +# have run up on builds.apache.org. + +# TODO: format output to suit context -- test-patch, jenkins or dev env + +#set -x +# printenv + +### Setup some variables. 
+bindir=$(dirname $0) + +# This key is set by our surefire configuration up in the main pom.xml +# This key needs to match the key we set up there. +HBASE_BUILD_ID_KEY="hbase.build.id=" +JENKINS= + +PS=${PS:-ps} +AWK=${AWK:-awk} +WGET=${WGET:-wget} +GREP=${GREP:-grep} +JIRACLI=${JIRA:-jira} + +############################################################################### +printUsage() { + echo "Usage: $0 [options]" BUILD_ID + echo + echo "Where:" + echo " BUILD_ID is build id to look for in process listing" + echo + echo "Options:" + echo "--ps-cmd= The 'ps' command to use (default 'ps')" + echo "--awk-cmd= The 'awk' command to use (default 'awk')" + echo "--grep-cmd= The 'grep' command to use (default 'grep')" + echo + echo "Jenkins-only options:" + echo "--jenkins Run by Jenkins (runs tests and posts results to JIRA)" + echo "--wget-cmd= The 'wget' command to use (default 'wget')" + echo "--jira-cmd= The 'jira' command to use (default 'jira')" +} + +############################################################################### +parseArgs() { + for i in $* + do + case $i in + --jenkins) + JENKINS=true + ;; + --ps-cmd=*) + PS=${i#*=} + ;; + --awk-cmd=*) + AWK=${i#*=} + ;; + --wget-cmd=*) + WGET=${i#*=} + ;; + --grep-cmd=*) + GREP=${i#*=} + ;; + --jira-cmd=*) + JIRACLI=${i#*=} + ;; + *) + BUILD_ID=$i + ;; + esac + done + if [ -z "$BUILD_ID" ]; then + printUsage + exit 1 + fi +} + +### Return list of the processes found with passed build id. +find_processes () { + jps -v | grep surefirebooter | grep -e "${HBASE_BUILD_TAG}" +} + +### Look for zombies +zombies () { + ZOMBIES=`find_processes` + if [[ -z ${ZOMBIES} ]] + then + ZOMBIE_TESTS_COUNT=0 + else + ZOMBIE_TESTS_COUNT=`echo "${ZOMBIES}"| wc -l| xargs` + fi + if [[ $ZOMBIE_TESTS_COUNT != 0 ]] ; then + wait=30 + echo "`date` Found ${ZOMBIE_TESTS_COUNT} suspicious java process(es) listed below; waiting ${wait}s to see if just slow to stop" >&2 + echo ${ZOMBIES} >&2 + sleep ${wait} + PIDS=`echo "${ZOMBIES}"|${AWK} '{print $1}'` + ZOMBIE_TESTS_COUNT=0 + for pid in $PIDS + do + # Test our zombie still running (and that it still an hbase build item) + PS_OUTPUT=`ps -p $pid | tail +2 | grep -e "${HBASE_BUILD_TAG}"` + if [[ ! -z "${PS_OUTPUT}" ]] + then + echo "`date` Zombie: $PS_OUTPUT" >&2 + let "ZOMBIE_TESTS_COUNT+=1" + PS_STACK=`jstack $pid | grep -e "\.Test" | grep -e "\.java"| head -3` + echo "${PS_STACK}" >&2 + ZB_STACK="${ZB_STACK}\nPID=${pid} ${PS_STACK}" + fi + done + if [[ $ZOMBIE_TESTS_COUNT != 0 ]] + then + echo "`date` There are ${ZOMBIE_TESTS_COUNT} possible zombie test(s)." >&2 + # If JIRA_COMMENT in environment, append our findings to it + echo -e "$JIRA_COMMENT + + {color:red}+1 zombies{red}. There are ${ZOMBIE_TESTS_COUNT} possible zombie test(s) + ${ZB_STACK}" + # Exit with exit code of 1. + exit 1 + else + echo "`date` We're ok: there was a zombie candidate but it went away" >&2 + echo "$JIRA_COMMENT + + {color:green}+1 zombies{color}. No zombie tests found running at the end of the build (There were candidates but they seem to have gone away)." + fi + else + echo "`date` We're ok: there is no zombie test" >&2 + echo "$JIRA_COMMENT + + {color:green}+1 zombies{color}. No zombie tests found running at the end of the build." + fi +} + +### Check if arguments to the script have been specified properly or not +parseArgs $@ +HBASE_BUILD_TAG="${HBASE_BUILD_ID_KEY}${BUILD_ID}" +zombies +RESULT=$? +if [[ $JENKINS == "true" ]] ; then + if [[ $RESULT != 0 ]] ; then + exit 100 + fi +fi +RESULT=$? 
From d1c412ae8f714c56eede8f0aca8d9c37bc1ed97a Mon Sep 17 00:00:00 2001 From: Sean Busbey Date: Thu, 17 Dec 2015 22:50:19 -0600 Subject: [PATCH 36/72] HBASE-15003 remove BoundedConcurrentLinkedQueue. Signed-off-by: Matteo Bertozzi Conflicts: hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedConcurrentLinkedQueue.java --- .../util/BoundedConcurrentLinkedQueue.java | 122 ------------- .../TestBoundedConcurrentLinkedQueue.java | 160 ------------------ 2 files changed, 282 deletions(-) delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedConcurrentLinkedQueue.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedConcurrentLinkedQueue.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedConcurrentLinkedQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedConcurrentLinkedQueue.java deleted file mode 100644 index f66771bfa56..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedConcurrentLinkedQueue.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.util; - -import java.util.Collection; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.atomic.AtomicLong; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; - -/** - * A ConcurrentLinkedQueue that enforces a maximum queue size. - */ -@InterfaceAudience.Private -@InterfaceStability.Stable -public class BoundedConcurrentLinkedQueue extends ConcurrentLinkedQueue { - private static final long serialVersionUID = 1L; - private final AtomicLong size = new AtomicLong(0L); - private final long maxSize; - - public BoundedConcurrentLinkedQueue() { - this(Long.MAX_VALUE); - } - - public BoundedConcurrentLinkedQueue(long maxSize) { - super(); - this.maxSize = maxSize; - } - - @Override - public boolean addAll(Collection c) { - for (;;) { - long currentSize = size.get(); - long nextSize = currentSize + c.size(); - if (nextSize > maxSize) { // already exceeded limit - return false; - } - if (size.compareAndSet(currentSize, nextSize)) { - break; - } - } - return super.addAll(c); // Always true for ConcurrentLinkedQueue - } - - @Override - public void clear() { - // override this method to batch update size. 
- long removed = 0L; - while (super.poll() != null) { - removed++; - } - size.addAndGet(-removed); - } - - @Override - public boolean offer(T e) { - for (;;) { - long currentSize = size.get(); - if (currentSize >= maxSize) { // already exceeded limit - return false; - } - if (size.compareAndSet(currentSize, currentSize + 1)) { - break; - } - } - return super.offer(e); // Always true for ConcurrentLinkedQueue - } - - @Override - public T poll() { - T result = super.poll(); - if (result != null) { - size.decrementAndGet(); - } - return result; - } - - @Override - public boolean remove(Object o) { - boolean result = super.remove(o); - if (result) { - size.decrementAndGet(); - } - return result; - } - - @Override - public int size() { - return (int) size.get(); - } - - public void drainTo(Collection list) { - long removed = 0; - for (T element; (element = super.poll()) != null;) { - list.add(element); - removed++; - } - // Limit the number of operations on size by only reporting size change after the drain is - // completed. - size.addAndGet(-removed); - } - - public long remainingCapacity() { - return maxSize - size.get(); - } -} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedConcurrentLinkedQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedConcurrentLinkedQueue.java deleted file mode 100644 index d3c206f681a..00000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedConcurrentLinkedQueue.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.hadoop.hbase.util; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import java.util.ArrayList; -import java.util.List; -import java.util.Random; -import java.util.concurrent.atomic.AtomicBoolean; - -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.Before; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(SmallTests.class) -public class TestBoundedConcurrentLinkedQueue { - private final static int CAPACITY = 16; - - private BoundedConcurrentLinkedQueue queue; - - @Before - public void setUp() throws Exception { - this.queue = new BoundedConcurrentLinkedQueue(CAPACITY); - } - - @Test - public void testOfferAndPoll() throws Exception { - // Offer - for (long i = 1; i <= CAPACITY; ++i) { - assertTrue(queue.offer(i)); - assertEquals(i, queue.size()); - assertEquals(CAPACITY - i, queue.remainingCapacity()); - } - assertFalse(queue.offer(0L)); - - // Poll - for (int i = 1; i <= CAPACITY; ++i) { - long l = queue.poll(); - assertEquals(i, l); - assertEquals(CAPACITY - i, queue.size()); - assertEquals(i, queue.remainingCapacity()); - } - assertEquals(null, queue.poll()); - } - - @Test - public void testDrain() throws Exception { - // Offer - for (long i = 1; i <= CAPACITY; ++i) { - assertTrue(queue.offer(i)); - assertEquals(i, queue.size()); - assertEquals(CAPACITY - i, queue.remainingCapacity()); - } - assertFalse(queue.offer(0L)); - - // Drain - List list = new ArrayList(); - queue.drainTo(list); - assertEquals(null, queue.poll()); - assertEquals(0, queue.size()); - assertEquals(CAPACITY, queue.remainingCapacity()); - } - - @Test - public void testClear() { - // Offer - for (long i = 1; i <= CAPACITY; ++i) { - assertTrue(queue.offer(i)); - assertEquals(i, queue.size()); - assertEquals(CAPACITY - i, queue.remainingCapacity()); - } - assertFalse(queue.offer(0L)); - - queue.clear(); - assertEquals(null, queue.poll()); - assertEquals(0, queue.size()); - assertEquals(CAPACITY, queue.remainingCapacity()); - } - - @Test - public void testMultiThread() throws InterruptedException { - int offerThreadCount = 10; - int pollThreadCount = 5; - int duration = 5000; // ms - final AtomicBoolean stop = new AtomicBoolean(false); - Thread[] offerThreads = new Thread[offerThreadCount]; - for (int i = 0; i < offerThreadCount; i++) { - offerThreads[i] = new Thread("offer-thread-" + i) { - - @Override - public void run() { - Random rand = new Random(); - while (!stop.get()) { - queue.offer(rand.nextLong()); - try { - Thread.sleep(1); - } catch (InterruptedException e) { - } - } - } - - }; - } - Thread[] pollThreads = new Thread[pollThreadCount]; - for (int i = 0; i < pollThreadCount; i++) { - pollThreads[i] = new Thread("poll-thread-" + i) { - - @Override - public void run() { - while (!stop.get()) { - queue.poll(); - try { - Thread.sleep(1); - } catch (InterruptedException e) { - } - } - } - - }; - } - for (Thread t : offerThreads) { - t.start(); - } - for (Thread t : pollThreads) { - t.start(); - } - long startTime = System.currentTimeMillis(); - while (System.currentTimeMillis() - startTime < duration) { - assertTrue(queue.size() <= CAPACITY); - Thread.yield(); - } - stop.set(true); - for (Thread t : offerThreads) { - t.join(); - } - for (Thread t : pollThreads) { - t.join(); - } - assertTrue(queue.size() <= CAPACITY); - } -} From b3300602ed3af9254eabe0bf3db0570cbf01ae05 Mon Sep 17 00:00:00 2001 From: Ashu Pachauri Date: Thu, 17 
Dec 2015 13:25:39 -0800 Subject: [PATCH 37/72] HBASE-15001 Fix thread-safety issues with replication ReplicationSinkManager and HBaseInterClusterReplicationEndpoint perform certain unsafe operations which might lead to undesirable behavior with multiwal enabled. Signed-off-by: Elliott Clark --- .../HBaseInterClusterReplicationEndpoint.java | 27 ++++++++++++++----- .../regionserver/ReplicationSinkManager.java | 21 ++++++++++----- .../TestReplicationSinkManager.java | 26 +++++++++--------- 3 files changed, 49 insertions(+), 25 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index 6bc8c6ad3c9..b94d21d5ffc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -144,9 +144,9 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi int sleepMultiplier = 1; // Connect to peer cluster first, unless we have to stop - while (this.isRunning() && replicationSinkMgr.getSinks().size() == 0) { + while (this.isRunning() && replicationSinkMgr.getNumSinks() == 0) { replicationSinkMgr.chooseSinks(); - if (this.isRunning() && replicationSinkMgr.getSinks().size() == 0) { + if (this.isRunning() && replicationSinkMgr.getNumSinks() == 0) { if (sleepForRetries("Waiting for peers", sleepMultiplier)) { sleepMultiplier++; } @@ -181,19 +181,24 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi List entries = replicateContext.getEntries(); String walGroupId = replicateContext.getWalGroupId(); int sleepMultiplier = 1; + int numReplicated = 0; if (!peersSelected && this.isRunning()) { connectToPeers(); peersSelected = true; } - if (replicationSinkMgr.getSinks().size() == 0) { + int numSinks = replicationSinkMgr.getNumSinks(); + if (numSinks == 0) { + LOG.warn("No replication sinks found, returning without replicating. The source should retry" + + " with the same set of edits."); return false; } + // minimum of: configured threads, number of 100-waledit batches, // and number of current sinks - int n = Math.min(Math.min(this.maxThreads, entries.size()/100+1), - replicationSinkMgr.getSinks().size()); + int n = Math.min(Math.min(this.maxThreads, entries.size()/100+1), numSinks); + List> entryLists = new ArrayList>(n); if (n == 1) { entryLists.add(entries); @@ -238,7 +243,11 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi // wait for all futures, remove successful parts // (only the remaining parts will be retried) Future f = pool.take(); - entryLists.set(f.get().intValue(), Collections.emptyList()); + int index = f.get().intValue(); + int batchSize = entryLists.get(index).size(); + entryLists.set(index, Collections.emptyList()); + // Now, we have marked the batch as done replicating, record its size + numReplicated += batchSize; } catch (InterruptedException ie) { iox = new IOException(ie); } catch (ExecutionException ee) { @@ -250,6 +259,12 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi // if we had any exceptions, try again throw iox; } + if (numReplicated != entries.size()) { + // Something went wrong here and we don't know what, let's just fail and retry. 
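The numReplicated bookkeeping added above sums the sizes of the batches whose futures completed and, once the completion service is drained, compares the total against the number of edits handed in; on any mismatch the endpoint returns false so the source simply retries the same set of edits. A small standalone sketch of that count-and-verify pattern follows, with a trivial callable standing in for the per-batch replication RPC (the names here are illustrative, not HBase API).

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class BatchCountCheckSketch {
  public static void main(String[] args) throws Exception {
    List<List<Integer>> batches = new ArrayList<>();
    batches.add(Arrays.asList(1, 2, 3));
    batches.add(Arrays.asList(4, 5));
    int totalEdits = 5;

    ExecutorService pool = Executors.newFixedThreadPool(2);
    CompletionService<Integer> cs = new ExecutorCompletionService<>(pool);
    for (int i = 0; i < batches.size(); i++) {
      final int index = i;
      cs.submit(() -> index);              // a real worker would ship batches.get(index) and return its index
    }

    int numReplicated = 0;
    for (int i = 0; i < batches.size(); i++) {
      Future<Integer> f = cs.take();       // wait for whichever batch finishes next
      int index = f.get();
      numReplicated += batches.get(index).size();
      batches.set(index, new ArrayList<Integer>()); // clear the finished batch so only failures stay behind
    }
    pool.shutdown();

    // Same guard as the patch: if the completed batches do not add up to the
    // edits handed in, report failure so the caller retries the whole set.
    if (numReplicated != totalEdits) {
      System.out.println("mismatch: would return false and let the source retry");
    } else {
      System.out.println("all " + numReplicated + " edits accounted for");
    }
  }
}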
+ LOG.warn("The number of edits replicated is different from the number received," + + " failing for now."); + return false; + } // update metrics this.metrics.setAgeOfLastShippedOp(entries.get(entries.size() - 1).getKey().getWriteTime(), walGroupId); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java index 76fa6c2c699..0469f9b6dcb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java @@ -23,6 +23,7 @@ import java.util.List; import java.util.Map; import java.util.Random; +import com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -105,7 +106,7 @@ public class ReplicationSinkManager { * * @return a replication sink to replicate to */ - public SinkPeer getReplicationSink() throws IOException { + public synchronized SinkPeer getReplicationSink() throws IOException { if (endpoint.getLastRegionServerUpdate() > this.lastUpdateToPeers || sinks.isEmpty()) { LOG.info("Current list of sinks is out of date or empty, updating"); chooseSinks(); @@ -127,7 +128,7 @@ public class ReplicationSinkManager { * @param sinkPeer * The SinkPeer that had a failed replication attempt on it */ - public void reportBadSink(SinkPeer sinkPeer) { + public synchronized void reportBadSink(SinkPeer sinkPeer) { ServerName serverName = sinkPeer.getServerName(); int badReportCount = (badReportCounts.containsKey(serverName) ? badReportCounts.get(serverName) : 0) + 1; @@ -146,11 +147,14 @@ public class ReplicationSinkManager { * @param sinkPeer * The SinkPeer that had a failed replication attempt on it */ - public void reportSinkSuccess(SinkPeer sinkPeer) { + public synchronized void reportSinkSuccess(SinkPeer sinkPeer) { badReportCounts.remove(sinkPeer.getServerName()); } - void chooseSinks() { + /** + * Refresh the list of sinks. 
+ */ + public synchronized void chooseSinks() { List slaveAddresses = endpoint.getRegionServers(); Collections.shuffle(slaveAddresses, random); int numSinks = (int) Math.ceil(slaveAddresses.size() * ratio); @@ -159,8 +163,13 @@ public class ReplicationSinkManager { badReportCounts.clear(); } - List getSinks() { - return sinks; + public synchronized int getNumSinks() { + return sinks.size(); + } + + @VisibleForTesting + protected List getSinksForTesting() { + return Collections.unmodifiableList(sinks); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java index 4eb7f51e3d0..c0b7d0c9e59 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java @@ -66,7 +66,7 @@ public class TestReplicationSinkManager { sinkManager.chooseSinks(); - assertEquals(2, sinkManager.getSinks().size()); + assertEquals(2, sinkManager.getNumSinks()); } @@ -80,7 +80,7 @@ public class TestReplicationSinkManager { sinkManager.chooseSinks(); - assertEquals(1, sinkManager.getSinks().size()); + assertEquals(1, sinkManager.getNumSinks()); } @Test @@ -92,14 +92,14 @@ public class TestReplicationSinkManager { sinkManager.chooseSinks(); // Sanity check - assertEquals(1, sinkManager.getSinks().size()); + assertEquals(1, sinkManager.getNumSinks()); SinkPeer sinkPeer = new SinkPeer(serverNameA, mock(AdminService.BlockingInterface.class)); sinkManager.reportBadSink(sinkPeer); // Just reporting a bad sink once shouldn't have an effect - assertEquals(1, sinkManager.getSinks().size()); + assertEquals(1, sinkManager.getNumSinks()); } @@ -119,9 +119,9 @@ public class TestReplicationSinkManager { sinkManager.chooseSinks(); // Sanity check - assertEquals(3, sinkManager.getSinks().size()); + assertEquals(3, sinkManager.getNumSinks()); - ServerName serverName = sinkManager.getSinks().get(0); + ServerName serverName = sinkManager.getSinksForTesting().get(0); SinkPeer sinkPeer = new SinkPeer(serverName, mock(AdminService.BlockingInterface.class)); @@ -132,12 +132,12 @@ public class TestReplicationSinkManager { // Reporting a bad sink more than the threshold count should remove it // from the list of potential sinks - assertEquals(2, sinkManager.getSinks().size()); + assertEquals(2, sinkManager.getNumSinks()); // // now try a sink that has some successes // - serverName = sinkManager.getSinks().get(0); + serverName = sinkManager.getSinksForTesting().get(0); sinkPeer = new SinkPeer(serverName, mock(AdminService.BlockingInterface.class)); for (int i = 0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD-1; i++) { @@ -147,17 +147,17 @@ public class TestReplicationSinkManager { sinkManager.reportBadSink(sinkPeer); // did not remove the sink, since we had one successful try - assertEquals(2, sinkManager.getSinks().size()); + assertEquals(2, sinkManager.getNumSinks()); for (int i = 0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD-2; i++) { sinkManager.reportBadSink(sinkPeer); } // still not remove, since the success reset the counter - assertEquals(2, sinkManager.getSinks().size()); + assertEquals(2, sinkManager.getNumSinks()); sinkManager.reportBadSink(sinkPeer); // but we exhausted the tries - assertEquals(1, sinkManager.getSinks().size()); + assertEquals(1, 
sinkManager.getNumSinks()); } @Test @@ -173,7 +173,7 @@ public class TestReplicationSinkManager { sinkManager.chooseSinks(); // Sanity check - List sinkList = sinkManager.getSinks(); + List sinkList = sinkManager.getSinksForTesting(); assertEquals(2, sinkList.size()); ServerName serverNameA = sinkList.get(0); @@ -189,7 +189,7 @@ public class TestReplicationSinkManager { // We've gone down to 0 good sinks, so the replication sinks // should have been refreshed now - assertEquals(2, sinkManager.getSinks().size()); + assertEquals(2, sinkManager.getNumSinks()); } } From d06bb5e05f4edbd5aeca8317cccefcf1f26f9deb Mon Sep 17 00:00:00 2001 From: stack Date: Fri, 18 Dec 2015 12:50:04 -0800 Subject: [PATCH 38/72] Revert "HBASE-14979 Update to the newest Zookeeper release" This reverts commit a290a5d978a5eb730d9708bc2c0384d22fc37fb6. --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 12ab9ebe064..95996502384 100644 --- a/pom.xml +++ b/pom.xml @@ -1144,7 +1144,7 @@ 2.5.0 thrift 0.9.3 - 3.4.7 + 3.4.6 1.7.7 4.0.3 2.4.1 From 7ac9eb9d951c4ed873e655e361f4403dfa06e430 Mon Sep 17 00:00:00 2001 From: stack Date: Fri, 18 Dec 2015 13:38:22 -0800 Subject: [PATCH 39/72] HBASE-15009 Update test-patch.sh on branches; to fix curtailed build report; ADDENDUM set +x permission on zombie script --- dev-support/zombie-detector.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 dev-support/zombie-detector.sh diff --git a/dev-support/zombie-detector.sh b/dev-support/zombie-detector.sh old mode 100644 new mode 100755 From 574774d8b103b958880ce2bb7f605568511eabda Mon Sep 17 00:00:00 2001 From: anoopsjohn Date: Sat, 19 Dec 2015 14:58:52 +0530 Subject: [PATCH 40/72] HBASE-13158 When client supports CellBlock, return the result Cells as controller payload for get(Get) API also. 
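For context, a minimal sketch of the client-side decode path this change moves
get(Get) onto. The ProtobufUtil and controller calls are the ones used in the diff
below; the controller-factory variable, the stub, the request and the surrounding
call site are illustrative only.

    // 'stub', 'request', 'tableName' and 'rpcControllerFactory' stand in for the
    // surrounding HTable call site.
    // A controller that can carry a cell block alongside the protobuf response.
    PayloadCarryingRpcController controller = rpcControllerFactory.newController();
    controller.setPriority(tableName);
    ClientProtos.GetResponse response = stub.get(controller, request);
    // When the server answered with a cell block, response.getResult() only carries
    // the result metadata; the Cells ride in the controller payload and are merged
    // back in here.
    Result result = ProtobufUtil.toResult(response.getResult(), controller.cellScanner());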
--- .../apache/hadoop/hbase/client/HTable.java | 2 +- .../RpcRetryingCallerWithReadReplicas.java | 2 +- .../hadoop/hbase/protobuf/ProtobufUtil.java | 8 +- .../hbase/protobuf/generated/HBaseProtos.java | 211 ++++++++++++++++-- hbase-protocol/src/main/protobuf/HBase.proto | 2 + .../hadoop/hbase/client/VersionInfoUtil.java | 8 + .../hbase/regionserver/RSRpcServices.java | 17 +- 7 files changed, 230 insertions(+), 20 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index 2793ae2a35b..35d9a74a956 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -861,7 +861,7 @@ public class HTable implements HTableInterface, RegionLocator { try { ClientProtos.GetResponse response = getStub().get(controller, request); if (response == null) return null; - return ProtobufUtil.toResult(response.getResult()); + return ProtobufUtil.toResult(response.getResult(), controller.cellScanner()); } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java index 025daa02c94..80c187c37d4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java @@ -164,7 +164,7 @@ public class RpcRetryingCallerWithReadReplicas { if (response == null) { return null; } - return ProtobufUtil.toResult(response.getResult()); + return ProtobufUtil.toResult(response.getResult(), controller.cellScanner()); } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 3b046e609e0..907650b2cb9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -3210,7 +3210,13 @@ public final class ProtobufUtil { */ public static HBaseProtos.VersionInfo getVersionInfo() { HBaseProtos.VersionInfo.Builder builder = HBaseProtos.VersionInfo.newBuilder(); - builder.setVersion(VersionInfo.getVersion()); + String version = VersionInfo.getVersion(); + builder.setVersion(version); + String[] components = version.split("\\."); + if (components != null && components.length > 2) { + builder.setVersionMajor(Integer.parseInt(components[0])); + builder.setVersionMinor(Integer.parseInt(components[1])); + } builder.setUrl(VersionInfo.getUrl()); builder.setRevision(VersionInfo.getRevision()); builder.setUser(VersionInfo.getUser()); diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java index 5c337c36505..0e914907aac 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java @@ -16798,6 +16798,26 @@ public final class HBaseProtos { */ com.google.protobuf.ByteString getSrcChecksumBytes(); + + // optional 
uint32 version_major = 7; + /** + * optional uint32 version_major = 7; + */ + boolean hasVersionMajor(); + /** + * optional uint32 version_major = 7; + */ + int getVersionMajor(); + + // optional uint32 version_minor = 8; + /** + * optional uint32 version_minor = 8; + */ + boolean hasVersionMinor(); + /** + * optional uint32 version_minor = 8; + */ + int getVersionMinor(); } /** * Protobuf type {@code hbase.pb.VersionInfo} @@ -16884,6 +16904,16 @@ public final class HBaseProtos { srcChecksum_ = input.readBytes(); break; } + case 56: { + bitField0_ |= 0x00000040; + versionMajor_ = input.readUInt32(); + break; + } + case 64: { + bitField0_ |= 0x00000080; + versionMinor_ = input.readUInt32(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -17182,6 +17212,38 @@ public final class HBaseProtos { } } + // optional uint32 version_major = 7; + public static final int VERSION_MAJOR_FIELD_NUMBER = 7; + private int versionMajor_; + /** + * optional uint32 version_major = 7; + */ + public boolean hasVersionMajor() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional uint32 version_major = 7; + */ + public int getVersionMajor() { + return versionMajor_; + } + + // optional uint32 version_minor = 8; + public static final int VERSION_MINOR_FIELD_NUMBER = 8; + private int versionMinor_; + /** + * optional uint32 version_minor = 8; + */ + public boolean hasVersionMinor() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional uint32 version_minor = 8; + */ + public int getVersionMinor() { + return versionMinor_; + } + private void initFields() { version_ = ""; url_ = ""; @@ -17189,6 +17251,8 @@ public final class HBaseProtos { user_ = ""; date_ = ""; srcChecksum_ = ""; + versionMajor_ = 0; + versionMinor_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -17244,6 +17308,12 @@ public final class HBaseProtos { if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeBytes(6, getSrcChecksumBytes()); } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeUInt32(7, versionMajor_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeUInt32(8, versionMinor_); + } getUnknownFields().writeTo(output); } @@ -17277,6 +17347,14 @@ public final class HBaseProtos { size += com.google.protobuf.CodedOutputStream .computeBytesSize(6, getSrcChecksumBytes()); } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(7, versionMajor_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(8, versionMinor_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -17330,6 +17408,16 @@ public final class HBaseProtos { result = result && getSrcChecksum() .equals(other.getSrcChecksum()); } + result = result && (hasVersionMajor() == other.hasVersionMajor()); + if (hasVersionMajor()) { + result = result && (getVersionMajor() + == other.getVersionMajor()); + } + result = result && (hasVersionMinor() == other.hasVersionMinor()); + if (hasVersionMinor()) { + result = result && (getVersionMinor() + == other.getVersionMinor()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -17367,6 +17455,14 @@ public final class HBaseProtos { hash = (37 * hash) + SRC_CHECKSUM_FIELD_NUMBER; hash = (53 * hash) + getSrcChecksum().hashCode(); } + if (hasVersionMajor()) { + hash = (37 
* hash) + VERSION_MAJOR_FIELD_NUMBER; + hash = (53 * hash) + getVersionMajor(); + } + if (hasVersionMinor()) { + hash = (37 * hash) + VERSION_MINOR_FIELD_NUMBER; + hash = (53 * hash) + getVersionMinor(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -17492,6 +17588,10 @@ public final class HBaseProtos { bitField0_ = (bitField0_ & ~0x00000010); srcChecksum_ = ""; bitField0_ = (bitField0_ & ~0x00000020); + versionMajor_ = 0; + bitField0_ = (bitField0_ & ~0x00000040); + versionMinor_ = 0; + bitField0_ = (bitField0_ & ~0x00000080); return this; } @@ -17544,6 +17644,14 @@ public final class HBaseProtos { to_bitField0_ |= 0x00000020; } result.srcChecksum_ = srcChecksum_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000040; + } + result.versionMajor_ = versionMajor_; + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000080; + } + result.versionMinor_ = versionMinor_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -17590,6 +17698,12 @@ public final class HBaseProtos { srcChecksum_ = other.srcChecksum_; onChanged(); } + if (other.hasVersionMajor()) { + setVersionMajor(other.getVersionMajor()); + } + if (other.hasVersionMinor()) { + setVersionMinor(other.getVersionMinor()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -18085,6 +18199,72 @@ public final class HBaseProtos { return this; } + // optional uint32 version_major = 7; + private int versionMajor_ ; + /** + * optional uint32 version_major = 7; + */ + public boolean hasVersionMajor() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional uint32 version_major = 7; + */ + public int getVersionMajor() { + return versionMajor_; + } + /** + * optional uint32 version_major = 7; + */ + public Builder setVersionMajor(int value) { + bitField0_ |= 0x00000040; + versionMajor_ = value; + onChanged(); + return this; + } + /** + * optional uint32 version_major = 7; + */ + public Builder clearVersionMajor() { + bitField0_ = (bitField0_ & ~0x00000040); + versionMajor_ = 0; + onChanged(); + return this; + } + + // optional uint32 version_minor = 8; + private int versionMinor_ ; + /** + * optional uint32 version_minor = 8; + */ + public boolean hasVersionMinor() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional uint32 version_minor = 8; + */ + public int getVersionMinor() { + return versionMinor_; + } + /** + * optional uint32 version_minor = 8; + */ + public Builder setVersionMinor(int value) { + bitField0_ |= 0x00000080; + versionMinor_ = value; + onChanged(); + return this; + } + /** + * optional uint32 version_minor = 8; + */ + public Builder clearVersionMinor() { + bitField0_ = (bitField0_ & ~0x00000080); + versionMinor_ = 0; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:hbase.pb.VersionInfo) } @@ -18927,20 +19107,21 @@ public final class HBaseProtos { "al_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016least_sig_bits\030\001" + " \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"T\n\023Namespace" + "Descriptor\022\014\n\004name\030\001 \002(\014\022/\n\rconfiguratio" + - "n\030\002 \003(\0132\030.hbase.pb.NameStringPair\"o\n\013Ver" + - "sionInfo\022\017\n\007version\030\001 \002(\t\022\013\n\003url\030\002 \002(\t\022\020" + - "\n\010revision\030\003 \002(\t\022\014\n\004user\030\004 \002(\t\022\014\n\004date\030\005" + - " \002(\t\022\024\n\014src_checksum\030\006 \002(\t\"Q\n\020RegionServ" + - 
"erInfo\022\020\n\010infoPort\030\001 \001(\005\022+\n\014version_info", - "\030\002 \001(\0132\025.hbase.pb.VersionInfo*r\n\013Compare" + - "Type\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQU" + - "AL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020" + - "\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n\010TimeUnit\022\017\n" + - "\013NANOSECONDS\020\001\022\020\n\014MICROSECONDS\020\002\022\020\n\014MILL" + - "ISECONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005" + - "HOURS\020\006\022\010\n\004DAYS\020\007B>\n*org.apache.hadoop.h" + - "base.protobuf.generatedB\013HBaseProtosH\001\240\001" + - "\001" + "n\030\002 \003(\0132\030.hbase.pb.NameStringPair\"\235\001\n\013Ve" + + "rsionInfo\022\017\n\007version\030\001 \002(\t\022\013\n\003url\030\002 \002(\t\022" + + "\020\n\010revision\030\003 \002(\t\022\014\n\004user\030\004 \002(\t\022\014\n\004date\030" + + "\005 \002(\t\022\024\n\014src_checksum\030\006 \002(\t\022\025\n\rversion_m" + + "ajor\030\007 \001(\r\022\025\n\rversion_minor\030\010 \001(\r\"Q\n\020Reg", + "ionServerInfo\022\020\n\010infoPort\030\001 \001(\005\022+\n\014versi" + + "on_info\030\002 \001(\0132\025.hbase.pb.VersionInfo*r\n\013" + + "CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001" + + "\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR" + + "_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n\010Time" + + "Unit\022\017\n\013NANOSECONDS\020\001\022\020\n\014MICROSECONDS\020\002\022" + + "\020\n\014MILLISECONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MINUTE" + + "S\020\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007B>\n*org.apache.h" + + "adoop.hbase.protobuf.generatedB\013HBasePro" + + "tosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -19084,7 +19265,7 @@ public final class HBaseProtos { internal_static_hbase_pb_VersionInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_VersionInfo_descriptor, - new java.lang.String[] { "Version", "Url", "Revision", "User", "Date", "SrcChecksum", }); + new java.lang.String[] { "Version", "Url", "Revision", "User", "Date", "SrcChecksum", "VersionMajor", "VersionMinor", }); internal_static_hbase_pb_RegionServerInfo_descriptor = getDescriptor().getMessageTypes().get(23); internal_static_hbase_pb_RegionServerInfo_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto index 28b226bc65c..820dbebe935 100644 --- a/hbase-protocol/src/main/protobuf/HBase.proto +++ b/hbase-protocol/src/main/protobuf/HBase.proto @@ -227,6 +227,8 @@ message VersionInfo { required string user = 4; required string date = 5; required string src_checksum = 6; + optional uint32 version_major = 7; + optional uint32 version_minor = 8; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java index c40551899ed..618777b44ed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java @@ -44,6 +44,14 @@ public final class VersionInfoUtil { int major, int minor) { if (versionInfo != 
null) { + if (versionInfo.hasVersionMajor() && versionInfo.hasVersionMinor()) { + int clientMajor = versionInfo.getVersionMajor(); + if (clientMajor != major) { + return clientMajor > major; + } + int clientMinor = versionInfo.getVersionMinor(); + return clientMinor >= minor; + } try { String[] components = versionInfo.getVersion().split("\\."); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index f9f0e22f918..2cdf46dbcfd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.VersionInfoUtil; import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.hbase.coordination.CloseRegionCoordination; import org.apache.hadoop.hbase.coordination.OpenRegionCoordination; @@ -367,7 +368,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler, * @return True if current call supports cellblocks */ private boolean isClientCellBlockSupport() { - RpcCallContext context = RpcServer.getCurrentCall(); + return isClientCellBlockSupport(RpcServer.getCurrentCall()); + } + + private boolean isClientCellBlockSupport(RpcCallContext context) { return context != null && context.isClientCellBlockSupported(); } @@ -2031,7 +2035,16 @@ public class RSRpcServices implements HBaseRPCErrorHandler, ProtobufUtil.toResult(existence, region.getRegionInfo().getReplicaId() != 0); builder.setResult(pbr); } else if (r != null) { - ClientProtos.Result pbr = ProtobufUtil.toResult(r); + ClientProtos.Result pbr; + RpcCallContext call = RpcServer.getCurrentCall(); + if (isClientCellBlockSupport(call) && controller instanceof PayloadCarryingRpcController + && VersionInfoUtil.hasMinimumVersion(call.getClientVersionInfo(), 1, 3)) { + pbr = ProtobufUtil.toResultNoData(r); + ((PayloadCarryingRpcController) controller) + .setCellScanner(CellUtil.createCellScanner(r.rawCells())); + } else { + pbr = ProtobufUtil.toResult(r); + } builder.setResult(pbr); } if (r != null) { From c27d2c9c3e2fe59906ac6ba7ac2a5d08e5928d43 Mon Sep 17 00:00:00 2001 From: Lars Hofhansl Date: Sat, 19 Dec 2015 10:15:26 -0800 Subject: [PATCH 41/72] HBASE-14822 Renewing leases of scanners doesn't work. 
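As background, a hedged usage sketch of what this fix enables on the client side:
a caller doing slow per-row work can keep the server-side scanner lease alive
between next() calls instead of letting hbase.client.scanner.timeout.period expire.
renewLease() and the cast mirror the new TestLeaseRenewal further down; the table
handle and the per-row work are illustrative.

    // Uses org.apache.hadoop.hbase.client.{Table, Scan, Result, ResultScanner,
    // AbstractClientScanner}; 'table' is a Table obtained elsewhere.
    Scan scan = new Scan();
    scan.setCaching(1);
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result r = scanner.next(); r != null; r = scanner.next()) {
        doSlowWork(r); // illustrative placeholder for expensive per-row processing
        // Issues a scan RPC with renew=true: the lease is refreshed, but no rows are
        // returned and the scanner position does not move.
        ((AbstractClientScanner) scanner).renewLease();
      }
    }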
--- .../hadoop/hbase/client/ClientScanner.java | 4 +- .../hadoop/hbase/client/ScannerCallable.java | 12 +- .../client/ScannerCallableWithReplicas.java | 4 + .../hbase/protobuf/RequestConverter.java | 4 +- .../protobuf/generated/ClientProtos.java | 218 +++++++++++++----- hbase-protocol/src/main/protobuf/Client.proto | 1 + .../hbase/regionserver/RSRpcServices.java | 7 + .../hbase/client/TestFromClientSide3.java | 25 -- .../hadoop/hbase/client/TestLeaseRenewal.java | 125 ++++++++++ 9 files changed, 307 insertions(+), 93 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java index b99406330c3..22c1c9535ef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java @@ -760,13 +760,13 @@ public class ClientScanner extends AbstractClientScanner { public boolean renewLease() { if (callable != null) { // do not return any rows, do not advance the scanner - callable.setCaching(0); + callable.setRenew(true); try { this.caller.callWithoutRetries(callable, this.scannerTimeout); } catch (Exception e) { return false; } finally { - callable.setCaching(this.caching); + callable.setRenew(false); } return true; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java index 93d3fb5328d..51003149af2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java @@ -72,6 +72,7 @@ public class ScannerCallable extends RegionServerCallable { protected long scannerId = -1L; protected boolean instantiated = false; protected boolean closed = false; + protected boolean renew = false; private Scan scan; private int caching = 1; protected final ClusterConnection cConnection; @@ -209,7 +210,7 @@ public class ScannerCallable extends RegionServerCallable { incRPCcallsMetrics(); request = RequestConverter.buildScanRequest(scannerId, caching, false, nextCallSeq, - this.scanMetrics != null); + this.scanMetrics != null, renew); ScanResponse response = null; controller = controllerFactory.newController(); controller.setPriority(getTableName()); @@ -413,6 +414,15 @@ public class ScannerCallable extends RegionServerCallable { this.closed = true; } + /** + * Indicate whether we make a call only to renew the lease, but without affected the scanner in + * any other way. 
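+   * In practice ClientScanner#renewLease() drives this: it sets the flag, issues a
+   * single scan RPC that carries renew=true, and clears the flag again, so the
+   * server only refreshes the lease and returns no rows.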
+ * @param val true if only the lease should be renewed + */ + public void setRenew(boolean val) { + this.renew = val; + } + /** * @return the HRegionInfo for the current region */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java index 7418292bad6..f8feca168bd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java @@ -98,6 +98,10 @@ class ScannerCallableWithReplicas implements RetryingCallable { currentScannerCallable.setClose(); } + public void setRenew(boolean val) { + currentScannerCallable.setRenew(val); + } + public void setCaching(int caching) { currentScannerCallable.setCaching(caching); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index 1a22106bb51..74c18b09226 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -529,7 +529,8 @@ public final class RequestConverter { * @return a scan request */ public static ScanRequest buildScanRequest(final long scannerId, final int numberOfRows, - final boolean closeScanner, final long nextCallSeq, final boolean trackMetrics) { + final boolean closeScanner, final long nextCallSeq, final boolean trackMetrics, + final boolean renew) { ScanRequest.Builder builder = ScanRequest.newBuilder(); builder.setNumberOfRows(numberOfRows); builder.setCloseScanner(closeScanner); @@ -538,6 +539,7 @@ public final class RequestConverter { builder.setClientHandlesPartials(true); builder.setClientHandlesHeartbeats(true); builder.setTrackScanMetrics(trackMetrics); + builder.setRenew(renew); return builder.build(); } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java index 315eee1ec30..e6e715dff3b 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java @@ -17303,6 +17303,16 @@ public final class ClientProtos { * optional bool track_scan_metrics = 9; */ boolean getTrackScanMetrics(); + + // optional bool renew = 10 [default = false]; + /** + * optional bool renew = 10 [default = false]; + */ + boolean hasRenew(); + /** + * optional bool renew = 10 [default = false]; + */ + boolean getRenew(); } /** * Protobuf type {@code hbase.pb.ScanRequest} @@ -17429,6 +17439,11 @@ public final class ClientProtos { trackScanMetrics_ = input.readBool(); break; } + case 80: { + bitField0_ |= 0x00000200; + renew_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -17625,6 +17640,22 @@ public final class ClientProtos { return trackScanMetrics_; } + // optional bool renew = 10 [default = false]; + public static final int RENEW_FIELD_NUMBER = 10; + private boolean renew_; + /** + * optional bool renew = 10 [default = false]; + */ + public boolean hasRenew() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional bool renew = 10 [default = false]; + */ + public boolean getRenew() { 
+ return renew_; + } + private void initFields() { region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); scan_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.getDefaultInstance(); @@ -17635,6 +17666,7 @@ public final class ClientProtos { clientHandlesPartials_ = false; clientHandlesHeartbeats_ = false; trackScanMetrics_ = false; + renew_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -17687,6 +17719,9 @@ public final class ClientProtos { if (((bitField0_ & 0x00000100) == 0x00000100)) { output.writeBool(9, trackScanMetrics_); } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + output.writeBool(10, renew_); + } getUnknownFields().writeTo(output); } @@ -17732,6 +17767,10 @@ public final class ClientProtos { size += com.google.protobuf.CodedOutputStream .computeBoolSize(9, trackScanMetrics_); } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(10, renew_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -17800,6 +17839,11 @@ public final class ClientProtos { result = result && (getTrackScanMetrics() == other.getTrackScanMetrics()); } + result = result && (hasRenew() == other.hasRenew()); + if (hasRenew()) { + result = result && (getRenew() + == other.getRenew()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -17849,6 +17893,10 @@ public final class ClientProtos { hash = (37 * hash) + TRACK_SCAN_METRICS_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getTrackScanMetrics()); } + if (hasRenew()) { + hash = (37 * hash) + RENEW_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getRenew()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -17999,6 +18047,8 @@ public final class ClientProtos { bitField0_ = (bitField0_ & ~0x00000080); trackScanMetrics_ = false; bitField0_ = (bitField0_ & ~0x00000100); + renew_ = false; + bitField0_ = (bitField0_ & ~0x00000200); return this; } @@ -18071,6 +18121,10 @@ public final class ClientProtos { to_bitField0_ |= 0x00000100; } result.trackScanMetrics_ = trackScanMetrics_; + if (((from_bitField0_ & 0x00000200) == 0x00000200)) { + to_bitField0_ |= 0x00000200; + } + result.renew_ = renew_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -18114,6 +18168,9 @@ public final class ClientProtos { if (other.hasTrackScanMetrics()) { setTrackScanMetrics(other.getTrackScanMetrics()); } + if (other.hasRenew()) { + setRenew(other.getRenew()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -18618,6 +18675,39 @@ public final class ClientProtos { return this; } + // optional bool renew = 10 [default = false]; + private boolean renew_ ; + /** + * optional bool renew = 10 [default = false]; + */ + public boolean hasRenew() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional bool renew = 10 [default = false]; + */ + public boolean getRenew() { + return renew_; + } + /** + * optional bool renew = 10 [default = false]; + */ + public Builder setRenew(boolean value) { + bitField0_ |= 0x00000200; + renew_ = value; + onChanged(); + return this; + } + /** + * optional bool renew = 10 [default = false]; + */ + public Builder clearRenew() { + bitField0_ = (bitField0_ & ~0x00000200); + renew_ = false; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:hbase.pb.ScanRequest) } @@ 
-34289,76 +34379,76 @@ public final class ClientProtos { "\025.hbase.pb.Consistency:\006STRONG\022\017\n\007cachin" + "g\030\021 \001(\r\022\035\n\025allow_partial_results\030\022 \001(\010\0226" + "\n\rcf_time_range\030\023 \003(\0132\037.hbase.pb.ColumnF" + - "amilyTimeRange\"\220\002\n\013ScanRequest\022)\n\006region", + "amilyTimeRange\"\246\002\n\013ScanRequest\022)\n\006region", "\030\001 \001(\0132\031.hbase.pb.RegionSpecifier\022\034\n\004sca" + "n\030\002 \001(\0132\016.hbase.pb.Scan\022\022\n\nscanner_id\030\003 " + "\001(\004\022\026\n\016number_of_rows\030\004 \001(\r\022\025\n\rclose_sca" + "nner\030\005 \001(\010\022\025\n\rnext_call_seq\030\006 \001(\004\022\037\n\027cli" + "ent_handles_partials\030\007 \001(\010\022!\n\031client_han" + "dles_heartbeats\030\010 \001(\010\022\032\n\022track_scan_metr" + - "ics\030\t \001(\010\"\232\002\n\014ScanResponse\022\030\n\020cells_per_" + - "result\030\001 \003(\r\022\022\n\nscanner_id\030\002 \001(\004\022\024\n\014more" + - "_results\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\022!\n\007results\030\005" + - " \003(\0132\020.hbase.pb.Result\022\r\n\005stale\030\006 \001(\010\022\037\n", - "\027partial_flag_per_result\030\007 \003(\010\022\036\n\026more_r" + - "esults_in_region\030\010 \001(\010\022\031\n\021heartbeat_mess" + - "age\030\t \001(\010\022+\n\014scan_metrics\030\n \001(\0132\025.hbase." + - "pb.ScanMetrics\"\305\001\n\024BulkLoadHFileRequest\022" + - ")\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifi" + - "er\022>\n\013family_path\030\002 \003(\0132).hbase.pb.BulkL" + - "oadHFileRequest.FamilyPath\022\026\n\016assign_seq" + - "_num\030\003 \001(\010\032*\n\nFamilyPath\022\016\n\006family\030\001 \002(\014" + - "\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFileResponse\022" + - "\016\n\006loaded\030\001 \002(\010\"a\n\026CoprocessorServiceCal", - "l\022\013\n\003row\030\001 \002(\014\022\024\n\014service_name\030\002 \002(\t\022\023\n\013" + - "method_name\030\003 \002(\t\022\017\n\007request\030\004 \002(\014\"B\n\030Co" + - "processorServiceResult\022&\n\005value\030\001 \001(\0132\027." + - "hbase.pb.NameBytesPair\"v\n\031CoprocessorSer" + - "viceRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R" + - "egionSpecifier\022.\n\004call\030\002 \002(\0132 .hbase.pb." + - "CoprocessorServiceCall\"o\n\032CoprocessorSer" + - "viceResponse\022)\n\006region\030\001 \002(\0132\031.hbase.pb." + - "RegionSpecifier\022&\n\005value\030\002 \002(\0132\027.hbase.p" + - "b.NameBytesPair\"\226\001\n\006Action\022\r\n\005index\030\001 \001(", - "\r\022)\n\010mutation\030\002 \001(\0132\027.hbase.pb.MutationP" + - "roto\022\032\n\003get\030\003 \001(\0132\r.hbase.pb.Get\0226\n\014serv" + - "ice_call\030\004 \001(\0132 .hbase.pb.CoprocessorSer" + - "viceCall\"k\n\014RegionAction\022)\n\006region\030\001 \002(\013" + - "2\031.hbase.pb.RegionSpecifier\022\016\n\006atomic\030\002 " + - "\001(\010\022 \n\006action\030\003 \003(\0132\020.hbase.pb.Action\"c\n" + - "\017RegionLoadStats\022\027\n\014memstoreLoad\030\001 \001(\005:\001" + - "0\022\030\n\rheapOccupancy\030\002 \001(\005:\0010\022\035\n\022compactio" + - "nPressure\030\003 \001(\005:\0010\"\332\001\n\021ResultOrException" + - "\022\r\n\005index\030\001 \001(\r\022 \n\006result\030\002 \001(\0132\020.hbase.", - "pb.Result\022*\n\texception\030\003 \001(\0132\027.hbase.pb." + - "NameBytesPair\022:\n\016service_result\030\004 \001(\0132\"." 
+ - "hbase.pb.CoprocessorServiceResult\022,\n\tloa" + - "dStats\030\005 \001(\0132\031.hbase.pb.RegionLoadStats\"" + - "x\n\022RegionActionResult\0226\n\021resultOrExcepti" + - "on\030\001 \003(\0132\033.hbase.pb.ResultOrException\022*\n" + - "\texception\030\002 \001(\0132\027.hbase.pb.NameBytesPai" + - "r\"x\n\014MultiRequest\022,\n\014regionAction\030\001 \003(\0132" + - "\026.hbase.pb.RegionAction\022\022\n\nnonceGroup\030\002 " + - "\001(\004\022&\n\tcondition\030\003 \001(\0132\023.hbase.pb.Condit", - "ion\"\\\n\rMultiResponse\0228\n\022regionActionResu" + - "lt\030\001 \003(\0132\034.hbase.pb.RegionActionResult\022\021" + - "\n\tprocessed\030\002 \001(\010*\'\n\013Consistency\022\n\n\006STRO" + - "NG\020\000\022\014\n\010TIMELINE\020\0012\203\004\n\rClientService\0222\n\003" + - "Get\022\024.hbase.pb.GetRequest\032\025.hbase.pb.Get" + - "Response\022;\n\006Mutate\022\027.hbase.pb.MutateRequ" + - "est\032\030.hbase.pb.MutateResponse\0225\n\004Scan\022\025." + - "hbase.pb.ScanRequest\032\026.hbase.pb.ScanResp" + - "onse\022P\n\rBulkLoadHFile\022\036.hbase.pb.BulkLoa" + - "dHFileRequest\032\037.hbase.pb.BulkLoadHFileRe", - "sponse\022X\n\013ExecService\022#.hbase.pb.Coproce" + - "ssorServiceRequest\032$.hbase.pb.Coprocesso" + - "rServiceResponse\022d\n\027ExecRegionServerServ" + + "ics\030\t \001(\010\022\024\n\005renew\030\n \001(\010:\005false\"\232\002\n\014Scan" + + "Response\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nsc" + + "anner_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003" + + "ttl\030\004 \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Re", + "sult\022\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_" + + "result\030\007 \003(\010\022\036\n\026more_results_in_region\030\010" + + " \001(\010\022\031\n\021heartbeat_message\030\t \001(\010\022+\n\014scan_" + + "metrics\030\n \001(\0132\025.hbase.pb.ScanMetrics\"\305\001\n" + + "\024BulkLoadHFileRequest\022)\n\006region\030\001 \002(\0132\031." 
+ + "hbase.pb.RegionSpecifier\022>\n\013family_path\030" + + "\002 \003(\0132).hbase.pb.BulkLoadHFileRequest.Fa" + + "milyPath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n\nFami" + + "lyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025" + + "BulkLoadHFileResponse\022\016\n\006loaded\030\001 \002(\010\"a\n", + "\026CoprocessorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014" + + "service_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022" + + "\017\n\007request\030\004 \002(\014\"B\n\030CoprocessorServiceRe" + + "sult\022&\n\005value\030\001 \001(\0132\027.hbase.pb.NameBytes" + + "Pair\"v\n\031CoprocessorServiceRequest\022)\n\006reg" + + "ion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\004" + + "call\030\002 \002(\0132 .hbase.pb.CoprocessorService" + + "Call\"o\n\032CoprocessorServiceResponse\022)\n\006re" + + "gion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022&\n" + + "\005value\030\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226\001", + "\n\006Action\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(" + + "\0132\027.hbase.pb.MutationProto\022\032\n\003get\030\003 \001(\0132" + + "\r.hbase.pb.Get\0226\n\014service_call\030\004 \001(\0132 .h" + + "base.pb.CoprocessorServiceCall\"k\n\014Region" + + "Action\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Region" + + "Specifier\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(" + + "\0132\020.hbase.pb.Action\"c\n\017RegionLoadStats\022\027" + + "\n\014memstoreLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy" + + "\030\002 \001(\005:\0010\022\035\n\022compactionPressure\030\003 \001(\005:\0010" + + "\"\332\001\n\021ResultOrException\022\r\n\005index\030\001 \001(\r\022 \n", + "\006result\030\002 \001(\0132\020.hbase.pb.Result\022*\n\texcep" + + "tion\030\003 \001(\0132\027.hbase.pb.NameBytesPair\022:\n\016s" + + "ervice_result\030\004 \001(\0132\".hbase.pb.Coprocess" + + "orServiceResult\022,\n\tloadStats\030\005 \001(\0132\031.hba" + + "se.pb.RegionLoadStats\"x\n\022RegionActionRes" + + "ult\0226\n\021resultOrException\030\001 \003(\0132\033.hbase.p" + + "b.ResultOrException\022*\n\texception\030\002 \001(\0132\027" + + ".hbase.pb.NameBytesPair\"x\n\014MultiRequest\022" + + ",\n\014regionAction\030\001 \003(\0132\026.hbase.pb.RegionA" + + "ction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tcondition\030\003", + " \001(\0132\023.hbase.pb.Condition\"\\\n\rMultiRespon" + + "se\0228\n\022regionActionResult\030\001 \003(\0132\034.hbase.p" + + "b.RegionActionResult\022\021\n\tprocessed\030\002 \001(\010*" + + "\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\001" + + "2\203\004\n\rClientService\0222\n\003Get\022\024.hbase.pb.Get" + + "Request\032\025.hbase.pb.GetResponse\022;\n\006Mutate" + + "\022\027.hbase.pb.MutateRequest\032\030.hbase.pb.Mut" + + "ateResponse\0225\n\004Scan\022\025.hbase.pb.ScanReque" + + "st\032\026.hbase.pb.ScanResponse\022P\n\rBulkLoadHF" + + "ile\022\036.hbase.pb.BulkLoadHFileRequest\032\037.hb", + "ase.pb.BulkLoadHFileResponse\022X\n\013ExecServ" + "ice\022#.hbase.pb.CoprocessorServiceRequest" + - "\032$.hbase.pb.CoprocessorServiceResponse\0228" + - "\n\005Multi\022\026.hbase.pb.MultiRequest\032\027.hbase." 
+ - "pb.MultiResponseBB\n*org.apache.hadoop.hb" + - "ase.protobuf.generatedB\014ClientProtosH\001\210\001" + - "\001\240\001\001" + "\032$.hbase.pb.CoprocessorServiceResponse\022d" + + "\n\027ExecRegionServerService\022#.hbase.pb.Cop" + + "rocessorServiceRequest\032$.hbase.pb.Coproc" + + "essorServiceResponse\0228\n\005Multi\022\026.hbase.pb" + + ".MultiRequest\032\027.hbase.pb.MultiResponseBB" + + "\n*org.apache.hadoop.hbase.protobuf.gener" + + "atedB\014ClientProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -34454,7 +34544,7 @@ public final class ClientProtos { internal_static_hbase_pb_ScanRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ScanRequest_descriptor, - new java.lang.String[] { "Region", "Scan", "ScannerId", "NumberOfRows", "CloseScanner", "NextCallSeq", "ClientHandlesPartials", "ClientHandlesHeartbeats", "TrackScanMetrics", }); + new java.lang.String[] { "Region", "Scan", "ScannerId", "NumberOfRows", "CloseScanner", "NextCallSeq", "ClientHandlesPartials", "ClientHandlesHeartbeats", "TrackScanMetrics", "Renew", }); internal_static_hbase_pb_ScanResponse_descriptor = getDescriptor().getMessageTypes().get(13); internal_static_hbase_pb_ScanResponse_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto index 339b98b7a7a..1e48ef09f58 100644 --- a/hbase-protocol/src/main/protobuf/Client.proto +++ b/hbase-protocol/src/main/protobuf/Client.proto @@ -282,6 +282,7 @@ message ScanRequest { optional bool client_handles_partials = 7; optional bool client_handles_heartbeats = 8; optional bool track_scan_metrics = 9; + optional bool renew = 10 [default = false]; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 2cdf46dbcfd..325f7bc5169 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -2385,6 +2385,13 @@ public class RSRpcServices implements HBaseRPCErrorHandler, scannerName = String.valueOf(scannerId); ttl = this.scannerLeaseTimeoutPeriod; } + if (request.hasRenew() && request.getRenew()) { + lease = regionServer.leases.removeLease(scannerName); + if (lease != null && scanners.containsKey(scannerName)) { + regionServer.leases.addLease(lease); + } + return builder.build(); + } quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.SCAN); long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java index bb2784d87ce..a0a8747bb9b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java @@ -487,29 +487,4 @@ public class TestFromClientSide3 { assertTrue(Arrays.equals(res.getValue(FAMILY, COL_QUAL), VAL_BYTES)); table.close(); } - - @Test - public void testLeaseRenewal() throws Exception { - HTable table = TEST_UTIL.createTable( - Bytes.toBytes("testLeaseRenewal"), FAMILY); - Put 
p = new Put(ROW_BYTES); - p.add(FAMILY, COL_QUAL, VAL_BYTES); - table.put(p); - p = new Put(ANOTHERROW); - p.add(FAMILY, COL_QUAL, VAL_BYTES); - table.put(p); - Scan s = new Scan(); - s.setCaching(1); - ResultScanner rs = table.getScanner(s); - // make sure that calling renewLease does not impact the scan results - assertTrue(((AbstractClientScanner)rs).renewLease()); - assertTrue(Arrays.equals(rs.next().getRow(), ANOTHERROW)); - assertTrue(((AbstractClientScanner)rs).renewLease()); - assertTrue(Arrays.equals(rs.next().getRow(), ROW_BYTES)); - assertTrue(((AbstractClientScanner)rs).renewLease()); - assertNull(rs.next()); - assertFalse(((AbstractClientScanner)rs).renewLease()); - rs.close(); - table.close(); - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java new file mode 100644 index 00000000000..fd9c9bb1236 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java @@ -0,0 +1,125 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.util.Arrays; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(LargeTests.class) +public class TestLeaseRenewal { + final Log LOG = LogFactory.getLog(getClass()); + private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static byte[] FAMILY = Bytes.toBytes("testFamily"); + private static final byte[] ANOTHERROW = Bytes.toBytes("anotherrow"); + private final static byte[] COL_QUAL = Bytes.toBytes("f1"); + private final static byte[] VAL_BYTES = Bytes.toBytes("v1"); + private final static byte[] ROW_BYTES = Bytes.toBytes("r1"); + private final static int leaseTimeout = + HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD / 4; + + /** + * @throws java.lang.Exception + */ + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, + leaseTimeout); + TEST_UTIL.startMiniCluster(); + } + + /** + * @throws java.lang.Exception + */ + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + /** + * @throws java.lang.Exception + */ + @Before + public void setUp() throws Exception { + // Nothing to do. 
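+    // No per-test setup is needed: setUpBeforeClass() already lowered
+    // hbase.client.scanner.timeout.period to leaseTimeout for the whole mini cluster.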
+ } + + /** + * @throws java.lang.Exception + */ + @After + public void tearDown() throws Exception { + for (HTableDescriptor htd : TEST_UTIL.getHBaseAdmin().listTables()) { + LOG.info("Tear down, remove table=" + htd.getTableName()); + TEST_UTIL.deleteTable(htd.getTableName()); + } + } + + @Test + public void testLeaseRenewal() throws Exception { + HTable table = TEST_UTIL.createTable( + TableName.valueOf("testLeaseRenewal"), FAMILY); + Put p = new Put(ROW_BYTES); + p.addColumn(FAMILY, COL_QUAL, VAL_BYTES); + table.put(p); + p = new Put(ANOTHERROW); + p.addColumn(FAMILY, COL_QUAL, VAL_BYTES); + table.put(p); + Scan s = new Scan(); + s.setCaching(1); + ResultScanner rs = table.getScanner(s); + // make sure that calling renewLease does not impact the scan results + assertTrue(((AbstractClientScanner)rs).renewLease()); + assertTrue(Arrays.equals(rs.next().getRow(), ANOTHERROW)); + // renew the lease a few times, long enough to be sure + // the lease would have expired otherwise + Thread.sleep(leaseTimeout/2); + assertTrue(((AbstractClientScanner)rs).renewLease()); + Thread.sleep(leaseTimeout/2); + assertTrue(((AbstractClientScanner)rs).renewLease()); + Thread.sleep(leaseTimeout/2); + assertTrue(((AbstractClientScanner)rs).renewLease()); + // make sure we haven't advanced the scanner + assertTrue(Arrays.equals(rs.next().getRow(), ROW_BYTES)); + assertTrue(((AbstractClientScanner)rs).renewLease()); + // make sure scanner is exhausted now + assertNull(rs.next()); + // renewLease should return false now + assertFalse(((AbstractClientScanner)rs).renewLease()); + rs.close(); + table.close(); + } +} From 6564cae667caef32713d582eca95ebf66c7dddf3 Mon Sep 17 00:00:00 2001 From: Mikhail Antonov Date: Sat, 19 Dec 2015 11:58:15 -0800 Subject: [PATCH 42/72] HBASE-15015 Checktyle plugin shouldn't check Jamon-generated Java classes --- .../src/main/resources/hbase/checkstyle-suppressions.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml b/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml index 5f3f5868774..46009e9e5d4 100644 --- a/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml +++ b/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml @@ -32,5 +32,6 @@ --> + From 79baaeb540f3ce575ea58294eaf2ad98ac49786e Mon Sep 17 00:00:00 2001 From: Mikhail Antonov Date: Sat, 19 Dec 2015 12:44:30 -0800 Subject: [PATCH 43/72] HBASE-14976 Add RPC call queues to the web ui (Pallavi Adusumilli) --- .../hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon | 3 ++- .../hbase/tmpl/regionserver/ServerMetricsTmpl.jamon | 8 +++++++- .../org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java | 6 ++++++ 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon index f38cce95964..158a2393b4e 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon @@ -104,7 +104,8 @@ org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;

    Server Metrics

    - <& ServerMetricsTmpl; mWrap = regionServer.getRegionServerMetrics().getRegionServerWrapper(); &>
    + <& ServerMetricsTmpl; mWrap = regionServer.getRegionServerMetrics().getRegionServerWrapper();
    +      mServerWrap = regionServer.getRpcServer().getMetrics().getHBaseServerWrapper(); &>
    diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon index a86a4ea1cf4..ed0926fd321 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon @@ -18,10 +18,12 @@ limitations under the License. <%args> MetricsRegionServerWrapper mWrap; +MetricsHBaseServerWrapper mServerWrap; <%import> java.util.*; org.apache.hadoop.hbase.regionserver.HRegionServer; +org.apache.hadoop.hbase.ipc.MetricsHBaseServerWrapper; org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapper; org.apache.hadoop.hbase.util.Bytes; org.apache.hadoop.hbase.HRegionInfo; @@ -61,7 +63,7 @@ java.lang.management.ManagementFactory; <& storeStats; mWrap = mWrap &>
    - <& queueStats; mWrap = mWrap &>
    + <& queueStats; mWrap = mWrap; mServerWrap = mServerWrap; &>
    @@ -184,16 +186,20 @@ MetricsRegionServerWrapper mWrap; <%def queueStats> <%args> MetricsRegionServerWrapper mWrap; +MetricsHBaseServerWrapper mServerWrap; + +
        <th>Compaction Queue Size</th>
        <th>Flush Queue Size</th>
    +   <th>Call Queue Size (bytes)</th>
        <td><% mWrap.getCompactionQueueSize() %></td>
        <td><% mWrap.getFlushQueueSize() %></td>
    +   <td><% StringUtils.TraditionalBinaryPrefix.long2String(mServerWrap.getTotalQueueSize(),
    +       "", 1) %></td>
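To illustrate what the new column computes, a short Java sketch; the wrapper wiring
matches the RSStatusTmpl.jamon change above, the helper is Hadoop's StringUtils, and
the local variable names are illustrative.

    // Total size, in bytes, of the calls currently queued on this region server's
    // RPC server, read through the wrapper this patch exposes on MetricsHBaseServer.
    MetricsHBaseServerWrapper serverWrap =
        regionServer.getRpcServer().getMetrics().getHBaseServerWrapper();
    long callQueueBytes = serverWrap.getTotalQueueSize();
    // Rendered with a traditional binary prefix, e.g. something like "12.5 K".
    String display = StringUtils.TraditionalBinaryPrefix.long2String(callQueueBytes, "", 1);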
    diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java index 05bebb83c42..e514f5fdbd6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java @@ -32,8 +32,10 @@ import org.apache.hadoop.hbase.exceptions.RegionMovedException; @InterfaceAudience.Private public class MetricsHBaseServer { private MetricsHBaseServerSource source; + private MetricsHBaseServerWrapper serverWrapper; public MetricsHBaseServer(String serverName, MetricsHBaseServerWrapper wrapper) { + serverWrapper = wrapper; source = CompatibilitySingletonFactory.getInstance(MetricsHBaseServerSourceFactory.class) .create(serverName, wrapper); } @@ -115,4 +117,8 @@ public class MetricsHBaseServer { public MetricsHBaseServerSource getMetricsSource() { return source; } + + public MetricsHBaseServerWrapper getHBaseServerWrapper() { + return serverWrapper; + } } From b82148b705085bf9a81f3502fd074584494a7a87 Mon Sep 17 00:00:00 2001 From: Pallavi Adusumilli Date: Mon, 21 Dec 2015 10:57:25 -0800 Subject: [PATCH 44/72] HBASE-14976 fix tests Signed-off-by: Mikhail Antonov --- .../hadoop/hbase/regionserver/TestRSStatusServlet.java | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java index ce000638e73..511bf252a89 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java @@ -48,6 +48,9 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.ipc.MetricsHBaseServer; +import org.apache.hadoop.hbase.ipc.MetricsHBaseServerWrapperStub; +import org.apache.hadoop.hbase.ipc.RpcServerInterface; /** * Tests for the region server status page and its template. 
@@ -57,6 +60,7 @@ public class TestRSStatusServlet { private static final Log LOG = LogFactory.getLog(TestRSStatusServlet.class); private HRegionServer rs; private RSRpcServices rpcServices; + private RpcServerInterface rpcServer; static final int FAKE_IPC_PORT = 1585; static final int FAKE_WEB_PORT = 1586; @@ -73,9 +77,11 @@ public class TestRSStatusServlet { public void setupBasicMocks() throws IOException, ServiceException { rs = Mockito.mock(HRegionServer.class); rpcServices = Mockito.mock(RSRpcServices.class); + rpcServer = Mockito.mock(RpcServerInterface.class); Mockito.doReturn(HBaseConfiguration.create()) .when(rs).getConfiguration(); Mockito.doReturn(rpcServices).when(rs).getRSRpcServices(); + Mockito.doReturn(rpcServer).when(rs).getRpcServer(); Mockito.doReturn(fakeResponse).when(rpcServices).getServerInfo( (RpcController)Mockito.any(), (GetServerInfoRequest)Mockito.any()); // Fake ZKW @@ -97,6 +103,10 @@ public class TestRSStatusServlet { MetricsRegionServer rms = Mockito.mock(MetricsRegionServer.class); Mockito.doReturn(new MetricsRegionServerWrapperStub()).when(rms).getRegionServerWrapper(); Mockito.doReturn(rms).when(rs).getRegionServerMetrics(); + + MetricsHBaseServer ms = Mockito.mock(MetricsHBaseServer.class); + Mockito.doReturn(new MetricsHBaseServerWrapperStub()).when(ms).getHBaseServerWrapper(); + Mockito.doReturn(ms).when(rpcServer).getMetrics(); } @Test From 85257a25282051bb2fa85ed893e308de6c5c3ff4 Mon Sep 17 00:00:00 2001 From: Matteo Bertozzi Date: Mon, 21 Dec 2015 13:20:26 -0800 Subject: [PATCH 45/72] HBASE-15022 replication_admin.rb throws undefined method `getZooKeeperClusterKey' for ZKUtil --- hbase-shell/src/main/ruby/hbase/replication_admin.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb b/hbase-shell/src/main/ruby/hbase/replication_admin.rb index 617073b6a0a..2a248298204 100644 --- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb +++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb @@ -22,7 +22,7 @@ include Java java_import org.apache.hadoop.hbase.client.replication.ReplicationAdmin java_import org.apache.hadoop.hbase.replication.ReplicationPeerConfig java_import org.apache.hadoop.hbase.util.Bytes -java_import org.apache.hadoop.hbase.zookeeper.ZKUtil +java_import org.apache.hadoop.hbase.zookeeper.ZKConfig java_import org.apache.hadoop.hbase.TableName # Wrapper for org.apache.hadoop.hbase.client.replication.ReplicationAdmin @@ -62,7 +62,7 @@ module Hbase # Cluster Key is required for ReplicationPeerConfig for a custom replication endpoint if !endpoint_classname.nil? and cluster_key.nil? 
- cluster_key = ZKUtil.getZooKeeperClusterKey(@configuration) + cluster_key = ZKConfig.getZooKeeperClusterKey(@configuration) end # Optional parameters From b0965b1805f577b634052191d0c337d3cafabbca Mon Sep 17 00:00:00 2001 From: chenheng Date: Tue, 22 Dec 2015 11:25:57 +0800 Subject: [PATCH 46/72] HBASE-14684 Try to remove all MiniMapReduceCluster in unit tests --- .../mapreduce/MultiTableInputFormatTestBase.java | 4 +--- .../mapreduce/TableSnapshotInputFormatTestBase.java | 3 +-- .../hadoop/hbase/mapreduce/TestCellCounter.java | 3 +-- .../apache/hadoop/hbase/mapreduce/TestCopyTable.java | 3 +-- .../hadoop/hbase/mapreduce/TestHFileOutputFormat.java | 11 ++++------- .../hbase/mapreduce/TestHFileOutputFormat2.java | 11 +++-------- .../hbase/mapreduce/TestHRegionPartitioner.java | 3 +-- .../apache/hadoop/hbase/mapreduce/TestHashTable.java | 3 +-- .../hadoop/hbase/mapreduce/TestImportExport.java | 3 +-- .../TestImportTSVWithOperationAttributes.java | 4 +--- .../hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java | 3 +-- .../apache/hadoop/hbase/mapreduce/TestImportTsv.java | 3 +-- .../hbase/mapreduce/TestMultiTableInputFormat.java | 4 +--- .../hbase/mapreduce/TestMultithreadedTableMapper.java | 3 +-- .../apache/hadoop/hbase/mapreduce/TestRowCounter.java | 3 +-- .../apache/hadoop/hbase/mapreduce/TestSyncTable.java | 3 +-- .../hadoop/hbase/mapreduce/TestTableInputFormat.java | 4 +--- .../hbase/mapreduce/TestTableMapReduceBase.java | 3 +-- .../hadoop/hbase/mapreduce/TestTimeRangeMapRed.java | 3 +-- .../apache/hadoop/hbase/mapreduce/TestWALPlayer.java | 3 +-- 20 files changed, 25 insertions(+), 55 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java index eaedebf846a..3ecf28d9576 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java @@ -78,6 +78,7 @@ public abstract class MultiTableInputFormatTestBase { public static void setUpBeforeClass() throws Exception { // switch TIF to log at DEBUG level TEST_UTIL.enableDebug(MultiTableInputFormatBase.class); + TEST_UTIL.setJobWithoutMRCluster(); // start mini hbase cluster TEST_UTIL.startMiniCluster(3); // create and fill table @@ -92,13 +93,10 @@ public abstract class MultiTableInputFormatTestBase { } } } - // start MR cluster - TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass public static void tearDownAfterClass() throws Exception { - TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java index 38088069a24..2941286b3af 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java @@ -60,6 +60,7 @@ public abstract class TableSnapshotInputFormatTestBase { public void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); + UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(NUM_REGION_SERVERS, true); rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); fs = rootDir.getFileSystem(UTIL.getConfiguration()); @@ 
-158,14 +159,12 @@ public abstract class TableSnapshotInputFormatTestBase { protected void testWithMapReduce(HBaseTestingUtility util, String snapshotName, int numRegions, int expectedNumSplits, boolean shutdownCluster) throws Exception { setupCluster(); - util.startMiniMapReduceCluster(); try { Path tableDir = util.getDataTestDirOnTestFS(snapshotName); TableName tableName = TableName.valueOf("testWithMapReduce"); testWithMapReduceImpl(util, tableName, snapshotName, tableDir, numRegions, expectedNumSplits, shutdownCluster); } finally { - util.shutdownMiniMapReduceCluster(); tearDownCluster(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java index e3d03b8fafb..41b76ba8af2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java @@ -60,15 +60,14 @@ public class TestCellCounter { @BeforeClass public static void beforeClass() throws Exception { + UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); - UTIL.startMiniMapReduceCluster(); FQ_OUTPUT_DIR = new Path(OUTPUT_DIR).makeQualified(new LocalFileSystem()); FileUtil.fullyDelete(new File(OUTPUT_DIR)); } @AfterClass public static void afterClass() throws Exception { - UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java index 5492938294e..b8ad5be6981 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java @@ -62,13 +62,12 @@ public class TestCopyTable { @BeforeClass public static void beforeClass() throws Exception { + TEST_UTIL.setJobWithoutMRCluster(); TEST_UTIL.startMiniCluster(3); - TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { - TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java index 2bdcbc1fc37..b4bae992ea3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java @@ -390,6 +390,7 @@ public class TestHFileOutputFormat { byte[][] splitKeys = generateRandomSplitKeys(4); HBaseAdmin admin = null; try { + util.setJobWithoutMRCluster(); util.startMiniCluster(); Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad"); admin = util.getHBaseAdmin(); @@ -403,7 +404,6 @@ public class TestHFileOutputFormat { assertEquals("Should make 5 regions", numRegions, 5); // Generate the bulk load files - util.startMiniMapReduceCluster(); runIncrementalPELoad(conf, table, testDir); // This doesn't write into the table, just makes files assertEquals("HFOF should not touch actual table", @@ -472,7 +472,6 @@ public class TestHFileOutputFormat { tableDigestBefore, util.checksumRows(table)); } finally { if (admin != null) admin.close(); - util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } } @@ -919,6 +918,7 @@ public class TestHFileOutputFormat { 
generateRandomStartKeys(5); try { + util.setJobWithoutMRCluster(); util.startMiniCluster(); final FileSystem fs = util.getDFSCluster().getFileSystem(); HBaseAdmin admin = new HBaseAdmin(conf); @@ -935,7 +935,6 @@ public class TestHFileOutputFormat { // Generate two bulk load files conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); - util.startMiniMapReduceCluster(); for (int i = 0; i < 2; i++) { Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i); @@ -974,7 +973,6 @@ public class TestHFileOutputFormat { }, 5000); } finally { - util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } } @@ -986,9 +984,10 @@ public class TestHFileOutputFormat { generateRandomStartKeys(5); try { + util.setJobWithoutMRCluster(); util.startMiniCluster(); Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction"); - final FileSystem fs = util.getDFSCluster().getFileSystem(); + final FileSystem fs = util.getTestFileSystem(); HBaseAdmin admin = new HBaseAdmin(conf); HTable table = util.createTable(TABLE_NAME, FAMILIES); assertEquals("Should start with empty table", 0, util.countRows(table)); @@ -1015,7 +1014,6 @@ public class TestHFileOutputFormat { // Generate a bulk load file with more rows conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); - util.startMiniMapReduceCluster(); runIncrementalPELoad(conf, table, testDir); // Perform the actual load @@ -1051,7 +1049,6 @@ public class TestHFileOutputFormat { }, 5000); } finally { - util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index e55affcabfb..bb1a073e5f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -421,6 +421,7 @@ public class TestHFileOutputFormat2 { for (int i = 0; i < hostCount; ++i) { hostnames[i] = "datanode_" + i; } + util.setJobWithoutMRCluster(); util.startMiniCluster(1, hostCount, hostnames); HTable table = util.createTable(TABLE_NAME, FAMILIES, splitKeys); @@ -431,7 +432,6 @@ public class TestHFileOutputFormat2 { assertEquals("Should make " + regionNum + " regions", numRegions, regionNum); // Generate the bulk load files - util.startMiniMapReduceCluster(); runIncrementalPELoad(conf, table.getTableDescriptor(), table.getRegionLocator(), testDir); // This doesn't write into the table, just makes files assertEquals("HFOF should not touch actual table", 0, util.countRows(table)); @@ -511,7 +511,6 @@ public class TestHFileOutputFormat2 { } finally { testDir.getFileSystem(conf).delete(testDir, true); util.deleteTable(TABLE_NAME); - util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } } @@ -956,7 +955,7 @@ public class TestHFileOutputFormat2 { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); generateRandomStartKeys(5); - + util.setJobWithoutMRCluster(); util.startMiniCluster(); try (Connection conn = ConnectionFactory.createConnection(); Admin admin = conn.getAdmin()) { @@ -974,7 +973,6 @@ public class TestHFileOutputFormat2 { // Generate two bulk load files conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); - util.startMiniMapReduceCluster(); for (int i = 0; i < 2; i++) { Path testDir = 
util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i); @@ -1016,7 +1014,6 @@ public class TestHFileOutputFormat2 { }, 5000); } finally { - util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } } @@ -1026,7 +1023,7 @@ public class TestHFileOutputFormat2 { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); generateRandomStartKeys(5); - + util.setJobWithoutMRCluster(); util.startMiniCluster(); try (Connection conn = ConnectionFactory.createConnection(conf); Admin admin = conn.getAdmin()){ @@ -1058,7 +1055,6 @@ public class TestHFileOutputFormat2 { // Generate a bulk load file with more rows conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); - util.startMiniMapReduceCluster(); RegionLocator regionLocator = conn.getRegionLocator(TABLE_NAME); runIncrementalPELoad(conf, table.getTableDescriptor(), regionLocator, testDir); @@ -1098,7 +1094,6 @@ public class TestHFileOutputFormat2 { }, 5000); } finally { - util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java index 220bc025aa5..4354da5c9a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java @@ -34,13 +34,12 @@ public class TestHRegionPartitioner { @BeforeClass public static void beforeClass() throws Exception { + UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); - UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { - UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java index 762f5301634..eae33a60b0e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java @@ -56,13 +56,12 @@ public class TestHashTable { @BeforeClass public static void beforeClass() throws Exception { + TEST_UTIL.setJobWithoutMRCluster(); TEST_UTIL.startMiniCluster(3); - TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { - TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 2e06fab3aa8..2154f990922 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -107,15 +107,14 @@ public class TestImportExport { public static void beforeClass() throws Exception { // Up the handlers; this test needs more than usual. 
UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10); + UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); - UTIL.startMiniMapReduceCluster(); FQ_OUTPUT_DIR = new Path(OUTPUT_DIR).makeQualified(FileSystem.get(UTIL.getConfiguration())).toString(); } @AfterClass public static void afterClass() throws Exception { - UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java index eba843e7a6b..acb0e7cbe7d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java @@ -102,14 +102,12 @@ public class TestImportTSVWithOperationAttributes implements Configurable { conf = util.getConfiguration(); conf.set("hbase.coprocessor.master.classes", OperationAttributesTestController.class.getName()); conf.set("hbase.coprocessor.region.classes", OperationAttributesTestController.class.getName()); + util.setJobWithoutMRCluster(); util.startMiniCluster(); - Admin admin = new HBaseAdmin(util.getConfiguration()); - util.startMiniMapReduceCluster(); } @AfterClass public static void releaseCluster() throws Exception { - util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java index 268bba2a10d..5add5d4e51a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java @@ -88,13 +88,12 @@ public class TestImportTSVWithTTLs implements Configurable { // need this where the default hfile version is not 3 (i.e. 
0.98) conf.setInt("hfile.format.version", 3); conf.set("hbase.coprocessor.region.classes", TTLCheckingObserver.class.getName()); + util.setJobWithoutMRCluster(); util.startMiniCluster(); - util.startMiniMapReduceCluster(); } @AfterClass public static void releaseCluster() throws Exception { - util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java index 5adc04a7656..40c4b4d3a19 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java @@ -103,13 +103,12 @@ public class TestImportTsv implements Configurable { @BeforeClass public static void provisionCluster() throws Exception { + util.setJobWithoutMRCluster(); util.startMiniCluster(); - util.startMiniMapReduceCluster(); } @AfterClass public static void releaseCluster() throws Exception { - util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java index a54df0830e0..14edc30b452 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java @@ -71,6 +71,7 @@ public class TestMultiTableInputFormat { // switch TIF to log at DEBUG level TEST_UTIL.enableDebug(MultiTableInputFormat.class); TEST_UTIL.enableDebug(MultiTableInputFormatBase.class); + TEST_UTIL.setJobWithoutMRCluster(); // start mini hbase cluster TEST_UTIL.startMiniCluster(3); // create and fill table @@ -81,13 +82,10 @@ public class TestMultiTableInputFormat { TEST_UTIL.loadTable(table, INPUT_FAMILY, false); } } - // start MR cluster - TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass public static void tearDownAfterClass() throws Exception { - TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java index 6b2ee753a99..9a81990688d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java @@ -72,18 +72,17 @@ public class TestMultithreadedTableMapper { public static void beforeClass() throws Exception { // Up the handlers; this test needs more than usual. 
UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10); + UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); HTable table = UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }); UTIL.loadTable(table, INPUT_FAMILY, false); - UTIL.startMiniMapReduceCluster(); UTIL.waitUntilAllRegionsAssigned(MULTI_REGION_TABLE_NAME); } @AfterClass public static void afterClass() throws Exception { - UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java index 817e7a9a318..0e04c673003 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java @@ -80,8 +80,8 @@ public class TestRowCounter { @BeforeClass public static void setUpBeforeClass() throws Exception { + TEST_UTIL.setJobWithoutMRCluster(); TEST_UTIL.startMiniCluster(); - TEST_UTIL.startMiniMapReduceCluster(); Table table = TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME), Bytes.toBytes(COL_FAM)); writeRows(table); table.close(); @@ -94,7 +94,6 @@ public class TestRowCounter { public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); - TEST_UTIL.shutdownMiniMapReduceCluster(); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java index 2f1bee3199b..1b356e6253e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java @@ -64,13 +64,12 @@ public class TestSyncTable { @BeforeClass public static void beforeClass() throws Exception { + TEST_UTIL.setJobWithoutMRCluster(); TEST_UTIL.startMiniCluster(3); - TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { - TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java index c53510fcc9e..d404833716b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java @@ -77,20 +77,18 @@ public class TestTableInputFormat { private static final Log LOG = LogFactory.getLog(TestTableInputFormat.class); private final static HBaseTestingUtility UTIL = new HBaseTestingUtility(); - private static MiniMRCluster mrCluster; static final byte[] FAMILY = Bytes.toBytes("family"); private static final byte[][] columns = new byte[][] { FAMILY }; @BeforeClass public static void beforeClass() throws Exception { + UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); - mrCluster = UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { - UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java index 8aed5884094..022d4c9351b 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java @@ -79,17 +79,16 @@ public abstract class TestTableMapReduceBase { @BeforeClass public static void beforeClass() throws Exception { + UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); HTable table = UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }); UTIL.loadTable(table, INPUT_FAMILY, false); - UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { - UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java index cd85756062a..207f256f291 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java @@ -83,6 +83,7 @@ public class TestTimeRangeMapRed { @BeforeClass public static void beforeClass() throws Exception { + UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); } @@ -167,7 +168,6 @@ public class TestTimeRangeMapRed { private void runTestOnTable() throws IOException, InterruptedException, ClassNotFoundException { - UTIL.startMiniMapReduceCluster(); Job job = null; try { job = new Job(UTIL.getConfiguration(), "test123"); @@ -184,7 +184,6 @@ public class TestTimeRangeMapRed { // TODO Auto-generated catch block e.printStackTrace(); } finally { - UTIL.shutdownMiniMapReduceCluster(); if (job != null) { FileUtil.fullyDelete( new File(job.getConfiguration().get("hadoop.tmp.dir"))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java index d6929a255bb..343fc64b409 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java @@ -70,13 +70,12 @@ public class TestWALPlayer { @BeforeClass public static void beforeClass() throws Exception { + TEST_UTIL.setJobWithoutMRCluster(); cluster = TEST_UTIL.startMiniCluster(); - TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { - TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } From b7100c934b8ca8e77208949f92a7d173c9e4c558 Mon Sep 17 00:00:00 2001 From: anoopsjohn Date: Tue, 22 Dec 2015 09:58:12 +0530 Subject: [PATCH 47/72] HBASE-13158 When client supports CellBlock, return the result Cells as controller payload for get(Get) API also - Addendum --- .../src/main/java/org/apache/hadoop/hbase/client/HTable.java | 2 +- .../java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index 35d9a74a956..24dc06a9ea0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -756,7 +756,7 @@ public class HTable implements HTableInterface, RegionLocator { try { ClientProtos.GetResponse response = getStub().get(controller, request); if 
(!response.hasResult()) return null; - return ProtobufUtil.toResult(response.getResult()); + return ProtobufUtil.toResult(response.getResult(), controller.cellScanner()); } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 907650b2cb9..47305ffb9f5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -1591,6 +1591,7 @@ public final class ProtobufUtil { try { GetResponse response = client.get(null, request); if (!response.hasResult()) return null; + // We pass 'null' RpcController. So Result will be pure RB. return toResult(response.getResult()); } catch (ServiceException se) { throw getRemoteException(se); From a51dc02ed3b7252add956dd81f712c7f7053d823 Mon Sep 17 00:00:00 2001 From: Elliott Clark Date: Fri, 18 Dec 2015 14:14:25 -0800 Subject: [PATCH 48/72] HBASE-15014 Fix filterCellByStore in WALsplitter is awful for performance --- .../hbase/regionserver/wal/WALEdit.java | 14 ++++++++++++- .../apache/hadoop/hbase/wal/WALSplitter.java | 20 ++++++++++++------- 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java index 5e53e411243..c47ce13b307 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java @@ -99,7 +99,7 @@ public class WALEdit implements Writable, HeapSize { private final int VERSION_2 = -1; private final boolean isReplay; - private final ArrayList cells = new ArrayList(1); + private ArrayList cells = new ArrayList(1); public static final WALEdit EMPTY_WALEDIT = new WALEdit(); @@ -170,6 +170,18 @@ public class WALEdit implements Writable, HeapSize { return cells; } + /** + * This is not thread safe. + * This will change the WALEdit and shouldn't be used unless you are sure that nothing + * else depends on the contents being immutable. + * + * @param cells the list of cells that this WALEdit now contains. + */ + @InterfaceAudience.Private + public void setCells(ArrayList cells) { + this.cells = cells; + } + public NavigableMap getAndRemoveScopes() { NavigableMap result = scopes; scopes = null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index 98882ff1051..c047f8d6600 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -1513,21 +1513,27 @@ public class WALSplitter { if (maxSeqIdInStores == null || maxSeqIdInStores.isEmpty()) { return; } - List skippedCells = new ArrayList(); + // Create the array list for the cells that aren't filtered. + // We make the assumption that most cells will be kept. 
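Most cells are expected to survive this filter, so the rewrite builds the list of kept cells in a single pass and swaps it into the edit via WALEdit.setCells, instead of collecting the skipped cells and calling removeAll, which is O(n^2) on an ArrayList. A minimal sketch of that pattern in plain Java, using hypothetical stand-in types rather than the actual HBase Cell and WALSplitter classes:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

// Sketch only: stand-in types, not the HBase classes touched by this patch.
final class KeepListSketch {
  static final class Edit {
    final String family;
    final long seqId;
    Edit(String family, long seqId) { this.family = family; this.seqId = seqId; }
  }

  // Build the kept list in one pass instead of collecting skipped edits and removing them.
  static List<Edit> filterByStore(List<Edit> edits, Map<String, Long> maxFlushedSeqIdByFamily) {
    List<Edit> kept = new ArrayList<>(edits.size());   // assume most edits are kept
    for (Edit e : edits) {
      Long flushed = maxFlushedSeqIdByFamily.get(e.family);
      if (flushed == null || flushed < e.seqId) {      // not flushed yet for this store: keep it
        kept.add(e);
      }
    }
    return kept;  // the caller replaces its list reference, mirroring WALEdit.setCells(keptCells)
  }
}

Swapping in a new list rather than mutating the shared one is also why the patch adds the setCells setter and documents it as not thread safe.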
+ ArrayList keptCells = new ArrayList(logEntry.getEdit().getCells().size()); for (Cell cell : logEntry.getEdit().getCells()) { - if (!CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) { + if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) { + keptCells.add(cell); + } else { byte[] family = CellUtil.cloneFamily(cell); Long maxSeqId = maxSeqIdInStores.get(family); // Do not skip cell even if maxSeqId is null. Maybe we are in a rolling upgrade, // or the master was crashed before and we can not get the information. - if (maxSeqId != null && maxSeqId.longValue() >= logEntry.getKey().getLogSeqNum()) { - skippedCells.add(cell); + if (maxSeqId == null || maxSeqId.longValue() < logEntry.getKey().getLogSeqNum()) { + keptCells.add(cell); } } } - if (!skippedCells.isEmpty()) { - logEntry.getEdit().getCells().removeAll(skippedCells); - } + + // Anything in the keptCells array list is still live. + // So rather than removing the cells from the array list + // which would be an O(n^2) operation, we just replace the list + logEntry.getEdit().setCells(keptCells); } @Override From cf059e54b589ac45769c6f16863aed2debb638b4 Mon Sep 17 00:00:00 2001 From: chenheng Date: Tue, 22 Dec 2015 16:10:58 +0800 Subject: [PATCH 49/72] Revert "HBASE-14684 Try to remove all MiniMapReduceCluster in unit tests" This reverts commit b0965b1805f577b634052191d0c337d3cafabbca. --- .../mapreduce/MultiTableInputFormatTestBase.java | 4 +++- .../mapreduce/TableSnapshotInputFormatTestBase.java | 3 ++- .../hadoop/hbase/mapreduce/TestCellCounter.java | 3 ++- .../apache/hadoop/hbase/mapreduce/TestCopyTable.java | 3 ++- .../hadoop/hbase/mapreduce/TestHFileOutputFormat.java | 11 +++++++---- .../hbase/mapreduce/TestHFileOutputFormat2.java | 11 ++++++++--- .../hbase/mapreduce/TestHRegionPartitioner.java | 3 ++- .../apache/hadoop/hbase/mapreduce/TestHashTable.java | 3 ++- .../hadoop/hbase/mapreduce/TestImportExport.java | 3 ++- .../TestImportTSVWithOperationAttributes.java | 4 +++- .../hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java | 3 ++- .../apache/hadoop/hbase/mapreduce/TestImportTsv.java | 3 ++- .../hbase/mapreduce/TestMultiTableInputFormat.java | 4 +++- .../hbase/mapreduce/TestMultithreadedTableMapper.java | 3 ++- .../apache/hadoop/hbase/mapreduce/TestRowCounter.java | 3 ++- .../apache/hadoop/hbase/mapreduce/TestSyncTable.java | 3 ++- .../hadoop/hbase/mapreduce/TestTableInputFormat.java | 4 +++- .../hbase/mapreduce/TestTableMapReduceBase.java | 3 ++- .../hadoop/hbase/mapreduce/TestTimeRangeMapRed.java | 3 ++- .../apache/hadoop/hbase/mapreduce/TestWALPlayer.java | 3 ++- 20 files changed, 55 insertions(+), 25 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java index 3ecf28d9576..eaedebf846a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java @@ -78,7 +78,6 @@ public abstract class MultiTableInputFormatTestBase { public static void setUpBeforeClass() throws Exception { // switch TIF to log at DEBUG level TEST_UTIL.enableDebug(MultiTableInputFormatBase.class); - TEST_UTIL.setJobWithoutMRCluster(); // start mini hbase cluster TEST_UTIL.startMiniCluster(3); // create and fill table @@ -93,10 +92,13 @@ public abstract class MultiTableInputFormatTestBase { } } } + // start MR cluster + 
TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java index 2941286b3af..38088069a24 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java @@ -60,7 +60,6 @@ public abstract class TableSnapshotInputFormatTestBase { public void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); - UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(NUM_REGION_SERVERS, true); rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); fs = rootDir.getFileSystem(UTIL.getConfiguration()); @@ -159,12 +158,14 @@ public abstract class TableSnapshotInputFormatTestBase { protected void testWithMapReduce(HBaseTestingUtility util, String snapshotName, int numRegions, int expectedNumSplits, boolean shutdownCluster) throws Exception { setupCluster(); + util.startMiniMapReduceCluster(); try { Path tableDir = util.getDataTestDirOnTestFS(snapshotName); TableName tableName = TableName.valueOf("testWithMapReduce"); testWithMapReduceImpl(util, tableName, snapshotName, tableDir, numRegions, expectedNumSplits, shutdownCluster); } finally { + util.shutdownMiniMapReduceCluster(); tearDownCluster(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java index 41b76ba8af2..e3d03b8fafb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java @@ -60,14 +60,15 @@ public class TestCellCounter { @BeforeClass public static void beforeClass() throws Exception { - UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); + UTIL.startMiniMapReduceCluster(); FQ_OUTPUT_DIR = new Path(OUTPUT_DIR).makeQualified(new LocalFileSystem()); FileUtil.fullyDelete(new File(OUTPUT_DIR)); } @AfterClass public static void afterClass() throws Exception { + UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java index b8ad5be6981..5492938294e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java @@ -62,12 +62,13 @@ public class TestCopyTable { @BeforeClass public static void beforeClass() throws Exception { - TEST_UTIL.setJobWithoutMRCluster(); TEST_UTIL.startMiniCluster(3); + TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { + TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java index b4bae992ea3..2bdcbc1fc37 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java @@ -390,7 +390,6 @@ public class TestHFileOutputFormat { byte[][] splitKeys = generateRandomSplitKeys(4); HBaseAdmin admin = null; try { - util.setJobWithoutMRCluster(); util.startMiniCluster(); Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad"); admin = util.getHBaseAdmin(); @@ -404,6 +403,7 @@ public class TestHFileOutputFormat { assertEquals("Should make 5 regions", numRegions, 5); // Generate the bulk load files + util.startMiniMapReduceCluster(); runIncrementalPELoad(conf, table, testDir); // This doesn't write into the table, just makes files assertEquals("HFOF should not touch actual table", @@ -472,6 +472,7 @@ public class TestHFileOutputFormat { tableDigestBefore, util.checksumRows(table)); } finally { if (admin != null) admin.close(); + util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } } @@ -918,7 +919,6 @@ public class TestHFileOutputFormat { generateRandomStartKeys(5); try { - util.setJobWithoutMRCluster(); util.startMiniCluster(); final FileSystem fs = util.getDFSCluster().getFileSystem(); HBaseAdmin admin = new HBaseAdmin(conf); @@ -935,6 +935,7 @@ public class TestHFileOutputFormat { // Generate two bulk load files conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); + util.startMiniMapReduceCluster(); for (int i = 0; i < 2; i++) { Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i); @@ -973,6 +974,7 @@ public class TestHFileOutputFormat { }, 5000); } finally { + util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } } @@ -984,10 +986,9 @@ public class TestHFileOutputFormat { generateRandomStartKeys(5); try { - util.setJobWithoutMRCluster(); util.startMiniCluster(); Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction"); - final FileSystem fs = util.getTestFileSystem(); + final FileSystem fs = util.getDFSCluster().getFileSystem(); HBaseAdmin admin = new HBaseAdmin(conf); HTable table = util.createTable(TABLE_NAME, FAMILIES); assertEquals("Should start with empty table", 0, util.countRows(table)); @@ -1014,6 +1015,7 @@ public class TestHFileOutputFormat { // Generate a bulk load file with more rows conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); + util.startMiniMapReduceCluster(); runIncrementalPELoad(conf, table, testDir); // Perform the actual load @@ -1049,6 +1051,7 @@ public class TestHFileOutputFormat { }, 5000); } finally { + util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index bb1a073e5f5..e55affcabfb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -421,7 +421,6 @@ public class TestHFileOutputFormat2 { for (int i = 0; i < hostCount; ++i) { hostnames[i] = "datanode_" + i; } - util.setJobWithoutMRCluster(); util.startMiniCluster(1, hostCount, hostnames); HTable table = util.createTable(TABLE_NAME, FAMILIES, splitKeys); @@ -432,6 +431,7 @@ public class TestHFileOutputFormat2 { assertEquals("Should make " + regionNum + " regions", numRegions, regionNum); // Generate the bulk 
load files + util.startMiniMapReduceCluster(); runIncrementalPELoad(conf, table.getTableDescriptor(), table.getRegionLocator(), testDir); // This doesn't write into the table, just makes files assertEquals("HFOF should not touch actual table", 0, util.countRows(table)); @@ -511,6 +511,7 @@ public class TestHFileOutputFormat2 { } finally { testDir.getFileSystem(conf).delete(testDir, true); util.deleteTable(TABLE_NAME); + util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } } @@ -955,7 +956,7 @@ public class TestHFileOutputFormat2 { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); generateRandomStartKeys(5); - util.setJobWithoutMRCluster(); + util.startMiniCluster(); try (Connection conn = ConnectionFactory.createConnection(); Admin admin = conn.getAdmin()) { @@ -973,6 +974,7 @@ public class TestHFileOutputFormat2 { // Generate two bulk load files conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); + util.startMiniMapReduceCluster(); for (int i = 0; i < 2; i++) { Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i); @@ -1014,6 +1016,7 @@ public class TestHFileOutputFormat2 { }, 5000); } finally { + util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } } @@ -1023,7 +1026,7 @@ public class TestHFileOutputFormat2 { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); generateRandomStartKeys(5); - util.setJobWithoutMRCluster(); + util.startMiniCluster(); try (Connection conn = ConnectionFactory.createConnection(conf); Admin admin = conn.getAdmin()){ @@ -1055,6 +1058,7 @@ public class TestHFileOutputFormat2 { // Generate a bulk load file with more rows conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); + util.startMiniMapReduceCluster(); RegionLocator regionLocator = conn.getRegionLocator(TABLE_NAME); runIncrementalPELoad(conf, table.getTableDescriptor(), regionLocator, testDir); @@ -1094,6 +1098,7 @@ public class TestHFileOutputFormat2 { }, 5000); } finally { + util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java index 4354da5c9a2..220bc025aa5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java @@ -34,12 +34,13 @@ public class TestHRegionPartitioner { @BeforeClass public static void beforeClass() throws Exception { - UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); + UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { + UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java index eae33a60b0e..762f5301634 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java @@ -56,12 +56,13 @@ public class TestHashTable { @BeforeClass public static void beforeClass() throws Exception { - TEST_UTIL.setJobWithoutMRCluster(); TEST_UTIL.startMiniCluster(3); + TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass 
public static void afterClass() throws Exception { + TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 2154f990922..2e06fab3aa8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -107,14 +107,15 @@ public class TestImportExport { public static void beforeClass() throws Exception { // Up the handlers; this test needs more than usual. UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10); - UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); + UTIL.startMiniMapReduceCluster(); FQ_OUTPUT_DIR = new Path(OUTPUT_DIR).makeQualified(FileSystem.get(UTIL.getConfiguration())).toString(); } @AfterClass public static void afterClass() throws Exception { + UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java index acb0e7cbe7d..eba843e7a6b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java @@ -102,12 +102,14 @@ public class TestImportTSVWithOperationAttributes implements Configurable { conf = util.getConfiguration(); conf.set("hbase.coprocessor.master.classes", OperationAttributesTestController.class.getName()); conf.set("hbase.coprocessor.region.classes", OperationAttributesTestController.class.getName()); - util.setJobWithoutMRCluster(); util.startMiniCluster(); + Admin admin = new HBaseAdmin(util.getConfiguration()); + util.startMiniMapReduceCluster(); } @AfterClass public static void releaseCluster() throws Exception { + util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java index 5add5d4e51a..268bba2a10d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java @@ -88,12 +88,13 @@ public class TestImportTSVWithTTLs implements Configurable { // need this where the default hfile version is not 3 (i.e. 
0.98) conf.setInt("hfile.format.version", 3); conf.set("hbase.coprocessor.region.classes", TTLCheckingObserver.class.getName()); - util.setJobWithoutMRCluster(); util.startMiniCluster(); + util.startMiniMapReduceCluster(); } @AfterClass public static void releaseCluster() throws Exception { + util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java index 40c4b4d3a19..5adc04a7656 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java @@ -103,12 +103,13 @@ public class TestImportTsv implements Configurable { @BeforeClass public static void provisionCluster() throws Exception { - util.setJobWithoutMRCluster(); util.startMiniCluster(); + util.startMiniMapReduceCluster(); } @AfterClass public static void releaseCluster() throws Exception { + util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java index 14edc30b452..a54df0830e0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java @@ -71,7 +71,6 @@ public class TestMultiTableInputFormat { // switch TIF to log at DEBUG level TEST_UTIL.enableDebug(MultiTableInputFormat.class); TEST_UTIL.enableDebug(MultiTableInputFormatBase.class); - TEST_UTIL.setJobWithoutMRCluster(); // start mini hbase cluster TEST_UTIL.startMiniCluster(3); // create and fill table @@ -82,10 +81,13 @@ public class TestMultiTableInputFormat { TEST_UTIL.loadTable(table, INPUT_FAMILY, false); } } + // start MR cluster + TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java index 9a81990688d..6b2ee753a99 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java @@ -72,17 +72,18 @@ public class TestMultithreadedTableMapper { public static void beforeClass() throws Exception { // Up the handlers; this test needs more than usual. 
UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10); - UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); HTable table = UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }); UTIL.loadTable(table, INPUT_FAMILY, false); + UTIL.startMiniMapReduceCluster(); UTIL.waitUntilAllRegionsAssigned(MULTI_REGION_TABLE_NAME); } @AfterClass public static void afterClass() throws Exception { + UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java index 0e04c673003..817e7a9a318 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java @@ -80,8 +80,8 @@ public class TestRowCounter { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.setJobWithoutMRCluster(); TEST_UTIL.startMiniCluster(); + TEST_UTIL.startMiniMapReduceCluster(); Table table = TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME), Bytes.toBytes(COL_FAM)); writeRows(table); table.close(); @@ -94,6 +94,7 @@ public class TestRowCounter { public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); + TEST_UTIL.shutdownMiniMapReduceCluster(); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java index 1b356e6253e..2f1bee3199b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java @@ -64,12 +64,13 @@ public class TestSyncTable { @BeforeClass public static void beforeClass() throws Exception { - TEST_UTIL.setJobWithoutMRCluster(); TEST_UTIL.startMiniCluster(3); + TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { + TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java index d404833716b..c53510fcc9e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java @@ -77,18 +77,20 @@ public class TestTableInputFormat { private static final Log LOG = LogFactory.getLog(TestTableInputFormat.class); private final static HBaseTestingUtility UTIL = new HBaseTestingUtility(); + private static MiniMRCluster mrCluster; static final byte[] FAMILY = Bytes.toBytes("family"); private static final byte[][] columns = new byte[][] { FAMILY }; @BeforeClass public static void beforeClass() throws Exception { - UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); + mrCluster = UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { + UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java index 022d4c9351b..8aed5884094 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java @@ -79,16 +79,17 @@ public abstract class TestTableMapReduceBase { @BeforeClass public static void beforeClass() throws Exception { - UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); HTable table = UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }); UTIL.loadTable(table, INPUT_FAMILY, false); + UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { + UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java index 207f256f291..cd85756062a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java @@ -83,7 +83,6 @@ public class TestTimeRangeMapRed { @BeforeClass public static void beforeClass() throws Exception { - UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); } @@ -168,6 +167,7 @@ public class TestTimeRangeMapRed { private void runTestOnTable() throws IOException, InterruptedException, ClassNotFoundException { + UTIL.startMiniMapReduceCluster(); Job job = null; try { job = new Job(UTIL.getConfiguration(), "test123"); @@ -184,6 +184,7 @@ public class TestTimeRangeMapRed { // TODO Auto-generated catch block e.printStackTrace(); } finally { + UTIL.shutdownMiniMapReduceCluster(); if (job != null) { FileUtil.fullyDelete( new File(job.getConfiguration().get("hadoop.tmp.dir"))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java index 343fc64b409..d6929a255bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java @@ -70,12 +70,13 @@ public class TestWALPlayer { @BeforeClass public static void beforeClass() throws Exception { - TEST_UTIL.setJobWithoutMRCluster(); cluster = TEST_UTIL.startMiniCluster(); + TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { + TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } From 1b66f474b5fcc808251c4b6fd5d4ae028656e44d Mon Sep 17 00:00:00 2001 From: Enis Soztutar Date: Tue, 22 Dec 2015 02:04:09 -0800 Subject: [PATCH 50/72] HBASE-14977 ChoreService.shutdown may result in ConcurrentModificationException - ADDENDUM to fix extra concurrency issues --- .../java/org/apache/hadoop/hbase/ScheduledChore.java | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java index 538b3905bbb..dd98d26565b 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java @@ -228,7 +228,7 @@ public abstract class ScheduledChore implements Runnable { && getTimeBetweenRuns() > getMaximumAllowedTimeBetweenRuns(); } - private synchronized double getMaximumAllowedTimeBetweenRuns() { + private double 
getMaximumAllowedTimeBetweenRuns() { // Threshold used to determine if the Chore's current run started too late return 1.5 * period; } @@ -268,23 +268,23 @@ public abstract class ScheduledChore implements Runnable { choreServicer = null; } - public synchronized String getName() { + public String getName() { return name; } - public synchronized Stoppable getStopper() { + public Stoppable getStopper() { return stopper; } - public synchronized int getPeriod() { + public int getPeriod() { return period; } - public synchronized long getInitialDelay() { + public long getInitialDelay() { return initialDelay; } - public final synchronized TimeUnit getTimeUnit() { + public TimeUnit getTimeUnit() { return timeUnit; } From ff069ef97b587435e75af7d97445d4663fd439f0 Mon Sep 17 00:00:00 2001 From: tedyu Date: Tue, 22 Dec 2015 06:49:14 -0800 Subject: [PATCH 51/72] HBASE-15028 Minor fix on RegionGroupingProvider (Yu Li) --- .../org/apache/hadoop/hbase/wal/RegionGroupingProvider.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java index 288542893b8..0aeaccf2d86 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java @@ -185,7 +185,7 @@ class RegionGroupingProvider implements WALProvider { } private WAL getWAL(final String group) throws IOException { - WAL log = cached.get(walCacheLock); + WAL log = cached.get(group); if (null == log) { // only lock when need to create wal, and need to lock since // creating hlog on fs is time consuming From f7b67ca763fbb18029495b478907c2581a4b3c12 Mon Sep 17 00:00:00 2001 From: chenheng Date: Tue, 22 Dec 2015 18:29:31 +0800 Subject: [PATCH 52/72] HBASE-14684 Try to remove all MiniMapReduceCluster in unit tests --- .../hadoop/hbase/HBaseCommonTestingUtility.java | 6 ++++++ .../hbase/mapred/TestTableSnapshotInputFormat.java | 4 ++-- .../mapreduce/MultiTableInputFormatTestBase.java | 7 +++---- .../mapreduce/TableSnapshotInputFormatTestBase.java | 7 +++---- .../hadoop/hbase/mapreduce/TestCellCounter.java | 3 +-- .../apache/hadoop/hbase/mapreduce/TestCopyTable.java | 3 +-- .../hadoop/hbase/mapreduce/TestHFileOutputFormat.java | 11 ++++------- .../hbase/mapreduce/TestHFileOutputFormat2.java | 11 +++-------- .../hbase/mapreduce/TestHRegionPartitioner.java | 3 +-- .../apache/hadoop/hbase/mapreduce/TestHashTable.java | 3 +-- .../hadoop/hbase/mapreduce/TestImportExport.java | 3 +-- .../TestImportTSVWithOperationAttributes.java | 4 +--- .../hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java | 3 +-- .../apache/hadoop/hbase/mapreduce/TestImportTsv.java | 3 +-- .../hbase/mapreduce/TestMultiTableInputFormat.java | 7 +++---- .../hbase/mapreduce/TestMultithreadedTableMapper.java | 3 +-- .../apache/hadoop/hbase/mapreduce/TestRowCounter.java | 3 +-- .../apache/hadoop/hbase/mapreduce/TestSyncTable.java | 3 +-- .../hadoop/hbase/mapreduce/TestTableInputFormat.java | 4 +--- .../hbase/mapreduce/TestTableMapReduceBase.java | 3 +-- .../hbase/mapreduce/TestTableSnapshotInputFormat.java | 4 ++-- .../hadoop/hbase/mapreduce/TestTimeRangeMapRed.java | 3 +-- .../apache/hadoop/hbase/mapreduce/TestWALPlayer.java | 3 +-- 23 files changed, 41 insertions(+), 63 deletions(-) diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java index 18ca35c11b5..b7361bfcf83 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java @@ -176,6 +176,12 @@ public class HBaseCommonTestingUtility { return new Path(PathName); } + public Path getRandomDir() { + String randomStr = UUID.randomUUID().toString(); + Path testPath = new Path(getBaseTestDir(), randomStr); + return testPath; + } + /** * @param dir Directory to delete * @return True if we deleted it. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java index 29c37c558c6..c3771c5d5e1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java @@ -103,7 +103,7 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa try { createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 1); JobConf job = new JobConf(UTIL.getConfiguration()); - Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); + Path tmpTableDir = UTIL.getRandomDir(); TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, @@ -166,7 +166,7 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); JobConf job = new JobConf(util.getConfiguration()); - Path tmpTableDir = util.getDataTestDirOnTestFS(snapshotName); + Path tmpTableDir = util.getRandomDir(); TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java index eaedebf846a..174cc1f1241 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java @@ -78,6 +78,7 @@ public abstract class MultiTableInputFormatTestBase { public static void setUpBeforeClass() throws Exception { // switch TIF to log at DEBUG level TEST_UTIL.enableDebug(MultiTableInputFormatBase.class); + TEST_UTIL.setJobWithoutMRCluster(); // start mini hbase cluster TEST_UTIL.startMiniCluster(3); // create and fill table @@ -92,13 +93,10 @@ public abstract class MultiTableInputFormatTestBase { } } } - // start MR cluster - TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass public static void tearDownAfterClass() throws Exception { - TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } @@ -270,7 +268,8 @@ public abstract class MultiTableInputFormatTestBase { initJob(scans, job); job.setReducerClass(ScanReducer.class); job.setNumReduceTasks(1); // one to get final "first" and "last" key - FileOutputFormat.setOutputPath(job, new Path(job.getJobName())); + FileOutputFormat.setOutputPath(job, + new Path(TEST_UTIL.getDataTestDirOnTestFS(), job.getJobName())); LOG.info("Started " + job.getJobName()); job.waitForCompletion(true); 
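Because these jobs now run through the local job runner rather than a mini MR cluster, the recommit anchors job output under the per-test filesystem directory, as in the FileOutputFormat.setOutputPath change above. A minimal sketch of that setup in Java, assuming the caller finishes the mapper/reducer wiring; the method and job names are illustrative:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

// Sketch: with no MR cluster, jobs run via the local runner, so keep their output
// under the test data directory to avoid collisions with other tests or reruns.
final class LocalRunnerOutputSketch {
  static Job configureOutput(HBaseTestingUtility util, String jobName) throws IOException {
    Job job = Job.getInstance(util.getConfiguration(), jobName);
    FileOutputFormat.setOutputPath(job,
        new Path(util.getDataTestDirOnTestFS(), job.getJobName()));
    return job;  // caller sets input format, mapper and reducer, then calls waitForCompletion(true)
  }
}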
assertTrue(job.isSuccessful()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java index 38088069a24..7d1267a5bb3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java @@ -60,6 +60,7 @@ public abstract class TableSnapshotInputFormatTestBase { public void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); + UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(NUM_REGION_SERVERS, true); rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); fs = rootDir.getFileSystem(UTIL.getConfiguration()); @@ -121,7 +122,7 @@ public abstract class TableSnapshotInputFormatTestBase { try { createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 1); - Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); + Path tmpTableDir = UTIL.getRandomDir(); testRestoreSnapshotDoesNotCreateBackRefLinksInit(tableName, snapshotName,tmpTableDir); @@ -158,14 +159,12 @@ public abstract class TableSnapshotInputFormatTestBase { protected void testWithMapReduce(HBaseTestingUtility util, String snapshotName, int numRegions, int expectedNumSplits, boolean shutdownCluster) throws Exception { setupCluster(); - util.startMiniMapReduceCluster(); try { - Path tableDir = util.getDataTestDirOnTestFS(snapshotName); + Path tableDir = util.getRandomDir(); TableName tableName = TableName.valueOf("testWithMapReduce"); testWithMapReduceImpl(util, tableName, snapshotName, tableDir, numRegions, expectedNumSplits, shutdownCluster); } finally { - util.shutdownMiniMapReduceCluster(); tearDownCluster(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java index e3d03b8fafb..41b76ba8af2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java @@ -60,15 +60,14 @@ public class TestCellCounter { @BeforeClass public static void beforeClass() throws Exception { + UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); - UTIL.startMiniMapReduceCluster(); FQ_OUTPUT_DIR = new Path(OUTPUT_DIR).makeQualified(new LocalFileSystem()); FileUtil.fullyDelete(new File(OUTPUT_DIR)); } @AfterClass public static void afterClass() throws Exception { - UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java index 5492938294e..b8ad5be6981 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java @@ -62,13 +62,12 @@ public class TestCopyTable { @BeforeClass public static void beforeClass() throws Exception { + TEST_UTIL.setJobWithoutMRCluster(); TEST_UTIL.startMiniCluster(3); - TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { - TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java index 2bdcbc1fc37..b4bae992ea3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java @@ -390,6 +390,7 @@ public class TestHFileOutputFormat { byte[][] splitKeys = generateRandomSplitKeys(4); HBaseAdmin admin = null; try { + util.setJobWithoutMRCluster(); util.startMiniCluster(); Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad"); admin = util.getHBaseAdmin(); @@ -403,7 +404,6 @@ public class TestHFileOutputFormat { assertEquals("Should make 5 regions", numRegions, 5); // Generate the bulk load files - util.startMiniMapReduceCluster(); runIncrementalPELoad(conf, table, testDir); // This doesn't write into the table, just makes files assertEquals("HFOF should not touch actual table", @@ -472,7 +472,6 @@ public class TestHFileOutputFormat { tableDigestBefore, util.checksumRows(table)); } finally { if (admin != null) admin.close(); - util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } } @@ -919,6 +918,7 @@ public class TestHFileOutputFormat { generateRandomStartKeys(5); try { + util.setJobWithoutMRCluster(); util.startMiniCluster(); final FileSystem fs = util.getDFSCluster().getFileSystem(); HBaseAdmin admin = new HBaseAdmin(conf); @@ -935,7 +935,6 @@ public class TestHFileOutputFormat { // Generate two bulk load files conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); - util.startMiniMapReduceCluster(); for (int i = 0; i < 2; i++) { Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i); @@ -974,7 +973,6 @@ public class TestHFileOutputFormat { }, 5000); } finally { - util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } } @@ -986,9 +984,10 @@ public class TestHFileOutputFormat { generateRandomStartKeys(5); try { + util.setJobWithoutMRCluster(); util.startMiniCluster(); Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction"); - final FileSystem fs = util.getDFSCluster().getFileSystem(); + final FileSystem fs = util.getTestFileSystem(); HBaseAdmin admin = new HBaseAdmin(conf); HTable table = util.createTable(TABLE_NAME, FAMILIES); assertEquals("Should start with empty table", 0, util.countRows(table)); @@ -1015,7 +1014,6 @@ public class TestHFileOutputFormat { // Generate a bulk load file with more rows conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); - util.startMiniMapReduceCluster(); runIncrementalPELoad(conf, table, testDir); // Perform the actual load @@ -1051,7 +1049,6 @@ public class TestHFileOutputFormat { }, 5000); } finally { - util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index e55affcabfb..bb1a073e5f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -421,6 +421,7 @@ public class TestHFileOutputFormat2 { for (int i = 0; i < hostCount; ++i) { hostnames[i] = "datanode_" + i; } + util.setJobWithoutMRCluster(); util.startMiniCluster(1, hostCount, hostnames); 
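// Illustrative sketch, not part of the original change: setJobWithoutMRCluster() above
// replaces the per-test MiniMRCluster, which is why every startMiniMapReduceCluster() /
// shutdownMiniMapReduceCluster() pair is dropped throughout this patch. The exact
// mechanics of setJobWithoutMRCluster() are not shown in the diff; the usual way to get
// the same effect is to point MapReduce at the in-process local runner, for example:
Configuration localRunnerConf = util.getConfiguration();
localRunnerConf.set("mapreduce.framework.name", "local");     // MR2: run jobs in-process
localRunnerConf.set("mapreduce.jobtracker.address", "local"); // MR1-era equivalent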
HTable table = util.createTable(TABLE_NAME, FAMILIES, splitKeys); @@ -431,7 +432,6 @@ public class TestHFileOutputFormat2 { assertEquals("Should make " + regionNum + " regions", numRegions, regionNum); // Generate the bulk load files - util.startMiniMapReduceCluster(); runIncrementalPELoad(conf, table.getTableDescriptor(), table.getRegionLocator(), testDir); // This doesn't write into the table, just makes files assertEquals("HFOF should not touch actual table", 0, util.countRows(table)); @@ -511,7 +511,6 @@ public class TestHFileOutputFormat2 { } finally { testDir.getFileSystem(conf).delete(testDir, true); util.deleteTable(TABLE_NAME); - util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } } @@ -956,7 +955,7 @@ public class TestHFileOutputFormat2 { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); generateRandomStartKeys(5); - + util.setJobWithoutMRCluster(); util.startMiniCluster(); try (Connection conn = ConnectionFactory.createConnection(); Admin admin = conn.getAdmin()) { @@ -974,7 +973,6 @@ public class TestHFileOutputFormat2 { // Generate two bulk load files conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); - util.startMiniMapReduceCluster(); for (int i = 0; i < 2; i++) { Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i); @@ -1016,7 +1014,6 @@ public class TestHFileOutputFormat2 { }, 5000); } finally { - util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } } @@ -1026,7 +1023,7 @@ public class TestHFileOutputFormat2 { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); generateRandomStartKeys(5); - + util.setJobWithoutMRCluster(); util.startMiniCluster(); try (Connection conn = ConnectionFactory.createConnection(conf); Admin admin = conn.getAdmin()){ @@ -1058,7 +1055,6 @@ public class TestHFileOutputFormat2 { // Generate a bulk load file with more rows conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); - util.startMiniMapReduceCluster(); RegionLocator regionLocator = conn.getRegionLocator(TABLE_NAME); runIncrementalPELoad(conf, table.getTableDescriptor(), regionLocator, testDir); @@ -1098,7 +1094,6 @@ public class TestHFileOutputFormat2 { }, 5000); } finally { - util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java index 220bc025aa5..4354da5c9a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java @@ -34,13 +34,12 @@ public class TestHRegionPartitioner { @BeforeClass public static void beforeClass() throws Exception { + UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); - UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { - UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java index 762f5301634..eae33a60b0e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java @@ -56,13 +56,12 @@ 
public class TestHashTable { @BeforeClass public static void beforeClass() throws Exception { + TEST_UTIL.setJobWithoutMRCluster(); TEST_UTIL.startMiniCluster(3); - TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { - TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 2e06fab3aa8..2154f990922 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -107,15 +107,14 @@ public class TestImportExport { public static void beforeClass() throws Exception { // Up the handlers; this test needs more than usual. UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10); + UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); - UTIL.startMiniMapReduceCluster(); FQ_OUTPUT_DIR = new Path(OUTPUT_DIR).makeQualified(FileSystem.get(UTIL.getConfiguration())).toString(); } @AfterClass public static void afterClass() throws Exception { - UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java index eba843e7a6b..acb0e7cbe7d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java @@ -102,14 +102,12 @@ public class TestImportTSVWithOperationAttributes implements Configurable { conf = util.getConfiguration(); conf.set("hbase.coprocessor.master.classes", OperationAttributesTestController.class.getName()); conf.set("hbase.coprocessor.region.classes", OperationAttributesTestController.class.getName()); + util.setJobWithoutMRCluster(); util.startMiniCluster(); - Admin admin = new HBaseAdmin(util.getConfiguration()); - util.startMiniMapReduceCluster(); } @AfterClass public static void releaseCluster() throws Exception { - util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java index 268bba2a10d..5add5d4e51a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java @@ -88,13 +88,12 @@ public class TestImportTSVWithTTLs implements Configurable { // need this where the default hfile version is not 3 (i.e. 
0.98) conf.setInt("hfile.format.version", 3); conf.set("hbase.coprocessor.region.classes", TTLCheckingObserver.class.getName()); + util.setJobWithoutMRCluster(); util.startMiniCluster(); - util.startMiniMapReduceCluster(); } @AfterClass public static void releaseCluster() throws Exception { - util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java index 5adc04a7656..40c4b4d3a19 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java @@ -103,13 +103,12 @@ public class TestImportTsv implements Configurable { @BeforeClass public static void provisionCluster() throws Exception { + util.setJobWithoutMRCluster(); util.startMiniCluster(); - util.startMiniMapReduceCluster(); } @AfterClass public static void releaseCluster() throws Exception { - util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java index a54df0830e0..8a4a24497cd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java @@ -71,6 +71,7 @@ public class TestMultiTableInputFormat { // switch TIF to log at DEBUG level TEST_UTIL.enableDebug(MultiTableInputFormat.class); TEST_UTIL.enableDebug(MultiTableInputFormatBase.class); + TEST_UTIL.setJobWithoutMRCluster(); // start mini hbase cluster TEST_UTIL.startMiniCluster(3); // create and fill table @@ -81,13 +82,10 @@ public class TestMultiTableInputFormat { TEST_UTIL.loadTable(table, INPUT_FAMILY, false); } } - // start MR cluster - TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass public static void tearDownAfterClass() throws Exception { - TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } @@ -242,7 +240,8 @@ public class TestMultiTableInputFormat { ImmutableBytesWritable.class, ImmutableBytesWritable.class, job); job.setReducerClass(ScanReducer.class); job.setNumReduceTasks(1); // one to get final "first" and "last" key - FileOutputFormat.setOutputPath(job, new Path(job.getJobName())); + FileOutputFormat.setOutputPath(job, + new Path(TEST_UTIL.getDataTestDirOnTestFS(), job.getJobName())); LOG.info("Started " + job.getJobName()); job.waitForCompletion(true); assertTrue(job.isSuccessful()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java index 6b2ee753a99..9a81990688d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java @@ -72,18 +72,17 @@ public class TestMultithreadedTableMapper { public static void beforeClass() throws Exception { // Up the handlers; this test needs more than usual. 
UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10); + UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); HTable table = UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }); UTIL.loadTable(table, INPUT_FAMILY, false); - UTIL.startMiniMapReduceCluster(); UTIL.waitUntilAllRegionsAssigned(MULTI_REGION_TABLE_NAME); } @AfterClass public static void afterClass() throws Exception { - UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java index 817e7a9a318..0e04c673003 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java @@ -80,8 +80,8 @@ public class TestRowCounter { @BeforeClass public static void setUpBeforeClass() throws Exception { + TEST_UTIL.setJobWithoutMRCluster(); TEST_UTIL.startMiniCluster(); - TEST_UTIL.startMiniMapReduceCluster(); Table table = TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME), Bytes.toBytes(COL_FAM)); writeRows(table); table.close(); @@ -94,7 +94,6 @@ public class TestRowCounter { public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); - TEST_UTIL.shutdownMiniMapReduceCluster(); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java index 2f1bee3199b..1b356e6253e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java @@ -64,13 +64,12 @@ public class TestSyncTable { @BeforeClass public static void beforeClass() throws Exception { + TEST_UTIL.setJobWithoutMRCluster(); TEST_UTIL.startMiniCluster(3); - TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { - TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java index c53510fcc9e..d404833716b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java @@ -77,20 +77,18 @@ public class TestTableInputFormat { private static final Log LOG = LogFactory.getLog(TestTableInputFormat.class); private final static HBaseTestingUtility UTIL = new HBaseTestingUtility(); - private static MiniMRCluster mrCluster; static final byte[] FAMILY = Bytes.toBytes("family"); private static final byte[][] columns = new byte[][] { FAMILY }; @BeforeClass public static void beforeClass() throws Exception { + UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); - mrCluster = UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { - UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java index 8aed5884094..022d4c9351b 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java @@ -79,17 +79,16 @@ public abstract class TestTableMapReduceBase { @BeforeClass public static void beforeClass() throws Exception { + UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); HTable table = UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }); UTIL.loadTable(table, INPUT_FAMILY, false); - UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { - UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java index c3dcd8f0e9e..1b80590af96 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java @@ -155,7 +155,7 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa try { createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 1); Job job = new Job(UTIL.getConfiguration()); - Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); + Path tmpTableDir = UTIL.getRandomDir(); TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class, @@ -196,7 +196,7 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); Job job = new Job(util.getConfiguration()); - Path tmpTableDir = util.getDataTestDirOnTestFS(snapshotName); + Path tmpTableDir = util.getRandomDir(); Scan scan = new Scan(getStartRow(), getEndRow()); // limit the scan TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java index cd85756062a..207f256f291 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java @@ -83,6 +83,7 @@ public class TestTimeRangeMapRed { @BeforeClass public static void beforeClass() throws Exception { + UTIL.setJobWithoutMRCluster(); UTIL.startMiniCluster(); } @@ -167,7 +168,6 @@ public class TestTimeRangeMapRed { private void runTestOnTable() throws IOException, InterruptedException, ClassNotFoundException { - UTIL.startMiniMapReduceCluster(); Job job = null; try { job = new Job(UTIL.getConfiguration(), "test123"); @@ -184,7 +184,6 @@ public class TestTimeRangeMapRed { // TODO Auto-generated catch block e.printStackTrace(); } finally { - UTIL.shutdownMiniMapReduceCluster(); if (job != null) { FileUtil.fullyDelete( new File(job.getConfiguration().get("hadoop.tmp.dir"))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java index d6929a255bb..343fc64b409 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java @@ -70,13 +70,12 @@ public class TestWALPlayer { @BeforeClass public static void beforeClass() throws Exception { + TEST_UTIL.setJobWithoutMRCluster(); cluster = TEST_UTIL.startMiniCluster(); - TEST_UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { - TEST_UTIL.shutdownMiniMapReduceCluster(); TEST_UTIL.shutdownMiniCluster(); } From c0ad4cdd7a3cffc354f2fa509a811b2a09ff0038 Mon Sep 17 00:00:00 2001 From: stack Date: Tue, 22 Dec 2015 19:49:14 -0800 Subject: [PATCH 53/72] HBASE-15021 hadoopqa doing false positives --- dev-support/test-patch.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 93663193255..509edeb7d2b 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -841,14 +841,13 @@ runTests () { echo "======================================================================" echo "" echo "" - failed_tests="" echo "$MVN clean test -Dsurefire.rerunFailingTestsCount=2 -P runAllTests -D${PROJECT_NAME}PatchProcess" export MAVEN_OPTS="${MAVEN_OPTS}" ulimit -a - $MVN clean test -Dsurefire.rerunFailingTestsCount=2 -P runAllTests -D${PROJECT_NAME}PatchProcess # Need to export this so the zombie subshell picks up current content export JIRA_COMMENT + $MVN clean test -Dsurefire.rerunFailingTestsCount=2 -P runAllTests -D${PROJECT_NAME}PatchProcess if [[ $? != 0 ]] ; then ### Find and format names of failed tests failed_tests=`find . -name 'TEST*.xml' | xargs $GREP -l -E " Date: Wed, 23 Dec 2015 07:29:18 -0800 Subject: [PATCH 54/72] HBASE-15018 Inconsistent way of handling TimeoutException in the rpc client implemenations (Ashish Singhi) --- .../hadoop/hbase/ipc/AbstractRpcClient.java | 39 +++++++++++++++++-- .../hadoop/hbase/ipc/AsyncRpcClient.java | 9 ++--- .../hadoop/hbase/ipc/RpcClientImpl.java | 31 --------------- 3 files changed, 38 insertions(+), 41 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java index 6f5e78aeb7d..e33ef3a1479 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java @@ -24,6 +24,13 @@ import com.google.protobuf.Descriptors; import com.google.protobuf.Message; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; + +import java.io.IOException; +import java.net.ConnectException; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.net.SocketTimeoutException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -34,6 +41,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.MetricsConnection; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.codec.KeyValueCodec; +import org.apache.hadoop.hbase.exceptions.ConnectionClosingException; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -41,10 +49,6 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PoolMap; import org.apache.hadoop.io.compress.CompressionCodec; -import java.io.IOException; -import 
java.net.InetSocketAddress; -import java.net.SocketAddress; - /** * Provides the basics for a RpcClient implementation like configuration and Logging. */ @@ -257,6 +261,33 @@ public abstract class AbstractRpcClient implements RpcClient { return new BlockingRpcChannelImplementation(this, sn, ticket, defaultOperationTimeout); } + /** + * Takes an Exception and the address we were trying to connect to and return an IOException with + * the input exception as the cause. The new exception provides the stack trace of the place where + * the exception is thrown and some extra diagnostics information. If the exception is + * ConnectException or SocketTimeoutException, return a new one of the same type; Otherwise return + * an IOException. + * @param addr target address + * @param exception the relevant exception + * @return an exception to throw + */ + protected IOException wrapException(InetSocketAddress addr, Exception exception) { + if (exception instanceof ConnectException) { + // connection refused; include the host:port in the error + return (ConnectException) new ConnectException("Call to " + addr + + " failed on connection exception: " + exception).initCause(exception); + } else if (exception instanceof SocketTimeoutException) { + return (SocketTimeoutException) new SocketTimeoutException("Call to " + addr + + " failed because " + exception).initCause(exception); + } else if (exception instanceof ConnectionClosingException) { + return (ConnectionClosingException) new ConnectionClosingException("Call to " + addr + + " failed on local exception: " + exception).initCause(exception); + } else { + return (IOException) new IOException("Call to " + addr + " failed on local exception: " + + exception).initCause(exception); + } + } + /** * Blocking rpc channel that goes via hbase rpc. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java index 60e9add9f13..a92a8484780 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java @@ -248,13 +248,10 @@ public class AsyncRpcClient extends AbstractRpcClient { Message response = timeout > 0 ? 
promise.get(timeout, TimeUnit.MILLISECONDS) : promise.get(); return new Pair<>(response, pcrc.cellScanner()); } catch (ExecutionException e) { - if (e.getCause() instanceof IOException) { - throw (IOException) e.getCause(); - } else { - throw new IOException(e.getCause()); - } + throw wrapException(addr, e); } catch (TimeoutException e) { - throw new CallTimeoutException(promise.toString()); + CallTimeoutException cte = new CallTimeoutException(promise.toString()); + throw wrapException(addr, cte); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java index 21b257fa80d..544113ddd44 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java @@ -81,7 +81,6 @@ import java.io.IOException; import java.io.InputStream; import java.io.InterruptedIOException; import java.io.OutputStream; -import java.net.ConnectException; import java.net.InetSocketAddress; import java.net.Socket; import java.net.SocketAddress; @@ -1266,36 +1265,6 @@ public class RpcClientImpl extends AbstractRpcClient { } - /** - * Take an IOException and the address we were trying to connect to - * and return an IOException with the input exception as the cause. - * The new exception provides the stack trace of the place where - * the exception is thrown and some extra diagnostics information. - * If the exception is ConnectException or SocketTimeoutException, - * return a new one of the same type; Otherwise return an IOException. - * - * @param addr target address - * @param exception the relevant exception - * @return an exception to throw - */ - protected IOException wrapException(InetSocketAddress addr, - IOException exception) { - if (exception instanceof ConnectException) { - //connection refused; include the host:port in the error - return (ConnectException)new ConnectException( - "Call to " + addr + " failed on connection exception: " + exception).initCause(exception); - } else if (exception instanceof SocketTimeoutException) { - return (SocketTimeoutException)new SocketTimeoutException("Call to " + addr + - " failed because " + exception).initCause(exception); - } else if (exception instanceof ConnectionClosingException){ - return (ConnectionClosingException) new ConnectionClosingException( - "Call to " + addr + " failed on local exception: " + exception).initCause(exception); - } else { - return (IOException)new IOException("Call to " + addr + " failed on local exception: " + - exception).initCause(exception); - } - } - /** * Interrupt the connections to the given ip:port server. This should be called if the server * is known as actually dead. 
This will not prevent current operation to be retried, and, From d65210d2138a59b91aef6443b6b26435a27a587a Mon Sep 17 00:00:00 2001 From: Matteo Bertozzi Date: Wed, 23 Dec 2015 09:46:18 -0800 Subject: [PATCH 55/72] HBASE-15030 Deadlock in master TableNamespaceManager while running IntegrationTestDDLMasterFailover --- .../apache/hadoop/hbase/master/TableNamespaceManager.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java index f4be427ea66..a4746c83c99 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java @@ -134,7 +134,7 @@ public class TableNamespaceManager { return nsTable; } - private synchronized boolean acquireSharedLock() throws IOException { + private boolean acquireSharedLock() throws IOException { try { return rwLock.readLock().tryLock(sharedLockTimeoutMs, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { @@ -142,11 +142,11 @@ public class TableNamespaceManager { } } - public synchronized void releaseSharedLock() { + public void releaseSharedLock() { rwLock.readLock().unlock(); } - public synchronized boolean acquireExclusiveLock() { + public boolean acquireExclusiveLock() { try { return rwLock.writeLock().tryLock(exclusiveLockTimeoutMs, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { @@ -154,7 +154,7 @@ public class TableNamespaceManager { } } - public synchronized void releaseExclusiveLock() { + public void releaseExclusiveLock() { rwLock.writeLock().unlock(); } From fdeca854ec8aa842136fb4f93f81895113e5b70e Mon Sep 17 00:00:00 2001 From: stack Date: Wed, 23 Dec 2015 15:31:35 -0800 Subject: [PATCH 56/72] Revert "HBASE-15018 Inconsistent way of handling TimeoutException in the rpc client implemenations (Ashish Singhi)" This reverts commit 59cca6297f9fcecec6aaeecb760ae7f27b0d0e29. 
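The wrapException() helper that this revert moves back into RpcClientImpl illustrates a common RPC-client pattern: rebuild the exception at the call site so its message carries the remote host:port and its stack trace points at the caller, while preserving the concrete type (ConnectException, SocketTimeoutException) so callers can still branch on it, with the original chained via initCause(). A condensed, standalone sketch of that pattern follows; it is adapted from the code shown in this patch, the class and method names are illustrative only, and the HBase version additionally special-cases its own ConnectionClosingException.

    import java.io.IOException;
    import java.net.ConnectException;
    import java.net.InetSocketAddress;
    import java.net.SocketTimeoutException;

    final class RpcExceptionWrapping {
      // Keep the concrete exception type, put the target address in the message,
      // and chain the original exception as the cause.
      static IOException wrap(InetSocketAddress addr, IOException e) {
        if (e instanceof ConnectException) {
          return (ConnectException) new ConnectException(
              "Call to " + addr + " failed on connection exception: " + e).initCause(e);
        }
        if (e instanceof SocketTimeoutException) {
          return (SocketTimeoutException) new SocketTimeoutException(
              "Call to " + addr + " failed because " + e).initCause(e);
        }
        return (IOException) new IOException(
            "Call to " + addr + " failed on local exception: " + e).initCause(e);
      }
    }
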
--- .../hadoop/hbase/ipc/AbstractRpcClient.java | 39 ++----------------- .../hadoop/hbase/ipc/AsyncRpcClient.java | 9 +++-- .../hadoop/hbase/ipc/RpcClientImpl.java | 31 +++++++++++++++ 3 files changed, 41 insertions(+), 38 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java index e33ef3a1479..6f5e78aeb7d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java @@ -24,13 +24,6 @@ import com.google.protobuf.Descriptors; import com.google.protobuf.Message; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; - -import java.io.IOException; -import java.net.ConnectException; -import java.net.InetSocketAddress; -import java.net.SocketAddress; -import java.net.SocketTimeoutException; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -41,7 +34,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.MetricsConnection; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.codec.KeyValueCodec; -import org.apache.hadoop.hbase.exceptions.ConnectionClosingException; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -49,6 +41,10 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PoolMap; import org.apache.hadoop.io.compress.CompressionCodec; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.SocketAddress; + /** * Provides the basics for a RpcClient implementation like configuration and Logging. */ @@ -261,33 +257,6 @@ public abstract class AbstractRpcClient implements RpcClient { return new BlockingRpcChannelImplementation(this, sn, ticket, defaultOperationTimeout); } - /** - * Takes an Exception and the address we were trying to connect to and return an IOException with - * the input exception as the cause. The new exception provides the stack trace of the place where - * the exception is thrown and some extra diagnostics information. If the exception is - * ConnectException or SocketTimeoutException, return a new one of the same type; Otherwise return - * an IOException. 
- * @param addr target address - * @param exception the relevant exception - * @return an exception to throw - */ - protected IOException wrapException(InetSocketAddress addr, Exception exception) { - if (exception instanceof ConnectException) { - // connection refused; include the host:port in the error - return (ConnectException) new ConnectException("Call to " + addr - + " failed on connection exception: " + exception).initCause(exception); - } else if (exception instanceof SocketTimeoutException) { - return (SocketTimeoutException) new SocketTimeoutException("Call to " + addr - + " failed because " + exception).initCause(exception); - } else if (exception instanceof ConnectionClosingException) { - return (ConnectionClosingException) new ConnectionClosingException("Call to " + addr - + " failed on local exception: " + exception).initCause(exception); - } else { - return (IOException) new IOException("Call to " + addr + " failed on local exception: " - + exception).initCause(exception); - } - } - /** * Blocking rpc channel that goes via hbase rpc. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java index a92a8484780..60e9add9f13 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java @@ -248,10 +248,13 @@ public class AsyncRpcClient extends AbstractRpcClient { Message response = timeout > 0 ? promise.get(timeout, TimeUnit.MILLISECONDS) : promise.get(); return new Pair<>(response, pcrc.cellScanner()); } catch (ExecutionException e) { - throw wrapException(addr, e); + if (e.getCause() instanceof IOException) { + throw (IOException) e.getCause(); + } else { + throw new IOException(e.getCause()); + } } catch (TimeoutException e) { - CallTimeoutException cte = new CallTimeoutException(promise.toString()); - throw wrapException(addr, cte); + throw new CallTimeoutException(promise.toString()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java index 544113ddd44..21b257fa80d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java @@ -81,6 +81,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.InterruptedIOException; import java.io.OutputStream; +import java.net.ConnectException; import java.net.InetSocketAddress; import java.net.Socket; import java.net.SocketAddress; @@ -1265,6 +1266,36 @@ public class RpcClientImpl extends AbstractRpcClient { } + /** + * Take an IOException and the address we were trying to connect to + * and return an IOException with the input exception as the cause. + * The new exception provides the stack trace of the place where + * the exception is thrown and some extra diagnostics information. + * If the exception is ConnectException or SocketTimeoutException, + * return a new one of the same type; Otherwise return an IOException. 
+ * + * @param addr target address + * @param exception the relevant exception + * @return an exception to throw + */ + protected IOException wrapException(InetSocketAddress addr, + IOException exception) { + if (exception instanceof ConnectException) { + //connection refused; include the host:port in the error + return (ConnectException)new ConnectException( + "Call to " + addr + " failed on connection exception: " + exception).initCause(exception); + } else if (exception instanceof SocketTimeoutException) { + return (SocketTimeoutException)new SocketTimeoutException("Call to " + addr + + " failed because " + exception).initCause(exception); + } else if (exception instanceof ConnectionClosingException){ + return (ConnectionClosingException) new ConnectionClosingException( + "Call to " + addr + " failed on local exception: " + exception).initCause(exception); + } else { + return (IOException)new IOException("Call to " + addr + " failed on local exception: " + + exception).initCause(exception); + } + } + /** * Interrupt the connections to the given ip:port server. This should be called if the server * is known as actually dead. This will not prevent current operation to be retried, and, From 4a7565af9cf8ef7e40ef3c592d6815d1b671fb5e Mon Sep 17 00:00:00 2001 From: anoopsjohn Date: Thu, 24 Dec 2015 07:56:27 +0530 Subject: [PATCH 57/72] HBASE-14940 Make our unsafe based ops more safe. --- .../hadoop/hbase/filter/FuzzyRowFilter.java | 6 +-- .../org/apache/hadoop/hbase/util/Bytes.java | 46 +++++++++---------- .../hadoop/hbase/util/UnsafeAccess.java | 19 ++++++++ 3 files changed, 45 insertions(+), 26 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java index 3bc5b7440c2..0158680e782 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java @@ -93,7 +93,7 @@ public class FuzzyRowFilter extends FilterBase { } private void preprocessSearchKey(Pair p) { - if (UnsafeAccess.isAvailable() == false) { + if (UnsafeAccess.unaligned() == false) { return; } byte[] key = p.getFirst(); @@ -111,7 +111,7 @@ public class FuzzyRowFilter extends FilterBase { * @return mask array */ private byte[] preprocessMask(byte[] mask) { - if (UnsafeAccess.isAvailable() == false) { + if (UnsafeAccess.unaligned() == false) { return mask; } if (isPreprocessedMask(mask)) return mask; @@ -320,7 +320,7 @@ public class FuzzyRowFilter extends FilterBase { static SatisfiesCode satisfies(boolean reverse, byte[] row, int offset, int length, byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { - if (UnsafeAccess.isAvailable() == false) { + if (UnsafeAccess.unaligned() == false) { return satisfiesNoUnsafe(reverse, row, offset, length, fuzzyKeyBytes, fuzzyKeyMeta); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java index a5a6cca8c2a..3d709a5a3c3 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java @@ -604,7 +604,7 @@ public class Bytes { if (length != SIZEOF_LONG || offset + length > bytes.length) { throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_LONG); } - if (UnsafeComparer.isAvailable()) { + if (UnsafeComparer.unaligned()) { return toLongUnsafe(bytes, offset); } else { long l = 0; @@ -645,7 
+645,7 @@ public class Bytes { throw new IllegalArgumentException("Not enough room to put a long at" + " offset " + offset + " in a " + bytes.length + " byte array"); } - if (UnsafeComparer.isAvailable()) { + if (UnsafeComparer.unaligned()) { return putLongUnsafe(bytes, offset, val); } else { for(int i = offset + 7; i > offset; i--) { @@ -800,7 +800,7 @@ public class Bytes { if (length != SIZEOF_INT || offset + length > bytes.length) { throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_INT); } - if (UnsafeComparer.isAvailable()) { + if (UnsafeComparer.unaligned()) { return toIntUnsafe(bytes, offset); } else { int n = 0; @@ -896,7 +896,7 @@ public class Bytes { throw new IllegalArgumentException("Not enough room to put an int at" + " offset " + offset + " in a " + bytes.length + " byte array"); } - if (UnsafeComparer.isAvailable()) { + if (UnsafeComparer.unaligned()) { return putIntUnsafe(bytes, offset, val); } else { for(int i= offset + 3; i > offset; i--) { @@ -970,7 +970,7 @@ public class Bytes { if (length != SIZEOF_SHORT || offset + length > bytes.length) { throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_SHORT); } - if (UnsafeComparer.isAvailable()) { + if (UnsafeComparer.unaligned()) { return toShortUnsafe(bytes, offset); } else { short n = 0; @@ -1008,7 +1008,7 @@ public class Bytes { throw new IllegalArgumentException("Not enough room to put a short at" + " offset " + offset + " in a " + bytes.length + " byte array"); } - if (UnsafeComparer.isAvailable()) { + if (UnsafeComparer.unaligned()) { return putShortUnsafe(bytes, offset, val); } else { bytes[offset+1] = (byte) val; @@ -1315,28 +1315,19 @@ public class Bytes { INSTANCE; static final Unsafe theUnsafe; + private static boolean unaligned = false; /** The offset to the first element in a byte array. */ static final int BYTE_ARRAY_BASE_OFFSET; static { - theUnsafe = (Unsafe) AccessController.doPrivileged( - new PrivilegedAction() { - @Override - public Object run() { - try { - Field f = Unsafe.class.getDeclaredField("theUnsafe"); - f.setAccessible(true); - return f.get(null); - } catch (NoSuchFieldException e) { - // It doesn't matter what we throw; - // it's swallowed in getBestComparer(). - throw new Error(); - } catch (IllegalAccessException e) { - throw new Error(); - } - } - }); + if (UnsafeAccess.unaligned()) { + theUnsafe = UnsafeAccess.theUnsafe; + } else { + // It doesn't matter what we throw; + // it's swallowed in getBestComparer(). + throw new Error(); + } BYTE_ARRAY_BASE_OFFSET = theUnsafe.arrayBaseOffset(byte[].class); @@ -1344,6 +1335,7 @@ public class Bytes { if (theUnsafe.arrayIndexScale(byte[].class) != 1) { throw new AssertionError(); } + unaligned = UnsafeAccess.unaligned(); } static final boolean littleEndian = @@ -1403,6 +1395,14 @@ public class Bytes { return theUnsafe != null; } + /** + * @return true when running JVM is having sun's Unsafe package available in it and underlying + * system having unaligned-access capability. + */ + public static boolean unaligned() { + return unaligned; + } + /** * Lexicographically compare two arrays. 
* diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java index 680bee4da80..1a3460795cd 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.util; import java.lang.reflect.Field; +import java.lang.reflect.Method; import java.nio.ByteOrder; import java.security.AccessController; import java.security.PrivilegedAction; @@ -39,6 +40,7 @@ public final class UnsafeAccess { /** The offset to the first element in a byte array. */ public static final int BYTE_ARRAY_BASE_OFFSET; + private static boolean unaligned = false; static { theUnsafe = (Unsafe) AccessController.doPrivileged(new PrivilegedAction() { @@ -57,6 +59,15 @@ public final class UnsafeAccess { if(theUnsafe != null){ BYTE_ARRAY_BASE_OFFSET = theUnsafe.arrayBaseOffset(byte[].class); + try { + // Using java.nio.Bits#unaligned() to check for unaligned-access capability + Class clazz = Class.forName("java.nio.Bits"); + Method m = clazz.getDeclaredMethod("unaligned"); + m.setAccessible(true); + unaligned = (boolean) m.invoke(null); + } catch (Exception e) { + unaligned = false; + } } else{ BYTE_ARRAY_BASE_OFFSET = -1; } @@ -68,6 +79,14 @@ public final class UnsafeAccess { return theUnsafe != null; } + /** + * @return true when running JVM is having sun's Unsafe package available in it and underlying + * system having unaligned-access capability. + */ + public static boolean unaligned() { + return unaligned; + } + public static final boolean littleEndian = ByteOrder.nativeOrder() .equals(ByteOrder.LITTLE_ENDIAN); } From b59f0240e5a3aeb434d72ffe5d0575810d23dcf3 Mon Sep 17 00:00:00 2001 From: Samir Ahmic Date: Wed, 23 Dec 2015 23:30:26 +0100 Subject: [PATCH 58/72] HBASE-15034 IntegrationTestDDLMasterFailover does not clean created namespaces. 
Signed-off-by: Matteo Bertozzi --- .../hbase/IntegrationTestDDLMasterFailover.java | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.java index dffde55a3af..47a3e4fc7bc 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.java @@ -103,7 +103,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase { protected static final int DEFAULT_NUM_REGIONS = 50; // number of regions in pre-split tables - private boolean keepTableAtTheEnd = false; + private boolean keepObjectsAtTheEnd = false; protected HBaseCluster cluster; protected Connection connection; @@ -144,11 +144,19 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase { @Override public void cleanUpCluster() throws Exception { - if (!keepTableAtTheEnd) { + if (!keepObjectsAtTheEnd) { Admin admin = util.getHBaseAdmin(); admin.disableTables("ittable-\\d+"); admin.deleteTables("ittable-\\d+"); + NamespaceDescriptor [] nsds = admin.listNamespaceDescriptors(); + for(NamespaceDescriptor nsd:nsds ) { + if(nsd.getName().matches("itnamespace\\d+")) { + LOG.info("Removing namespace="+nsd.getName()); + admin.deleteNamespace(nsd.getName()); + } + } } + enabledTables.clear(); disabledTables.clear(); deletedTables.clear(); @@ -938,9 +946,9 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase { LOG.info("Running hbck"); hbck = HbckTestingUtil.doFsck(util.getConfiguration(), false); if (HbckTestingUtil.inconsistencyFound(hbck)) { - // Find the inconsistency during HBCK. Leave table undropped so that + // Find the inconsistency during HBCK. Leave table and namespace undropped so that // we can check outside the test. 
- keepTableAtTheEnd = true; + keepObjectsAtTheEnd = true; } HbckTestingUtil.assertNoErrors(hbck); LOG.info("Finished hbck"); From a6eea24f711106f1f162453df54aebf9ebb6c6dc Mon Sep 17 00:00:00 2001 From: tedyu Date: Thu, 24 Dec 2015 07:02:04 -0800 Subject: [PATCH 59/72] HBASE-15032 hbase shell scan filter string assumes UTF-8 encoding (huaxiang sun) --- hbase-shell/src/main/ruby/hbase/table.rb | 6 ++++-- hbase-shell/src/test/ruby/hbase/table_test.rb | 16 ++++++++++++++++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/hbase-shell/src/main/ruby/hbase/table.rb b/hbase-shell/src/main/ruby/hbase/table.rb index 0946eb2eb60..74868c2f39c 100644 --- a/hbase-shell/src/main/ruby/hbase/table.rb +++ b/hbase-shell/src/main/ruby/hbase/table.rb @@ -358,7 +358,8 @@ EOF unless filter.class == String get.setFilter(filter) else - get.setFilter(org.apache.hadoop.hbase.filter.ParseFilter.new.parseFilterString(filter)) + get.setFilter( + org.apache.hadoop.hbase.filter.ParseFilter.new.parseFilterString(filter.to_java_bytes)) end get.setConsistency(org.apache.hadoop.hbase.client.Consistency.valueOf(consistency)) if consistency @@ -454,7 +455,8 @@ EOF unless filter.class == String scan.setFilter(filter) else - scan.setFilter(org.apache.hadoop.hbase.filter.ParseFilter.new.parseFilterString(filter)) + scan.setFilter( + org.apache.hadoop.hbase.filter.ParseFilter.new.parseFilterString(filter.to_java_bytes)) end scan.setScanMetricsEnabled(enablemetrics) if enablemetrics diff --git a/hbase-shell/src/test/ruby/hbase/table_test.rb b/hbase-shell/src/test/ruby/hbase/table_test.rb index 184e0d4a601..d74c6d88fed 100644 --- a/hbase-shell/src/test/ruby/hbase/table_test.rb +++ b/hbase-shell/src/test/ruby/hbase/table_test.rb @@ -598,6 +598,22 @@ module Hbase end end + define_test "scan should support FILTER with non-ASCII bytes" do + @test_table.put(4, "x:a", "\x82") + begin + res = @test_table._scan_internal FILTER => "SingleColumnValueFilter('x', 'a', >=, 'binary:\x82', true, true)" + assert_not_equal(res, {}, "Result is empty") + assert_kind_of(Hash, res) + assert_not_nil(res['4']) + assert_not_nil(res['4']['x:a']) + assert_nil(res['1']) + assert_nil(res['2']) + ensure + # clean up newly added columns for this test only. 
+ @test_table.delete(4, "x:a") + end + end + define_test "mutation with TTL should expire" do @test_table.put('ttlTest', 'x:a', 'foo', { TTL => 1000 } ) begin From afaa7f843ab02600062f86ae5aca2bca50928e00 Mon Sep 17 00:00:00 2001 From: tedyu Date: Thu, 24 Dec 2015 11:18:32 -0800 Subject: [PATCH 60/72] HBASE-14717 enable_table_replication command should only create specified table for a peer cluster (Ashish) --- .../client/replication/ReplicationAdmin.java | 13 ++++- .../replication/ReplicationPeerZKImpl.java | 20 +++++++- .../TestReplicationAdminWithClusters.java | 51 ++++++++++++++++--- 3 files changed, 73 insertions(+), 11 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java index 24a3dcb0831..d7f58a9b131 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java @@ -600,7 +600,17 @@ public class ReplicationAdmin implements Closeable { if (repPeers == null || repPeers.size() <= 0) { throw new IllegalArgumentException("Found no peer cluster for replication."); } + + final TableName onlyTableNameQualifier = TableName.valueOf(tableName.getQualifierAsString()); + for (ReplicationPeer repPeer : repPeers) { + Map> tableCFMap = repPeer.getTableCFs(); + // TODO Currently peer TableCFs will not include namespace so we need to check only for table + // name without namespace in it. Need to correct this logic once we fix HBASE-11386. + if (tableCFMap != null && !tableCFMap.containsKey(onlyTableNameQualifier)) { + continue; + } + Configuration peerConf = repPeer.getConfiguration(); HTableDescriptor htd = null; try (Connection conn = ConnectionFactory.createConnection(peerConf); @@ -639,7 +649,8 @@ public class ReplicationAdmin implements Closeable { try { Pair pair = this.replicationPeers.getPeerConf(peerId); Configuration peerConf = pair.getSecond(); - ReplicationPeer peer = new ReplicationPeerZKImpl(peerConf, peerId, pair.getFirst()); + ReplicationPeer peer = new ReplicationPeerZKImpl(peerConf, peerId, pair.getFirst(), + parseTableCFsFromConfig(this.getPeerTableCFs(peerId))); s = zkw.getRecoverableZooKeeper().exists(peerConf.get(HConstants.ZOOKEEPER_ZNODE_PARENT), null); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java index e1ff0787a4d..a0d7b5fcb19 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java @@ -55,8 +55,8 @@ public class ReplicationPeerZKImpl implements ReplicationPeer, Abortable, Closea private TableCFsTracker tableCFsTracker; /** - * Constructor that takes all the objects required to communicate with the - * specified peer, except for the region server addresses. + * Constructor that takes all the objects required to communicate with the specified peer, except + * for the region server addresses. 
* @param conf configuration object to this peer * @param id string representation of this peer's identifier * @param peerConfig configuration for the replication peer @@ -67,6 +67,22 @@ public class ReplicationPeerZKImpl implements ReplicationPeer, Abortable, Closea this.peerConfig = peerConfig; this.id = id; } + + /** + * Constructor that takes all the objects required to communicate with the specified peer, except + * for the region server addresses. + * @param conf configuration object to this peer + * @param id string representation of this peer's identifier + * @param peerConfig configuration for the replication peer + * @param tableCFs table-cf configuration for this peer + */ + public ReplicationPeerZKImpl(Configuration conf, String id, ReplicationPeerConfig peerConfig, + Map> tableCFs) throws ReplicationException { + this.conf = conf; + this.peerConfig = peerConfig; + this.id = id; + this.tableCFs = tableCFs; + } /** * start a state tracker to check whether this peer is enabled or not diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java index 0d4e853a075..d628a7c0b2c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java @@ -15,6 +15,10 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; @@ -40,6 +44,7 @@ public class TestReplicationAdminWithClusters extends TestReplicationBase { static Connection connection2; static Admin admin1; static Admin admin2; + static ReplicationAdmin adminExt; @BeforeClass public static void setUpBeforeClass() throws Exception { @@ -48,12 +53,14 @@ public class TestReplicationAdminWithClusters extends TestReplicationBase { connection2 = ConnectionFactory.createConnection(conf2); admin1 = connection1.getAdmin(); admin2 = connection2.getAdmin(); + adminExt = new ReplicationAdmin(conf1); } @AfterClass public static void tearDownAfterClass() throws Exception { admin1.close(); admin2.close(); + adminExt.close(); connection1.close(); connection2.close(); TestReplicationBase.tearDownAfterClass(); @@ -64,7 +71,6 @@ public class TestReplicationAdminWithClusters extends TestReplicationBase { admin2.disableTable(tableName); admin2.deleteTable(tableName); assertFalse(admin2.tableExists(tableName)); - ReplicationAdmin adminExt = new ReplicationAdmin(conf1); adminExt.enableTableRep(tableName); assertTrue(admin2.tableExists(tableName)); } @@ -83,7 +89,6 @@ public class TestReplicationAdminWithClusters extends TestReplicationBase { admin2.modifyTable(tableName, table); admin2.enableTable(tableName); - ReplicationAdmin adminExt = new ReplicationAdmin(conf1); adminExt.enableTableRep(tableName); table = admin1.getTableDescriptor(tableName); for (HColumnDescriptor fam : table.getColumnFamilies()) { @@ -100,7 +105,6 @@ public class TestReplicationAdminWithClusters extends TestReplicationBase { admin2.modifyTable(tableName, table); admin2.enableTable(tableName); - ReplicationAdmin adminExt = new ReplicationAdmin(conf1); try { 
adminExt.enableTableRep(tableName); fail("Exception should be thrown if table descriptors in the clusters are not same."); @@ -119,7 +123,6 @@ public class TestReplicationAdminWithClusters extends TestReplicationBase { @Test(timeout = 300000) public void testDisableAndEnableReplication() throws Exception { - ReplicationAdmin adminExt = new ReplicationAdmin(conf1); adminExt.disableTableRep(tableName); HTableDescriptor table = admin1.getTableDescriptor(tableName); for (HColumnDescriptor fam : table.getColumnFamilies()) { @@ -138,25 +141,57 @@ public class TestReplicationAdminWithClusters extends TestReplicationBase { @Test(timeout = 300000, expected = TableNotFoundException.class) public void testDisableReplicationForNonExistingTable() throws Exception { - ReplicationAdmin adminExt = new ReplicationAdmin(conf1); adminExt.disableTableRep(TableName.valueOf("nonExistingTable")); } @Test(timeout = 300000, expected = TableNotFoundException.class) public void testEnableReplicationForNonExistingTable() throws Exception { - ReplicationAdmin adminExt = new ReplicationAdmin(conf1); adminExt.enableTableRep(TableName.valueOf("nonExistingTable")); } @Test(timeout = 300000, expected = IllegalArgumentException.class) public void testDisableReplicationWhenTableNameAsNull() throws Exception { - ReplicationAdmin adminExt = new ReplicationAdmin(conf1); adminExt.disableTableRep(null); } @Test(timeout = 300000, expected = IllegalArgumentException.class) public void testEnableReplicationWhenTableNameAsNull() throws Exception { - ReplicationAdmin adminExt = new ReplicationAdmin(conf1); adminExt.enableTableRep(null); } + + /* + * Test enable table replication should create table only in user explicit specified table-cfs. + * HBASE-14717 + */ + @Test(timeout = 300000) + public void testEnableReplicationForExplicitSetTableCfs() throws Exception { + TableName tn = TableName.valueOf("testEnableReplicationForSetTableCfs"); + String peerId = "2"; + if (admin2.isTableAvailable(tableName)) { + admin2.disableTable(tableName); + admin2.deleteTable(tableName); + } + assertFalse("Table should not exists in the peer cluster", admin2.isTableAvailable(tableName)); + + Map> tableCfs = + new HashMap>(); + tableCfs.put(tn, null); + try { + adminExt.setPeerTableCFs(peerId, tableCfs); + adminExt.enableTableRep(tableName); + assertFalse("Table should not be created if user has set table cfs explicitly for the " + + "peer and this is not part of that collection", + admin2.isTableAvailable(tableName)); + + tableCfs.put(tableName, null); + adminExt.setPeerTableCFs(peerId, tableCfs); + adminExt.enableTableRep(tableName); + assertTrue( + "Table should be created if user has explicitly added table into table cfs collection", + admin2.isTableAvailable(tableName)); + } finally { + adminExt.removePeerTableCFs(peerId, adminExt.getPeerTableCFs(peerId)); + adminExt.disableTableRep(tableName); + } + } } From 31f8d71ffe2feec14fbf74c277439740216f52b4 Mon Sep 17 00:00:00 2001 From: Lars Hofhansl Date: Thu, 24 Dec 2015 12:55:39 -0800 Subject: [PATCH 61/72] HBASE-14822; addendum - handle callSeq. 
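This addendum makes a lease renewal also advance the scanner's nextCallSeq on the region server, so a client that only renews the lease (without calling next()) is not rejected with an OutOfOrderScannerNextException on its following call. A minimal client-side sketch of the intended usage, assuming default configuration and an illustrative table name (the table name and the pause are not part of the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AbstractClientScanner;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class RenewLeaseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("t1"));
         ResultScanner scanner = table.getScanner(new Scan())) {
      Result first = scanner.next();                   // opens the scanner on the server
      // ... the client is busy elsewhere and makes no next() calls for a while ...
      ((AbstractClientScanner) scanner).renewLease();  // re-adds the lease and, with this fix,
                                                       // increments nextCallSeq on the server
      Result second = scanner.next();                  // stays in sequence, no OutOfOrderScannerNextException
    }
  }
}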
--- .../apache/hadoop/hbase/regionserver/RSRpcServices.java | 5 ++++- .../org/apache/hadoop/hbase/client/TestLeaseRenewal.java | 8 ++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 325f7bc5169..c025f3d2399 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -2386,9 +2386,12 @@ public class RSRpcServices implements HBaseRPCErrorHandler, ttl = this.scannerLeaseTimeoutPeriod; } if (request.hasRenew() && request.getRenew()) { + rsh = scanners.get(scannerName); lease = regionServer.leases.removeLease(scannerName); - if (lease != null && scanners.containsKey(scannerName)) { + if (lease != null && rsh != null) { regionServer.leases.addLease(lease); + // Increment the nextCallSeq value which is the next expected from client. + rsh.incNextCallSeq(); } return builder.build(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java index fd9c9bb1236..3333f54be0f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java @@ -26,10 +26,13 @@ import java.util.Arrays; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.CompatibilityFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.ipc.MetricsHBaseServerSource; +import org.apache.hadoop.hbase.test.MetricsAssertHelper; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.After; @@ -41,6 +44,8 @@ import org.junit.experimental.categories.Category; @Category(LargeTests.class) public class TestLeaseRenewal { + public MetricsAssertHelper HELPER = CompatibilityFactory.getInstance(MetricsAssertHelper.class); + final Log LOG = LogFactory.getLog(getClass()); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static byte[] FAMILY = Bytes.toBytes("testFamily"); @@ -121,5 +126,8 @@ public class TestLeaseRenewal { assertFalse(((AbstractClientScanner)rs).renewLease()); rs.close(); table.close(); + MetricsHBaseServerSource serverSource = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0) + .getRpcServer().getMetrics().getMetricsSource(); + HELPER.assertCounter("exceptions.OutOfOrderScannerNextException", 0, serverSource); } } From 30361079c72c225eb0a2d0fb7bec612621448b9b Mon Sep 17 00:00:00 2001 From: tedyu Date: Fri, 25 Dec 2015 10:35:17 -0800 Subject: [PATCH 62/72] HBASE-15039 HMaster and RegionServers should try to refresh token keys from zk when facing InvalidToken (Yong Zhang) --- .../AuthenticationTokenSecretManager.java | 21 ++++ .../hbase/security/token/ZKSecretWatcher.java | 25 +++++ .../token/TestZKSecretWatcherRefreshKeys.java | 99 +++++++++++++++++++ 3 files changed, 145 insertions(+) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java index 524b6f983fe..1f2bec46293 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java @@ -143,6 +143,27 @@ public class AuthenticationTokenSecretManager throw new InvalidToken("Token has expired"); } AuthenticationKey masterKey = allKeys.get(identifier.getKeyId()); + if(masterKey == null) { + if(zkWatcher.getWatcher().isAborted()) { + LOG.error("ZookeeperWatcher is abort"); + throw new InvalidToken("Token keys could not be sync from zookeeper" + + " because of ZookeeperWatcher abort"); + } + synchronized (this) { + if (!leaderElector.isAlive() || leaderElector.isStopped()) { + LOG.warn("Thread leaderElector[" + leaderElector.getName() + ":" + + leaderElector.getId() + "] is stoped or not alive"); + leaderElector.start(); + LOG.info("Thread leaderElector [" + leaderElector.getName() + ":" + + leaderElector.getId() + "] is started"); + } + } + zkWatcher.refreshKeys(); + if (LOG.isDebugEnabled()) { + LOG.debug("Sync token keys from zookeeper"); + } + masterKey = allKeys.get(identifier.getKeyId()); + } if (masterKey == null) { throw new InvalidToken("Unknown master key for token (id="+ identifier.getKeyId()+")"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java index 2264193bf2d..a1c3b6688b0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.security.token; +import com.google.common.annotations.VisibleForTesting; + import java.io.IOException; import java.util.List; @@ -211,4 +213,27 @@ public class ZKSecretWatcher extends ZooKeeperListener { watcher.abort("Failed serializing key "+key.getKeyId(), ioe); } } + + /** + * refresh keys + */ + synchronized void refreshKeys() { + try { + List nodes = + ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); + refreshNodes(nodes); + } catch (KeeperException ke) { + LOG.fatal("Error reading data from zookeeper", ke); + watcher.abort("Error reading changed keys from zookeeper", ke); + } + } + + /** + * get token keys parent node + * @return token keys parent node + */ + @VisibleForTesting + String getKeysParentZNode() { + return keysParentZNode; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java new file mode 100644 index 00000000000..b2396819c85 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.security.token; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.testclassification.SecurityTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Writables; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Test the refreshKeys in ZKSecretWatcher + */ +@Category({ SecurityTests.class, SmallTests.class }) +public class TestZKSecretWatcherRefreshKeys { + private static final Log LOG = LogFactory.getLog(TestZKSecretWatcherRefreshKeys.class); + private static HBaseTestingUtility TEST_UTIL; + + private static class MockAbortable implements Abortable { + private boolean abort; + public void abort(String reason, Throwable e) { + LOG.info("Aborting: "+reason, e); + abort = true; + } + + public boolean isAborted() { + return abort; + } + } + + @BeforeClass + public static void setupBeforeClass() throws Exception { + TEST_UTIL = new HBaseTestingUtility(); + TEST_UTIL.startMiniZKCluster(); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniZKCluster(); + } + + private static ZooKeeperWatcher newZK(Configuration conf, String name, + Abortable abort) throws Exception { + Configuration copy = HBaseConfiguration.create(conf); + ZooKeeperWatcher zk = new ZooKeeperWatcher(copy, name, abort); + return zk; + } + + @Test + public void testRefreshKeys() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + ZooKeeperWatcher zk = newZK(conf, "127.0.0.1", new MockAbortable()); + AuthenticationTokenSecretManager keyManager = + new AuthenticationTokenSecretManager(conf, zk, "127.0.0.1", + 60 * 60 * 1000, 60 * 1000); + ZKSecretWatcher watcher = new ZKSecretWatcher(conf, zk, keyManager); + ZKUtil.deleteChildrenRecursively(zk, watcher.getKeysParentZNode()); + Integer[] keys = { 1, 2, 3, 4, 5, 6 }; + for (Integer key : keys) { + AuthenticationKey ak = new AuthenticationKey(key, + System.currentTimeMillis() + 600 * 1000, null); + ZKUtil.createWithParents(zk, + ZKUtil.joinZNode(watcher.getKeysParentZNode(), key.toString()), + Writables.getBytes(ak)); + } + Assert.assertNull(keyManager.getCurrentKey()); + watcher.refreshKeys(); + for (Integer key : keys) { + Assert.assertNotNull(keyManager.getKey(key.intValue())); + } + } +} From 03e1451070de1b12f50a509253334c30df9e6879 Mon Sep 17 00:00:00 2001 From: Jonathan M Hsieh Date: Fri, 25 Dec 2015 12:17:51 -0800 Subject: [PATCH 63/72] HBASE-15035 bulkloading hfiles with tags that require splits do not preserve tags --- .../hbase/io/hfile/HFileContextBuilder.java | 2 +- .../mapreduce/LoadIncrementalHFiles.java | 1 
+ .../mapreduce/TestLoadIncrementalHFiles.java | 68 ++++++++++++- ...dIncrementalHFilesUseSecurityEndPoint.java | 6 ++ .../TestSecureLoadIncrementalHFiles.java | 5 + .../hadoop/hbase/util/HFileTestUtil.java | 99 +++++++++++++++++-- 6 files changed, 170 insertions(+), 11 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java index 4d8bede6baf..770204fca2b 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java @@ -37,7 +37,7 @@ public class HFileContextBuilder { /** Whether mvcc is to be included in the Read/Write **/ private boolean includesMvcc = true; /** Whether tags are to be included in the Read/Write **/ - private boolean includesTags; + private boolean includesTags = false; /** Compression algorithm used **/ private Algorithm compression = Algorithm.NONE; /** Whether tags to be compressed or not **/ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java index 9ac71b9404f..e5550d1082a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java @@ -944,6 +944,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)) .withBlockSize(blocksize) .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding()) + .withIncludesTags(true) .build(); halfWriter = new StoreFile.WriterBuilder(conf, cacheConf, fs) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java index 27bac6f80ee..5c5ecc8e5a5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java @@ -33,6 +33,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -41,6 +42,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; @@ -52,8 +54,10 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.HFileTestUtil; import org.junit.AfterClass; import org.junit.BeforeClass; +import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; /** * Test cases for the "load" half of the HFileOutputFormat bulk load @@ -62,6 +66,9 @@ import org.junit.experimental.categories.Category; */ 
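Two details carry the fix above: HFileContextBuilder now spells out that includesTags defaults to false, and the writer LoadIncrementalHFiles uses when an incoming HFile has to be split opts back in with withIncludesTags(true). A minimal sketch, with illustrative family/qualifier/value names, of building a tag-aware context and a tagged cell the way the new HFileTestUtil helper does:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class TaggedHFileContextSketch {
  public static void main(String[] args) {
    // Writers drop tags unless the HFileContext says they are present.
    HFileContext context = new HFileContextBuilder()
        .withIncludesTags(true)
        .build();

    // A cell carrying its row key as an ACL tag, as the test helper does.
    byte[] row = Bytes.toBytes("row-ddd");
    List<Tag> tags = new ArrayList<Tag>();
    tags.add(new Tag(TagType.ACL_TAG_TYPE, row));
    KeyValue kv = new KeyValue(row, Bytes.toBytes("myfam"), Bytes.toBytes("myqual"),
        System.currentTimeMillis(), Bytes.toBytes("value"), tags);

    System.out.println(context.isIncludesTags() + ", tags length = " + kv.getTagsLength());
  }
}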
@Category(LargeTests.class) public class TestLoadIncrementalHFiles { + @Rule + public TestName tn = new TestName(); + private static final byte[] QUALIFIER = Bytes.toBytes("myqual"); private static final byte[] FAMILY = Bytes.toBytes("myfam"); private static final String NAMESPACE = "bulkNS"; @@ -82,6 +89,9 @@ public class TestLoadIncrementalHFiles { util.getConfiguration().setInt( LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY, MAX_FILES_PER_REGION_PER_FAMILY); + // change default behavior so that tag values are returned with normal rpcs + util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY, + KeyValueCodecWithTags.class.getCanonicalName()); util.startMiniCluster(); setupNamespace(); @@ -226,6 +236,14 @@ public class TestLoadIncrementalHFiles { ); } + private HTableDescriptor buildHTD(TableName tableName, BloomType bloomType) { + HTableDescriptor htd = new HTableDescriptor(tableName); + HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY); + familyDesc.setBloomFilterType(bloomType); + htd.addFamily(familyDesc); + return htd; + } + private void runTest(String testName, BloomType bloomType, byte[][][] hfileRanges) throws Exception { runTest(testName, bloomType, null, hfileRanges); @@ -247,10 +265,7 @@ public class TestLoadIncrementalHFiles { private void runTest(String testName, TableName tableName, BloomType bloomType, boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges) throws Exception { - HTableDescriptor htd = new HTableDescriptor(tableName); - HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY); - familyDesc.setBloomFilterType(bloomType); - htd.addFamily(familyDesc); + HTableDescriptor htd = buildHTD(tableName, bloomType); runTest(testName, htd, bloomType, preCreateTable, tableSplitKeys, hfileRanges); } @@ -308,6 +323,51 @@ public class TestLoadIncrementalHFiles { } } + /** + * Test that tags survive through a bulk load that needs to split hfiles. + * + * This test depends on the "hbase.client.rpc.codec" = KeyValueCodecWithTags so that the client + * can get tags in the responses. + */ + @Test(timeout = 60000) + public void htestTagsSurviveBulkLoadSplit() throws Exception { + Path dir = util.getDataTestDirOnTestFS(tn.getMethodName()); + FileSystem fs = util.getTestFileSystem(); + dir = dir.makeQualified(fs); + Path familyDir = new Path(dir, Bytes.toString(FAMILY)); + // table has these split points + byte [][] tableSplitKeys = new byte[][] { + Bytes.toBytes("aaa"), Bytes.toBytes("fff"), Bytes.toBytes("jjj"), + Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"), + }; + + // creating an hfile that has values that span the split points. + byte[] from = Bytes.toBytes("ddd"); + byte[] to = Bytes.toBytes("ooo"); + HFileTestUtil.createHFileWithTags(util.getConfiguration(), fs, + new Path(familyDir, tn.getMethodName()+"_hfile"), + FAMILY, QUALIFIER, from, to, 1000); + int expectedRows = 1000; + + TableName tableName = TableName.valueOf(tn.getMethodName()); + HTableDescriptor htd = buildHTD(tableName, BloomType.NONE); + util.getHBaseAdmin().createTable(htd, tableSplitKeys); + + LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration()); + String [] args= {dir.toString(), tableName.toString()}; + loader.run(args); + + Table table = util.getConnection().getTable(tableName); + try { + assertEquals(expectedRows, util.countRows(table)); + HFileTestUtil.verifyTags(table); + } finally { + table.close(); + } + + util.deleteTable(tableName); + } + /** * Test loading into a column family that does not exist. 
*/ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesUseSecurityEndPoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesUseSecurityEndPoint.java index 6a916f6daf5..11627a10ba1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesUseSecurityEndPoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesUseSecurityEndPoint.java @@ -19,6 +19,8 @@ package org.apache.hadoop.hbase.mapreduce; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.junit.BeforeClass; @@ -33,6 +35,10 @@ public class TestLoadIncrementalHFilesUseSecurityEndPoint extends TestLoadIncrem MAX_FILES_PER_REGION_PER_FAMILY); util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint"); + // change default behavior so that tag values are returned with normal rpcs + util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY, + KeyValueCodecWithTags.class.getCanonicalName()); + util.startMiniCluster(); setupNamespace(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFiles.java index 3e5a1ba2a8b..69554eb836e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFiles.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFiles.java @@ -19,6 +19,8 @@ */ package org.apache.hadoop.hbase.mapreduce; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.access.AccessControlLists; @@ -51,6 +53,9 @@ public class TestSecureLoadIncrementalHFiles extends TestLoadIncrementalHFiles{ util.getConfiguration().setInt( LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY, MAX_FILES_PER_REGION_PER_FAMILY); + // change default behavior so that tag values are returned with normal rpcs + util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY, + KeyValueCodecWithTags.class.getCanonicalName()); util.startMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java index f52837bfc80..c2c938fbd12 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java @@ -21,7 +21,14 @@ package org.apache.hadoop.hbase.util; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.Tag; +import org.apache.hadoop.hbase.TagType; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import 
org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; @@ -29,6 +36,11 @@ import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.regionserver.StoreFile; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.fail; /** * Utility class for HFile-related testing. @@ -37,15 +49,45 @@ public class HFileTestUtil { /** * Create an HFile with the given number of rows between a given - * start key and end key. + * start key and end key @ family:qualifier. The value will be the key value. + * This file will not have tags. */ public static void createHFile( Configuration configuration, FileSystem fs, Path path, byte[] family, byte[] qualifier, - byte[] startKey, byte[] endKey, int numRows) throws IOException - { - HFileContext meta = new HFileContextBuilder().build(); + byte[] startKey, byte[] endKey, int numRows) throws IOException { + createHFile(configuration, fs, path, family, qualifier, startKey, endKey, + numRows, false); + } + + /** + * Create an HFile with the given number of rows between a given + * start key and end key @ family:qualifier. The value will be the key value. + * This cells will also have a tag whose value is the key. + */ + public static void createHFileWithTags( + Configuration configuration, + FileSystem fs, Path path, + byte[] family, byte[] qualifier, + byte[] startKey, byte[] endKey, int numRows) throws IOException { + createHFile(configuration, fs, path, family, qualifier, startKey, endKey, numRows, true); + } + + /** + * Create an HFile with the given number of rows between a given + * start key and end key @ family:qualifier. + * If withTag is true, we add the rowKey as the tag value for + * tagtype ACL_TAG_TYPE + */ + public static void createHFile( + Configuration configuration, + FileSystem fs, Path path, + byte[] family, byte[] qualifier, + byte[] startKey, byte[] endKey, int numRows, boolean withTag) throws IOException { + HFileContext meta = new HFileContextBuilder() + .withIncludesTags(withTag) + .build(); HFile.Writer writer = HFile.getWriterFactory(configuration, new CacheConfig(configuration)) .withPath(fs, path) .withFileContext(meta) @@ -53,8 +95,27 @@ public class HFileTestUtil { long now = System.currentTimeMillis(); try { // subtract 2 since iterateOnSplits doesn't include boundary keys - for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows-2)) { + for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows - 2)) { KeyValue kv = new KeyValue(key, family, qualifier, now, key); + if (withTag) { + // add a tag. Arbitrarily chose mob tag since we have a helper already. + List tags = new ArrayList(); + tags.add(new Tag(TagType.ACL_TAG_TYPE, key)); + kv = new KeyValue(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), + kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), + kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), + kv.getTimestamp(), KeyValue.Type.Put, kv.getValueArray(), kv.getValueOffset(), + kv.getValueLength(), tags); + + // verify that the kv has the tag. 
+ byte[] ta = kv.getTagsArray(); + int toff = kv.getTagsOffset(); + int tlen = kv.getTagsLength(); + Tag t = Tag.getTag(ta, toff, tlen, TagType.ACL_TAG_TYPE); + if (t == null) { + throw new IllegalStateException("Tag didn't stick to KV " + kv.toString()); + } + } writer.append(kv); } } finally { @@ -63,4 +124,30 @@ public class HFileTestUtil { writer.close(); } } -} + + /** + * This verifies that each cell has a tag that is equal to its rowkey name. For this to work + * the hbase instance must have HConstants.RPC_CODEC_CONF_KEY set to + * KeyValueCodecWithTags.class.getCanonicalName()); + * @param table table containing tagged cells + * @throws IOException if problems reading table + */ + public static void verifyTags(Table table) throws IOException { + ResultScanner s = table.getScanner(new Scan()); + for (Result r : s) { + for (Cell c : r.listCells()) { + byte[] ta = c.getTagsArray(); + int toff = c.getTagsOffset(); + int tlen = c.getTagsLength(); + Tag t = Tag.getTag(ta, toff, tlen, TagType.ACL_TAG_TYPE); + if (t == null) { + fail(c.toString() + " has null tag"); + continue; + } + byte[] tval = t.getValue(); + assertArrayEquals(c.toString() + " has tag" + Bytes.toString(tval), + r.getRow(), tval); + } + } + } +} \ No newline at end of file From c2256bd2ae39611dec36ad83abde38f14c641192 Mon Sep 17 00:00:00 2001 From: tedyu Date: Fri, 25 Dec 2015 15:41:52 -0800 Subject: [PATCH 64/72] HBASE-15026 The default value of "hbase.regions.slop" in docs is obsolete (Tianyin Xu) --- hbase-common/src/main/resources/hbase-default.xml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index 759c3694883..4abefc2bae0 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -592,8 +592,10 @@ possible configurations would overwhelm and obscure the important. hbase.regions.slop - 0.2 - Rebalance if any regionserver has average + (average * slop) regions. + 0.001 + Rebalance if any regionserver has average + (average * slop) regions. + The default value of this parameter is 0.001 in StochasticLoadBalancer (the default load balancer), + while the default is 0.2 in other load balancers (i.e., SimpleLoadBalancer). 
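If the older 20% slop is still wanted with the StochasticLoadBalancer, it can be set explicitly. A small sketch of a programmatic override; the same key is normally set in hbase-site.xml, and the value below is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionSlopSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Allow region counts to drift up to 20% from the cluster average before rebalancing.
    conf.setFloat("hbase.regions.slop", 0.2f);
    System.out.println(conf.getFloat("hbase.regions.slop", 0.001f));
  }
}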
hbase.server.thread.wakefrequency From 3cc6880904261429d64f3b7472bbd82a46c7cd8d Mon Sep 17 00:00:00 2001 From: tedyu Date: Fri, 25 Dec 2015 18:16:40 -0800 Subject: [PATCH 65/72] HBASE-14800 Expose checkAndMutate via Thrift2 (Josh Elser) --- .../hbase/thrift/generated/AlreadyExists.java | 2 +- .../hbase/thrift/generated/BatchMutation.java | 2 +- .../thrift/generated/ColumnDescriptor.java | 2 +- .../hadoop/hbase/thrift/generated/Hbase.java | 2 +- .../hbase/thrift/generated/IOError.java | 2 +- .../thrift/generated/IllegalArgument.java | 2 +- .../hbase/thrift/generated/Mutation.java | 2 +- .../hbase/thrift/generated/TAppend.java | 2 +- .../hadoop/hbase/thrift/generated/TCell.java | 2 +- .../hbase/thrift/generated/TColumn.java | 2 +- .../hbase/thrift/generated/TIncrement.java | 2 +- .../hbase/thrift/generated/TRegionInfo.java | 2 +- .../hbase/thrift/generated/TRowResult.java | 2 +- .../hadoop/hbase/thrift/generated/TScan.java | 2 +- .../thrift2/ThriftHBaseServiceHandler.java | 15 + .../hadoop/hbase/thrift2/ThriftUtilities.java | 15 + .../hbase/thrift2/generated/TAppend.java | 2 +- .../thrift2/generated/TAuthorization.java | 2 +- .../thrift2/generated/TCellVisibility.java | 2 +- .../hbase/thrift2/generated/TColumn.java | 2 +- .../thrift2/generated/TColumnIncrement.java | 2 +- .../hbase/thrift2/generated/TColumnValue.java | 2 +- .../hbase/thrift2/generated/TCompareOp.java | 64 + .../hbase/thrift2/generated/TDelete.java | 2 +- .../hadoop/hbase/thrift2/generated/TGet.java | 2 +- .../thrift2/generated/THBaseService.java | 1780 ++++++++++++++++- .../hbase/thrift2/generated/THRegionInfo.java | 2 +- .../thrift2/generated/THRegionLocation.java | 2 +- .../hbase/thrift2/generated/TIOError.java | 2 +- .../thrift2/generated/TIllegalArgument.java | 2 +- .../hbase/thrift2/generated/TIncrement.java | 2 +- .../hadoop/hbase/thrift2/generated/TPut.java | 2 +- .../hbase/thrift2/generated/TResult.java | 2 +- .../thrift2/generated/TRowMutations.java | 2 +- .../hadoop/hbase/thrift2/generated/TScan.java | 2 +- .../hbase/thrift2/generated/TServerName.java | 2 +- .../hbase/thrift2/generated/TTimeRange.java | 2 +- .../apache/hadoop/hbase/thrift2/hbase.thrift | 45 + .../TestThriftHBaseServiceHandler.java | 63 +- 39 files changed, 2011 insertions(+), 37 deletions(-) create mode 100644 hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompareOp.java diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java index ba5952eaf54..b25b59a3ae8 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java @@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory; * An AlreadyExists exceptions signals that a table with the specified * name already exists */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class AlreadyExists extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlreadyExists"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java index 
718b694f290..44700a6ac5e 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * A BatchMutation object is used to apply a number of Mutations to a single row. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class BatchMutation implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("BatchMutation"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java index 4790625026c..86433d35c10 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java @@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory; * such as the number of versions, compression settings, etc. It is * used as input when creating a table or adding a column. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class ColumnDescriptor implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnDescriptor"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java index 5e2a25dfab7..744933a1d94 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class Hbase { public interface Iface { diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java index fe60ab55151..c9cd85ea541 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java @@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory; * to the Hbase master or an Hbase region server. Also used to return * more general Hbase error conditions. 
*/ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class IOError extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("IOError"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java index a0eeb96ad46..b78fc7b2960 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java @@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory; * An IllegalArgument exception indicates an illegal or invalid * argument was passed into a procedure. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class IllegalArgument extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("IllegalArgument"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java index 7401f3dede9..d453c5d6868 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * A Mutation object is used to either update or delete a column-value. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class Mutation implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Mutation"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAppend.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAppend.java index 586b9e3888e..6101ce210f0 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAppend.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAppend.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * An Append object is used to specify the parameters for performing the append operation. 
*/ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TAppend implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TAppend"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java index 29961e4b48c..42033a2e666 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java @@ -40,7 +40,7 @@ import org.slf4j.LoggerFactory; * the timestamp of a cell to a first-class value, making it easy to take * note of temporal data. Cell is used all the way from HStore up to HTable. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TCell implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCell"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TColumn.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TColumn.java index 8573c8894c2..016338f38f1 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TColumn.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TColumn.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * Holds column name and the cell. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumn"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TIncrement.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TIncrement.java index 7d40b75eea8..574db00650c 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TIncrement.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TIncrement.java @@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory; * For increments that are not incrementColumnValue * equivalents. 
*/ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TIncrement implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TIncrement"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java index bd358cb580f..3e7bc9fa242 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * A TRegionInfo contains information about an HTable region. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TRegionInfo implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRegionInfo"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java index e862f3b47c1..5ad1b7464f4 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * Holds row name and then a map of columns to cells. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TRowResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRowResult"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java index 7a4b51fad2f..bbeee927e32 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * A Scan object is used to specify scanner parameters when opening a scanner. 
*/ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TScan implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TScan"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java index 3058bf3029e..c59576e00fc 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.thrift2; import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.appendFromThrift; +import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.compareOpFromThrift; import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.deleteFromThrift; import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.deletesFromThrift; import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.getFromThrift; @@ -55,6 +56,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.thrift.ThriftMetrics; import org.apache.hadoop.hbase.thrift2.generated.TAppend; +import org.apache.hadoop.hbase.thrift2.generated.TCompareOp; import org.apache.hadoop.hbase.thrift2.generated.TDelete; import org.apache.hadoop.hbase.thrift2.generated.TGet; import org.apache.hadoop.hbase.thrift2.generated.THBaseService; @@ -298,6 +300,19 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { } return Collections.emptyList(); } + + @Override + public boolean checkAndMutate(ByteBuffer table, ByteBuffer row, ByteBuffer family, + ByteBuffer qualifier, TCompareOp compareOp, ByteBuffer value, TRowMutations rowMutations) + throws TIOError, TException { + try (final Table htable = getTable(table)) { + return htable.checkAndMutate(byteBufferToByteArray(row), byteBufferToByteArray(family), + byteBufferToByteArray(qualifier), compareOpFromThrift(compareOp), + byteBufferToByteArray(value), rowMutationsFromThrift(rowMutations)); + } catch (IOException e) { + throw getTIOError(e); + } + } @Override public boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family, diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java index 3251d139b54..d513d6614ae 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.filter.ParseFilter; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.security.visibility.CellVisibility; @@ -51,6 +52,7 @@ import org.apache.hadoop.hbase.thrift2.generated.TAppend; import org.apache.hadoop.hbase.thrift2.generated.TColumn; import 
org.apache.hadoop.hbase.thrift2.generated.TColumnIncrement; import org.apache.hadoop.hbase.thrift2.generated.TColumnValue; +import org.apache.hadoop.hbase.thrift2.generated.TCompareOp; import org.apache.hadoop.hbase.thrift2.generated.TDelete; import org.apache.hadoop.hbase.thrift2.generated.TDeleteType; import org.apache.hadoop.hbase.thrift2.generated.TDurability; @@ -531,4 +533,17 @@ public class ThriftUtilities { default: return null; } } + + public static CompareOp compareOpFromThrift(TCompareOp tCompareOp) { + switch (tCompareOp.getValue()) { + case 0: return CompareOp.LESS; + case 1: return CompareOp.LESS_OR_EQUAL; + case 2: return CompareOp.EQUAL; + case 3: return CompareOp.NOT_EQUAL; + case 4: return CompareOp.GREATER_OR_EQUAL; + case 5: return CompareOp.GREATER; + case 6: return CompareOp.NO_OP; + default: return null; + } + } } diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAppend.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAppend.java index ab5b2955ef3..65def3cbfe7 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAppend.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAppend.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TAppend implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TAppend"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAuthorization.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAuthorization.java index a7eedd04689..269ef7d4fbc 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAuthorization.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAuthorization.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TAuthorization implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TAuthorization"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java index 5812b9479a0..70f413688cd 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TCellVisibility implements org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCellVisibility"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java index 05d50ec277b..b6a5db8a977 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java @@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory; * in a HBase table by column family and optionally * a column qualifier and timestamp */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumn"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java index 4436b8f92ed..aaae0cc27d1 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * Represents a single cell and the amount to increment it by */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TColumnIncrement implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumnIncrement"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java index 763c5a13152..81be1c133f0 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * Represents a single cell and its value. 
*/ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TColumnValue implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumnValue"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompareOp.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompareOp.java new file mode 100644 index 00000000000..efbba09cbee --- /dev/null +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompareOp.java @@ -0,0 +1,64 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hbase.thrift2.generated; + + +import java.util.Map; +import java.util.HashMap; +import org.apache.thrift.TEnum; + +/** + * Thrift wrapper around + * org.apache.hadoop.hbase.filter.CompareFilter$CompareOp. + */ +public enum TCompareOp implements org.apache.thrift.TEnum { + LESS(0), + LESS_OR_EQUAL(1), + EQUAL(2), + NOT_EQUAL(3), + GREATER_OR_EQUAL(4), + GREATER(5), + NO_OP(6); + + private final int value; + + private TCompareOp(int value) { + this.value = value; + } + + /** + * Get the integer value of this enum value, as defined in the Thrift IDL. + */ + public int getValue() { + return value; + } + + /** + * Find a the enum type by its integer value, as defined in the Thrift IDL. + * @return null if the value is not found. + */ + public static TCompareOp findByValue(int value) { + switch (value) { + case 0: + return LESS; + case 1: + return LESS_OR_EQUAL; + case 2: + return EQUAL; + case 3: + return NOT_EQUAL; + case 4: + return GREATER_OR_EQUAL; + case 5: + return GREATER; + case 6: + return NO_OP; + default: + return null; + } + } +} diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDelete.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDelete.java index a57afde2060..6a3cce20113 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDelete.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDelete.java @@ -60,7 +60,7 @@ import org.slf4j.LoggerFactory; * by changing the durability. If you don't provide durability, it defaults to * column family's default setting for durability. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TDelete implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TDelete"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java index 9ed1799cf54..d498aa67aa4 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java @@ -47,7 +47,7 @@ import org.slf4j.LoggerFactory; * If you specify a time range and a timestamp the range is ignored. * Timestamps on TColumns are ignored. 
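The new Thrift2 call is easiest to see from the caller's side. A rough sketch against the generated THBaseService.Iface, assuming an already-connected client and illustrative table, row, family and qualifier values; client construction and the TMutation union follow the usual Thrift 0.9 generated conventions and are not shown in this patch:

import java.nio.ByteBuffer;
import java.util.Arrays;

import org.apache.hadoop.hbase.thrift2.generated.TColumnValue;
import org.apache.hadoop.hbase.thrift2.generated.TCompareOp;
import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
import org.apache.hadoop.hbase.thrift2.generated.TMutation;
import org.apache.hadoop.hbase.thrift2.generated.TPut;
import org.apache.hadoop.hbase.thrift2.generated.TRowMutations;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndMutateSketch {
  static boolean mutateIfValueMatches(THBaseService.Iface client) throws Exception {
    ByteBuffer table = ByteBuffer.wrap(Bytes.toBytes("t1"));
    ByteBuffer row = ByteBuffer.wrap(Bytes.toBytes("row1"));
    ByteBuffer family = ByteBuffer.wrap(Bytes.toBytes("f"));
    ByteBuffer qualifier = ByteBuffer.wrap(Bytes.toBytes("q"));
    ByteBuffer expected = ByteBuffer.wrap(Bytes.toBytes("old"));

    // Mutations applied only when the check passes: here a single put of f:q = "new".
    TPut put = new TPut(row, Arrays.asList(
        new TColumnValue(family, qualifier, ByteBuffer.wrap(Bytes.toBytes("new")))));
    TRowMutations mutations = new TRowMutations(row, Arrays.asList(TMutation.put(put)));

    // Returns true only if f:q currently equals "old" and the row was mutated.
    return client.checkAndMutate(table, row, family, qualifier,
        TCompareOp.EQUAL, expected, mutations);
  }
}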
*/ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TGet implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGet"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java index cbb54fd4dbc..a2e5f90e0e9 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class THBaseService { public interface Iface { @@ -247,6 +247,29 @@ public class THBaseService { */ public List getAllRegionLocations(ByteBuffer table) throws TIOError, org.apache.thrift.TException; + /** + * Atomically checks if a row/family/qualifier value matches the expected + * value. If it does, it mutates the row. + * + * @return true if the row was mutated, false otherwise + * + * @param table to check in and delete from + * + * @param row row to check + * + * @param family column family to check + * + * @param qualifier column qualifier to check + * + * @param compareOp comparison to make on the value + * + * @param value the expected value to be compared against, if not provided the + * check is for the non-existence of the column in question + * + * @param rowMutations row mutations to execute if the value matches + */ + public boolean checkAndMutate(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, TCompareOp compareOp, ByteBuffer value, TRowMutations rowMutations) throws TIOError, org.apache.thrift.TException; + } public interface AsyncIface { @@ -287,6 +310,8 @@ public class THBaseService { public void getAllRegionLocations(ByteBuffer table, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void checkAndMutate(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, TCompareOp compareOp, ByteBuffer value, TRowMutations rowMutations, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + } public static class Client extends org.apache.thrift.TServiceClient implements Iface { @@ -794,6 +819,38 @@ public class THBaseService { throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getAllRegionLocations failed: unknown result"); } + public boolean checkAndMutate(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, TCompareOp compareOp, ByteBuffer value, TRowMutations rowMutations) throws TIOError, org.apache.thrift.TException + { + send_checkAndMutate(table, row, family, qualifier, compareOp, value, rowMutations); + return recv_checkAndMutate(); + } + + public void send_checkAndMutate(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, TCompareOp compareOp, ByteBuffer value, TRowMutations rowMutations) throws org.apache.thrift.TException + { + 
checkAndMutate_args args = new checkAndMutate_args(); + args.setTable(table); + args.setRow(row); + args.setFamily(family); + args.setQualifier(qualifier); + args.setCompareOp(compareOp); + args.setValue(value); + args.setRowMutations(rowMutations); + sendBase("checkAndMutate", args); + } + + public boolean recv_checkAndMutate() throws TIOError, org.apache.thrift.TException + { + checkAndMutate_result result = new checkAndMutate_result(); + receiveBase(result, "checkAndMutate"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.io != null) { + throw result.io; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "checkAndMutate failed: unknown result"); + } + } public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface { public static class Factory implements org.apache.thrift.async.TAsyncClientFactory { @@ -1466,6 +1523,56 @@ public class THBaseService { } } + public void checkAndMutate(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, TCompareOp compareOp, ByteBuffer value, TRowMutations rowMutations, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + checkAndMutate_call method_call = new checkAndMutate_call(table, row, family, qualifier, compareOp, value, rowMutations, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class checkAndMutate_call extends org.apache.thrift.async.TAsyncMethodCall { + private ByteBuffer table; + private ByteBuffer row; + private ByteBuffer family; + private ByteBuffer qualifier; + private TCompareOp compareOp; + private ByteBuffer value; + private TRowMutations rowMutations; + public checkAndMutate_call(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, TCompareOp compareOp, ByteBuffer value, TRowMutations rowMutations, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.table = table; + this.row = row; + this.family = family; + this.qualifier = qualifier; + this.compareOp = compareOp; + this.value = value; + this.rowMutations = rowMutations; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("checkAndMutate", org.apache.thrift.protocol.TMessageType.CALL, 0)); + checkAndMutate_args args = new checkAndMutate_args(); + args.setTable(table); + args.setRow(row); + args.setFamily(family); + args.setQualifier(qualifier); + args.setCompareOp(compareOp); + args.setValue(value); + args.setRowMutations(rowMutations); + args.write(prot); + prot.writeMessageEnd(); + } + + public boolean getResult() throws TIOError, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = 
client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_checkAndMutate(); + } + } + } public static class Processor extends org.apache.thrift.TBaseProcessor implements org.apache.thrift.TProcessor { @@ -1497,6 +1604,7 @@ public class THBaseService { processMap.put("getScannerResults", new getScannerResults()); processMap.put("getRegionLocation", new getRegionLocation()); processMap.put("getAllRegionLocations", new getAllRegionLocations()); + processMap.put("checkAndMutate", new checkAndMutate()); return processMap; } @@ -1940,6 +2048,31 @@ public class THBaseService { } } + public static class checkAndMutate extends org.apache.thrift.ProcessFunction { + public checkAndMutate() { + super("checkAndMutate"); + } + + public checkAndMutate_args getEmptyArgsInstance() { + return new checkAndMutate_args(); + } + + protected boolean isOneway() { + return false; + } + + public checkAndMutate_result getResult(I iface, checkAndMutate_args args) throws org.apache.thrift.TException { + checkAndMutate_result result = new checkAndMutate_result(); + try { + result.success = iface.checkAndMutate(args.table, args.row, args.family, args.qualifier, args.compareOp, args.value, args.rowMutations); + result.setSuccessIsSet(true); + } catch (TIOError io) { + result.io = io; + } + return result; + } + } + } public static class AsyncProcessor extends org.apache.thrift.TBaseAsyncProcessor { @@ -1971,6 +2104,7 @@ public class THBaseService { processMap.put("getScannerResults", new getScannerResults()); processMap.put("getRegionLocation", new getRegionLocation()); processMap.put("getAllRegionLocations", new getAllRegionLocations()); + processMap.put("checkAndMutate", new checkAndMutate()); return processMap; } @@ -3009,6 +3143,64 @@ public class THBaseService { } } + public static class checkAndMutate extends org.apache.thrift.AsyncProcessFunction { + public checkAndMutate() { + super("checkAndMutate"); + } + + public checkAndMutate_args getEmptyArgsInstance() { + return new checkAndMutate_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Boolean o) { + checkAndMutate_result result = new checkAndMutate_result(); + result.success = o; + result.setSuccessIsSet(true); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + checkAndMutate_result result = new checkAndMutate_result(); + if (e instanceof TIOError) { + result.io = (TIOError) e; + result.setIoIsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, checkAndMutate_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.checkAndMutate(args.table, 
args.row, args.family, args.qualifier, args.compareOp, args.value, args.rowMutations,resultHandler); + } + } + } public static class exists_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { @@ -21434,4 +21626,1590 @@ public class THBaseService { } + public static class checkAndMutate_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("checkAndMutate_args"); + + private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField FAMILY_FIELD_DESC = new org.apache.thrift.protocol.TField("family", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField QUALIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("qualifier", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField COMPARE_OP_FIELD_DESC = new org.apache.thrift.protocol.TField("compareOp", org.apache.thrift.protocol.TType.I32, (short)5); + private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField ROW_MUTATIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("rowMutations", org.apache.thrift.protocol.TType.STRUCT, (short)7); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new checkAndMutate_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new checkAndMutate_argsTupleSchemeFactory()); + } + + /** + * to check in and delete from + */ + public ByteBuffer table; // required + /** + * row to check + */ + public ByteBuffer row; // required + /** + * column family to check + */ + public ByteBuffer family; // required + /** + * column qualifier to check + */ + public ByteBuffer qualifier; // required + /** + * comparison to make on the value + * + * @see TCompareOp + */ + public TCompareOp compareOp; // required + /** + * the expected value to be compared against, if not provided the + * check is for the non-existence of the column in question + */ + public ByteBuffer value; // required + /** + * row mutations to execute if the value matches + */ + public TRowMutations rowMutations; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + /** + * to check in and delete from + */ + TABLE((short)1, "table"), + /** + * row to check + */ + ROW((short)2, "row"), + /** + * column family to check + */ + FAMILY((short)3, "family"), + /** + * column qualifier to check + */ + QUALIFIER((short)4, "qualifier"), + /** + * comparison to make on the value + * + * @see TCompareOp + */ + COMPARE_OP((short)5, "compareOp"), + /** + * the expected value to be compared against, if not provided the + * check is for the non-existence of the column in question + */ + VALUE((short)6, "value"), + /** + * row mutations to execute if the value matches + */ + ROW_MUTATIONS((short)7, "rowMutations"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TABLE + return TABLE; + case 2: // ROW + return ROW; + case 3: // FAMILY + return FAMILY; + case 4: // QUALIFIER + return QUALIFIER; + case 5: // COMPARE_OP + return COMPARE_OP; + case 6: // VALUE + return VALUE; + case 7: // ROW_MUTATIONS + return ROW_MUTATIONS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + tmpMap.put(_Fields.FAMILY, new org.apache.thrift.meta_data.FieldMetaData("family", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + tmpMap.put(_Fields.QUALIFIER, new org.apache.thrift.meta_data.FieldMetaData("qualifier", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + tmpMap.put(_Fields.COMPARE_OP, new org.apache.thrift.meta_data.FieldMetaData("compareOp", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TCompareOp.class))); + tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + tmpMap.put(_Fields.ROW_MUTATIONS, new org.apache.thrift.meta_data.FieldMetaData("rowMutations", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRowMutations.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(checkAndMutate_args.class, metaDataMap); + } + + public checkAndMutate_args() { + } + + public checkAndMutate_args( + ByteBuffer table, + ByteBuffer row, + ByteBuffer family, + ByteBuffer qualifier, + TCompareOp compareOp, + ByteBuffer value, + TRowMutations rowMutations) + { + this(); + this.table = org.apache.thrift.TBaseHelper.copyBinary(table); + this.row = org.apache.thrift.TBaseHelper.copyBinary(row); + this.family = org.apache.thrift.TBaseHelper.copyBinary(family); + this.qualifier = org.apache.thrift.TBaseHelper.copyBinary(qualifier); + this.compareOp = compareOp; + this.value = org.apache.thrift.TBaseHelper.copyBinary(value); + this.rowMutations = rowMutations; + } + + /** + * Performs a deep copy on other. 
+ */ + public checkAndMutate_args(checkAndMutate_args other) { + if (other.isSetTable()) { + this.table = org.apache.thrift.TBaseHelper.copyBinary(other.table); + } + if (other.isSetRow()) { + this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row); + } + if (other.isSetFamily()) { + this.family = org.apache.thrift.TBaseHelper.copyBinary(other.family); + } + if (other.isSetQualifier()) { + this.qualifier = org.apache.thrift.TBaseHelper.copyBinary(other.qualifier); + } + if (other.isSetCompareOp()) { + this.compareOp = other.compareOp; + } + if (other.isSetValue()) { + this.value = org.apache.thrift.TBaseHelper.copyBinary(other.value); + } + if (other.isSetRowMutations()) { + this.rowMutations = new TRowMutations(other.rowMutations); + } + } + + public checkAndMutate_args deepCopy() { + return new checkAndMutate_args(this); + } + + @Override + public void clear() { + this.table = null; + this.row = null; + this.family = null; + this.qualifier = null; + this.compareOp = null; + this.value = null; + this.rowMutations = null; + } + + /** + * to check in and delete from + */ + public byte[] getTable() { + setTable(org.apache.thrift.TBaseHelper.rightSize(table)); + return table == null ? null : table.array(); + } + + public ByteBuffer bufferForTable() { + return org.apache.thrift.TBaseHelper.copyBinary(table); + } + + /** + * to check in and delete from + */ + public checkAndMutate_args setTable(byte[] table) { + this.table = table == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(table, table.length)); + return this; + } + + public checkAndMutate_args setTable(ByteBuffer table) { + this.table = org.apache.thrift.TBaseHelper.copyBinary(table); + return this; + } + + public void unsetTable() { + this.table = null; + } + + /** Returns true if field table is set (has been assigned a value) and false otherwise */ + public boolean isSetTable() { + return this.table != null; + } + + public void setTableIsSet(boolean value) { + if (!value) { + this.table = null; + } + } + + /** + * row to check + */ + public byte[] getRow() { + setRow(org.apache.thrift.TBaseHelper.rightSize(row)); + return row == null ? null : row.array(); + } + + public ByteBuffer bufferForRow() { + return org.apache.thrift.TBaseHelper.copyBinary(row); + } + + /** + * row to check + */ + public checkAndMutate_args setRow(byte[] row) { + this.row = row == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(row, row.length)); + return this; + } + + public checkAndMutate_args setRow(ByteBuffer row) { + this.row = org.apache.thrift.TBaseHelper.copyBinary(row); + return this; + } + + public void unsetRow() { + this.row = null; + } + + /** Returns true if field row is set (has been assigned a value) and false otherwise */ + public boolean isSetRow() { + return this.row != null; + } + + public void setRowIsSet(boolean value) { + if (!value) { + this.row = null; + } + } + + /** + * column family to check + */ + public byte[] getFamily() { + setFamily(org.apache.thrift.TBaseHelper.rightSize(family)); + return family == null ? null : family.array(); + } + + public ByteBuffer bufferForFamily() { + return org.apache.thrift.TBaseHelper.copyBinary(family); + } + + /** + * column family to check + */ + public checkAndMutate_args setFamily(byte[] family) { + this.family = family == null ? 
(ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(family, family.length)); + return this; + } + + public checkAndMutate_args setFamily(ByteBuffer family) { + this.family = org.apache.thrift.TBaseHelper.copyBinary(family); + return this; + } + + public void unsetFamily() { + this.family = null; + } + + /** Returns true if field family is set (has been assigned a value) and false otherwise */ + public boolean isSetFamily() { + return this.family != null; + } + + public void setFamilyIsSet(boolean value) { + if (!value) { + this.family = null; + } + } + + /** + * column qualifier to check + */ + public byte[] getQualifier() { + setQualifier(org.apache.thrift.TBaseHelper.rightSize(qualifier)); + return qualifier == null ? null : qualifier.array(); + } + + public ByteBuffer bufferForQualifier() { + return org.apache.thrift.TBaseHelper.copyBinary(qualifier); + } + + /** + * column qualifier to check + */ + public checkAndMutate_args setQualifier(byte[] qualifier) { + this.qualifier = qualifier == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(qualifier, qualifier.length)); + return this; + } + + public checkAndMutate_args setQualifier(ByteBuffer qualifier) { + this.qualifier = org.apache.thrift.TBaseHelper.copyBinary(qualifier); + return this; + } + + public void unsetQualifier() { + this.qualifier = null; + } + + /** Returns true if field qualifier is set (has been assigned a value) and false otherwise */ + public boolean isSetQualifier() { + return this.qualifier != null; + } + + public void setQualifierIsSet(boolean value) { + if (!value) { + this.qualifier = null; + } + } + + /** + * comparison to make on the value + * + * @see TCompareOp + */ + public TCompareOp getCompareOp() { + return this.compareOp; + } + + /** + * comparison to make on the value + * + * @see TCompareOp + */ + public checkAndMutate_args setCompareOp(TCompareOp compareOp) { + this.compareOp = compareOp; + return this; + } + + public void unsetCompareOp() { + this.compareOp = null; + } + + /** Returns true if field compareOp is set (has been assigned a value) and false otherwise */ + public boolean isSetCompareOp() { + return this.compareOp != null; + } + + public void setCompareOpIsSet(boolean value) { + if (!value) { + this.compareOp = null; + } + } + + /** + * the expected value to be compared against, if not provided the + * check is for the non-existence of the column in question + */ + public byte[] getValue() { + setValue(org.apache.thrift.TBaseHelper.rightSize(value)); + return value == null ? null : value.array(); + } + + public ByteBuffer bufferForValue() { + return org.apache.thrift.TBaseHelper.copyBinary(value); + } + + /** + * the expected value to be compared against, if not provided the + * check is for the non-existence of the column in question + */ + public checkAndMutate_args setValue(byte[] value) { + this.value = value == null ? 
(ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(value, value.length)); + return this; + } + + public checkAndMutate_args setValue(ByteBuffer value) { + this.value = org.apache.thrift.TBaseHelper.copyBinary(value); + return this; + } + + public void unsetValue() { + this.value = null; + } + + /** Returns true if field value is set (has been assigned a value) and false otherwise */ + public boolean isSetValue() { + return this.value != null; + } + + public void setValueIsSet(boolean value) { + if (!value) { + this.value = null; + } + } + + /** + * row mutations to execute if the value matches + */ + public TRowMutations getRowMutations() { + return this.rowMutations; + } + + /** + * row mutations to execute if the value matches + */ + public checkAndMutate_args setRowMutations(TRowMutations rowMutations) { + this.rowMutations = rowMutations; + return this; + } + + public void unsetRowMutations() { + this.rowMutations = null; + } + + /** Returns true if field rowMutations is set (has been assigned a value) and false otherwise */ + public boolean isSetRowMutations() { + return this.rowMutations != null; + } + + public void setRowMutationsIsSet(boolean value) { + if (!value) { + this.rowMutations = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TABLE: + if (value == null) { + unsetTable(); + } else { + setTable((ByteBuffer)value); + } + break; + + case ROW: + if (value == null) { + unsetRow(); + } else { + setRow((ByteBuffer)value); + } + break; + + case FAMILY: + if (value == null) { + unsetFamily(); + } else { + setFamily((ByteBuffer)value); + } + break; + + case QUALIFIER: + if (value == null) { + unsetQualifier(); + } else { + setQualifier((ByteBuffer)value); + } + break; + + case COMPARE_OP: + if (value == null) { + unsetCompareOp(); + } else { + setCompareOp((TCompareOp)value); + } + break; + + case VALUE: + if (value == null) { + unsetValue(); + } else { + setValue((ByteBuffer)value); + } + break; + + case ROW_MUTATIONS: + if (value == null) { + unsetRowMutations(); + } else { + setRowMutations((TRowMutations)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TABLE: + return getTable(); + + case ROW: + return getRow(); + + case FAMILY: + return getFamily(); + + case QUALIFIER: + return getQualifier(); + + case COMPARE_OP: + return getCompareOp(); + + case VALUE: + return getValue(); + + case ROW_MUTATIONS: + return getRowMutations(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TABLE: + return isSetTable(); + case ROW: + return isSetRow(); + case FAMILY: + return isSetFamily(); + case QUALIFIER: + return isSetQualifier(); + case COMPARE_OP: + return isSetCompareOp(); + case VALUE: + return isSetValue(); + case ROW_MUTATIONS: + return isSetRowMutations(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof checkAndMutate_args) + return this.equals((checkAndMutate_args)that); + return false; + } + + public boolean equals(checkAndMutate_args that) { + if (that == null) + return false; + + boolean this_present_table = true && this.isSetTable(); + boolean that_present_table = true && that.isSetTable(); + if (this_present_table || 
that_present_table) { + if (!(this_present_table && that_present_table)) + return false; + if (!this.table.equals(that.table)) + return false; + } + + boolean this_present_row = true && this.isSetRow(); + boolean that_present_row = true && that.isSetRow(); + if (this_present_row || that_present_row) { + if (!(this_present_row && that_present_row)) + return false; + if (!this.row.equals(that.row)) + return false; + } + + boolean this_present_family = true && this.isSetFamily(); + boolean that_present_family = true && that.isSetFamily(); + if (this_present_family || that_present_family) { + if (!(this_present_family && that_present_family)) + return false; + if (!this.family.equals(that.family)) + return false; + } + + boolean this_present_qualifier = true && this.isSetQualifier(); + boolean that_present_qualifier = true && that.isSetQualifier(); + if (this_present_qualifier || that_present_qualifier) { + if (!(this_present_qualifier && that_present_qualifier)) + return false; + if (!this.qualifier.equals(that.qualifier)) + return false; + } + + boolean this_present_compareOp = true && this.isSetCompareOp(); + boolean that_present_compareOp = true && that.isSetCompareOp(); + if (this_present_compareOp || that_present_compareOp) { + if (!(this_present_compareOp && that_present_compareOp)) + return false; + if (!this.compareOp.equals(that.compareOp)) + return false; + } + + boolean this_present_value = true && this.isSetValue(); + boolean that_present_value = true && that.isSetValue(); + if (this_present_value || that_present_value) { + if (!(this_present_value && that_present_value)) + return false; + if (!this.value.equals(that.value)) + return false; + } + + boolean this_present_rowMutations = true && this.isSetRowMutations(); + boolean that_present_rowMutations = true && that.isSetRowMutations(); + if (this_present_rowMutations || that_present_rowMutations) { + if (!(this_present_rowMutations && that_present_rowMutations)) + return false; + if (!this.rowMutations.equals(that.rowMutations)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_table = true && (isSetTable()); + list.add(present_table); + if (present_table) + list.add(table); + + boolean present_row = true && (isSetRow()); + list.add(present_row); + if (present_row) + list.add(row); + + boolean present_family = true && (isSetFamily()); + list.add(present_family); + if (present_family) + list.add(family); + + boolean present_qualifier = true && (isSetQualifier()); + list.add(present_qualifier); + if (present_qualifier) + list.add(qualifier); + + boolean present_compareOp = true && (isSetCompareOp()); + list.add(present_compareOp); + if (present_compareOp) + list.add(compareOp.getValue()); + + boolean present_value = true && (isSetValue()); + list.add(present_value); + if (present_value) + list.add(value); + + boolean present_rowMutations = true && (isSetRowMutations()); + list.add(present_rowMutations); + if (present_rowMutations) + list.add(rowMutations); + + return list.hashCode(); + } + + @Override + public int compareTo(checkAndMutate_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetTable()).compareTo(other.isSetTable()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTable()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.table, other.table); + if 
(lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetRow()).compareTo(other.isSetRow()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRow()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.row, other.row); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetFamily()).compareTo(other.isSetFamily()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFamily()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.family, other.family); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetQualifier()).compareTo(other.isSetQualifier()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetQualifier()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.qualifier, other.qualifier); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetCompareOp()).compareTo(other.isSetCompareOp()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCompareOp()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.compareOp, other.compareOp); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValue()).compareTo(other.isSetValue()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValue()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, other.value); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetRowMutations()).compareTo(other.isSetRowMutations()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRowMutations()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rowMutations, other.rowMutations); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("checkAndMutate_args("); + boolean first = true; + + sb.append("table:"); + if (this.table == null) { + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.table, sb); + } + first = false; + if (!first) sb.append(", "); + sb.append("row:"); + if (this.row == null) { + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.row, sb); + } + first = false; + if (!first) sb.append(", "); + sb.append("family:"); + if (this.family == null) { + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.family, sb); + } + first = false; + if (!first) sb.append(", "); + sb.append("qualifier:"); + if (this.qualifier == null) { + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.qualifier, sb); + } + first = false; + if (!first) sb.append(", "); + sb.append("compareOp:"); + if (this.compareOp == null) { + sb.append("null"); + } else { + sb.append(this.compareOp); + } + first = false; + if (!first) sb.append(", "); + sb.append("value:"); + if (this.value == null) 
{ + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.value, sb); + } + first = false; + if (!first) sb.append(", "); + sb.append("rowMutations:"); + if (this.rowMutations == null) { + sb.append("null"); + } else { + sb.append(this.rowMutations); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (table == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString()); + } + if (row == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'row' was not present! Struct: " + toString()); + } + if (family == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'family' was not present! Struct: " + toString()); + } + if (qualifier == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'qualifier' was not present! Struct: " + toString()); + } + if (compareOp == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'compareOp' was not present! Struct: " + toString()); + } + if (rowMutations == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'rowMutations' was not present! Struct: " + toString()); + } + // check for sub-struct validity + if (rowMutations != null) { + rowMutations.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class checkAndMutate_argsStandardSchemeFactory implements SchemeFactory { + public checkAndMutate_argsStandardScheme getScheme() { + return new checkAndMutate_argsStandardScheme(); + } + } + + private static class checkAndMutate_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndMutate_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TABLE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.table = iprot.readBinary(); + struct.setTableIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // ROW + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.row = iprot.readBinary(); + struct.setRowIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // FAMILY + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.family = iprot.readBinary(); + struct.setFamilyIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // QUALIFIER + if 
(schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.qualifier = iprot.readBinary(); + struct.setQualifierIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // COMPARE_OP + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.compareOp = org.apache.hadoop.hbase.thrift2.generated.TCompareOp.findByValue(iprot.readI32()); + struct.setCompareOpIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 6: // VALUE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.value = iprot.readBinary(); + struct.setValueIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 7: // ROW_MUTATIONS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.rowMutations = new TRowMutations(); + struct.rowMutations.read(iprot); + struct.setRowMutationsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndMutate_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.table != null) { + oprot.writeFieldBegin(TABLE_FIELD_DESC); + oprot.writeBinary(struct.table); + oprot.writeFieldEnd(); + } + if (struct.row != null) { + oprot.writeFieldBegin(ROW_FIELD_DESC); + oprot.writeBinary(struct.row); + oprot.writeFieldEnd(); + } + if (struct.family != null) { + oprot.writeFieldBegin(FAMILY_FIELD_DESC); + oprot.writeBinary(struct.family); + oprot.writeFieldEnd(); + } + if (struct.qualifier != null) { + oprot.writeFieldBegin(QUALIFIER_FIELD_DESC); + oprot.writeBinary(struct.qualifier); + oprot.writeFieldEnd(); + } + if (struct.compareOp != null) { + oprot.writeFieldBegin(COMPARE_OP_FIELD_DESC); + oprot.writeI32(struct.compareOp.getValue()); + oprot.writeFieldEnd(); + } + if (struct.value != null) { + oprot.writeFieldBegin(VALUE_FIELD_DESC); + oprot.writeBinary(struct.value); + oprot.writeFieldEnd(); + } + if (struct.rowMutations != null) { + oprot.writeFieldBegin(ROW_MUTATIONS_FIELD_DESC); + struct.rowMutations.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class checkAndMutate_argsTupleSchemeFactory implements SchemeFactory { + public checkAndMutate_argsTupleScheme getScheme() { + return new checkAndMutate_argsTupleScheme(); + } + } + + private static class checkAndMutate_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, checkAndMutate_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeBinary(struct.table); + oprot.writeBinary(struct.row); + oprot.writeBinary(struct.family); + oprot.writeBinary(struct.qualifier); + oprot.writeI32(struct.compareOp.getValue()); + struct.rowMutations.write(oprot); + BitSet optionals = new BitSet(); + if (struct.isSetValue()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetValue()) { + oprot.writeBinary(struct.value); + } + } + + 
@Override + public void read(org.apache.thrift.protocol.TProtocol prot, checkAndMutate_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.table = iprot.readBinary(); + struct.setTableIsSet(true); + struct.row = iprot.readBinary(); + struct.setRowIsSet(true); + struct.family = iprot.readBinary(); + struct.setFamilyIsSet(true); + struct.qualifier = iprot.readBinary(); + struct.setQualifierIsSet(true); + struct.compareOp = org.apache.hadoop.hbase.thrift2.generated.TCompareOp.findByValue(iprot.readI32()); + struct.setCompareOpIsSet(true); + struct.rowMutations = new TRowMutations(); + struct.rowMutations.read(iprot); + struct.setRowMutationsIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.value = iprot.readBinary(); + struct.setValueIsSet(true); + } + } + } + + } + + public static class checkAndMutate_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("checkAndMutate_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0); + private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new checkAndMutate_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new checkAndMutate_resultTupleSchemeFactory()); + } + + public boolean success; // required + public TIOError io; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + IO((short)1, "io"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // IO + return IO; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __SUCCESS_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(checkAndMutate_result.class, metaDataMap); + } + + public checkAndMutate_result() { + } + + public checkAndMutate_result( + boolean success, + TIOError io) + { + this(); + this.success = success; + setSuccessIsSet(true); + this.io = io; + } + + /** + * Performs a deep copy on other. + */ + public checkAndMutate_result(checkAndMutate_result other) { + __isset_bitfield = other.__isset_bitfield; + this.success = other.success; + if (other.isSetIo()) { + this.io = new TIOError(other.io); + } + } + + public checkAndMutate_result deepCopy() { + return new checkAndMutate_result(this); + } + + @Override + public void clear() { + setSuccessIsSet(false); + this.success = false; + this.io = null; + } + + public boolean isSuccess() { + return this.success; + } + + public checkAndMutate_result setSuccess(boolean success) { + this.success = success; + setSuccessIsSet(true); + return this; + } + + public void unsetSuccess() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID); + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID); + } + + public void setSuccessIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value); + } + + public TIOError getIo() { + return this.io; + } + + public checkAndMutate_result setIo(TIOError io) { + this.io = io; + return this; + } + + public void unsetIo() { + this.io = null; + } + + /** Returns true if field io is set (has been assigned a value) and false otherwise */ + public boolean isSetIo() { + return this.io != null; + } + + public void setIoIsSet(boolean value) { + if (!value) { + this.io = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((Boolean)value); + } + break; + + case IO: + if (value == null) { + unsetIo(); + } else { + setIo((TIOError)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return isSuccess(); + + case IO: + return getIo(); + + } + 
throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case IO: + return isSetIo(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof checkAndMutate_result) + return this.equals((checkAndMutate_result)that); + return false; + } + + public boolean equals(checkAndMutate_result that) { + if (that == null) + return false; + + boolean this_present_success = true; + boolean that_present_success = true; + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (this.success != that.success) + return false; + } + + boolean this_present_io = true && this.isSetIo(); + boolean that_present_io = true && that.isSetIo(); + if (this_present_io || that_present_io) { + if (!(this_present_io && that_present_io)) + return false; + if (!this.io.equals(that.io)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true; + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_io = true && (isSetIo()); + list.add(present_io); + if (present_io) + list.add(io); + + return list.hashCode(); + } + + @Override + public int compareTo(checkAndMutate_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetIo()).compareTo(other.isSetIo()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIo()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.io, other.io); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("checkAndMutate_result("); + boolean first = true; + + sb.append("success:"); + sb.append(this.success); + first = false; + if (!first) sb.append(", "); + sb.append("io:"); + if (this.io == null) { + sb.append("null"); + } else { + sb.append(this.io); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new 
org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class checkAndMutate_resultStandardSchemeFactory implements SchemeFactory { + public checkAndMutate_resultStandardScheme getScheme() { + return new checkAndMutate_resultStandardScheme(); + } + } + + private static class checkAndMutate_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndMutate_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // IO + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.io = new TIOError(); + struct.io.read(iprot); + struct.setIoIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndMutate_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.isSetSuccess()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + oprot.writeBool(struct.success); + oprot.writeFieldEnd(); + } + if (struct.io != null) { + oprot.writeFieldBegin(IO_FIELD_DESC); + struct.io.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class checkAndMutate_resultTupleSchemeFactory implements SchemeFactory { + public checkAndMutate_resultTupleScheme getScheme() { + return new checkAndMutate_resultTupleScheme(); + } + } + + private static class checkAndMutate_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, checkAndMutate_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetIo()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + oprot.writeBool(struct.success); + } + if (struct.isSetIo()) { + struct.io.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, checkAndMutate_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = 
(TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.io = new TIOError(); + struct.io.read(iprot); + struct.setIoIsSet(true); + } + } + } + + } + } diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java index 16f7116748f..2055c2e2a6c 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class THRegionInfo implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("THRegionInfo"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java index 43a3c76a2d8..8e4649918b2 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class THRegionLocation implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("THRegionLocation"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java index 6fbacf9ad0a..fa68dbf598b 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java @@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory; * to the HBase master or a HBase region server. Also used to return * more general HBase error conditions. 
*/ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TIOError extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TIOError"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java index f8436418764..c2927df8f11 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java @@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory; * A TIllegalArgument exception indicates an illegal or invalid * argument was passed into a procedure. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TIllegalArgument extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TIllegalArgument"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIncrement.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIncrement.java index f932db4f857..881626fb2a1 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIncrement.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIncrement.java @@ -41,7 +41,7 @@ import org.slf4j.LoggerFactory; * by changing the durability. If you don't provide durability, it defaults to * column family's default setting for durability. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TIncrement implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TIncrement"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPut.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPut.java index 0e96dedd9cc..91172daca0e 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPut.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPut.java @@ -46,7 +46,7 @@ import org.slf4j.LoggerFactory; * by changing the durability. If you don't provide durability, it defaults to * column family's default setting for durability. 
*/ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TPut implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TPut"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java index bac6f5e11c5..8c03b22bda9 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * if no Result is found, row and columnValues will not be set. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TResult"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TRowMutations.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TRowMutations.java index 72469744504..aae0e54b6d4 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TRowMutations.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TRowMutations.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * A TRowMutations object is used to apply a number of Mutations to a single row. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TRowMutations implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRowMutations"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java index a55216840f2..c4a2bb9926a 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java @@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory; * Any timestamps in the columns are ignored, use timeRange to select by timestamp. * Max versions defaults to 1. 
*/ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TScan implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TScan"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TServerName.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TServerName.java index 0813faa52ce..b870eefaeab 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TServerName.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TServerName.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TServerName implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TServerName"); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java index e3ef0b7c824..03b47e612f5 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-11-22") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-25") public class TTimeRange implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTimeRange"); diff --git a/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift b/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift index 4ad4cddb7cf..8afeef1cb0f 100644 --- a/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift +++ b/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift @@ -263,6 +263,21 @@ struct THRegionLocation { 2: required THRegionInfo regionInfo } +/** + * Thrift wrapper around + * org.apache.hadoop.hbase.filter.CompareFilter$CompareOp. + */ +enum TCompareOp { + LESS = 0, + LESS_OR_EQUAL = 1, + EQUAL = 2, + NOT_EQUAL = 3, + GREATER_OR_EQUAL = 4, + GREATER = 5, + NO_OP = 6 +} + + // // Exceptions // @@ -556,4 +571,34 @@ service THBaseService { ) throws ( 1: TIOError io ) + + /** + * Atomically checks if a row/family/qualifier value matches the expected + * value. If it does, it mutates the row. 
+ * + * @return true if the row was mutated, false otherwise + */ + bool checkAndMutate( + /** to check in and delete from */ + 1: required binary table, + + /** row to check */ + 2: required binary row, + + /** column family to check */ + 3: required binary family, + + /** column qualifier to check */ + 4: required binary qualifier, + + /** comparison to make on the value */ + 5: required TCompareOp compareOp, + + /** the expected value to be compared against, if not provided the + check is for the non-existence of the column in question */ + 6: binary value, + + /** row mutations to execute if the value matches */ + 7: required TRowMutations rowMutations + ) throws (1: TIOError io) } diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java index 1575429d86a..568ab0f9bac 100644 --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.thrift2.generated.TAppend; import org.apache.hadoop.hbase.thrift2.generated.TColumn; import org.apache.hadoop.hbase.thrift2.generated.TColumnIncrement; import org.apache.hadoop.hbase.thrift2.generated.TColumnValue; +import org.apache.hadoop.hbase.thrift2.generated.TCompareOp; import org.apache.hadoop.hbase.thrift2.generated.TDelete; import org.apache.hadoop.hbase.thrift2.generated.TDeleteType; import org.apache.hadoop.hbase.thrift2.generated.TGet; @@ -67,6 +68,7 @@ import org.junit.experimental.categories.Category; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.List; @@ -125,12 +127,16 @@ public class TestThriftHBaseServiceHandler { for (int i = 0; i < columnValuesA.size(); i++) { TColumnValue a = columnValuesA.get(i); TColumnValue b = columnValuesB.get(i); - assertArrayEquals(a.getFamily(), b.getFamily()); - assertArrayEquals(a.getQualifier(), b.getQualifier()); - assertArrayEquals(a.getValue(), b.getValue()); + assertTColumnValueEqual(a, b); } } + public void assertTColumnValueEqual(TColumnValue a, TColumnValue b) { + assertArrayEquals(a.getFamily(), b.getFamily()); + assertArrayEquals(a.getQualifier(), b.getQualifier()); + assertArrayEquals(a.getValue(), b.getValue()); + } + @BeforeClass public static void beforeClass() throws Exception { UTIL.startMiniCluster(); @@ -1087,5 +1093,56 @@ public class TestThriftHBaseServiceHandler { increment = incrementFromThrift(tIncrement); assertEquals(increment.getDurability(), Durability.FSYNC_WAL); } + + @Test + public void testCheckAndMutate() throws Exception { + ThriftHBaseServiceHandler handler = createHandler(); + ByteBuffer table = wrap(tableAname); + ByteBuffer row = wrap("row".getBytes()); + ByteBuffer family = wrap(familyAname); + ByteBuffer qualifier = wrap(qualifierAname); + ByteBuffer value = wrap(valueAname); + + // Create a mutation to write to 'B', our "mutate" of "checkAndMutate" + List columnValuesB = new ArrayList(); + TColumnValue columnValueB = new TColumnValue(family, wrap(qualifierBname), wrap(valueBname)); + columnValuesB.add(columnValueB); + TPut putB = new TPut(row, columnValuesB); + putB.setColumnValues(columnValuesB); + + TRowMutations tRowMutations = new TRowMutations(row, + Arrays. 
asList(TMutation.put(putB))); + + // Empty table when we begin + TResult result = handler.get(table, new TGet(row)); + assertEquals(0, result.getColumnValuesSize()); + + // checkAndMutate -- condition should fail because the value doesn't exist. + assertFalse("Expected condition to not pass", + handler.checkAndMutate(table, row, family, qualifier, TCompareOp.EQUAL, value, + tRowMutations)); + + List columnValuesA = new ArrayList(); + TColumnValue columnValueA = new TColumnValue(family, qualifier, value); + columnValuesA.add(columnValueA); + + // Put an update 'A' + handler.put(table, new TPut(row, columnValuesA)); + + // Verify that the update is there + result = handler.get(table, new TGet(row)); + assertEquals(1, result.getColumnValuesSize()); + assertTColumnValueEqual(columnValueA, result.getColumnValues().get(0)); + + // checkAndMutate -- condition should pass since we added the value + assertTrue("Expected condition to pass", + handler.checkAndMutate(table, row, family, qualifier, TCompareOp.EQUAL, value, + tRowMutations)); + + result = handler.get(table, new TGet(row)); + assertEquals(2, result.getColumnValuesSize()); + assertTColumnValueEqual(columnValueA, result.getColumnValues().get(0)); + assertTColumnValueEqual(columnValueB, result.getColumnValues().get(1)); + } } From 1098dfd918f76eb6cc6ccd2a6b423e876d782f0d Mon Sep 17 00:00:00 2001 From: stack Date: Mon, 28 Dec 2015 13:27:17 -0800 Subject: [PATCH 66/72] HBASE-15023 Reenable TestShell and TestStochasticLoadBalancer --- .../balancer/TestStochasticLoadBalancer.java | 532 ++++++++++++++++++ .../apache/hadoop/hbase/client/TestShell.java | 39 ++ 2 files changed, 571 insertions(+) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java create mode 100644 hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java new file mode 100644 index 00000000000..7abbeb4d437 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java @@ -0,0 +1,532 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.balancer; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Queue; +import java.util.TreeMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ClusterStatus; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.RegionLoad; +import org.apache.hadoop.hbase.ServerLoad; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; +import org.apache.hadoop.hbase.master.RackManager; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster; +import org.apache.hadoop.hbase.testclassification.FlakeyTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({FlakeyTests.class, MediumTests.class}) +public class TestStochasticLoadBalancer extends BalancerTestBase { + public static final String REGION_KEY = "testRegion"; + private static final Log LOG = LogFactory.getLog(TestStochasticLoadBalancer.class); + + @Test + public void testKeepRegionLoad() throws Exception { + + ServerName sn = ServerName.valueOf("test:8080", 100); + int numClusterStatusToAdd = 20000; + for (int i = 0; i < numClusterStatusToAdd; i++) { + ServerLoad sl = mock(ServerLoad.class); + + RegionLoad rl = mock(RegionLoad.class); + when(rl.getStores()).thenReturn(i); + + Map regionLoadMap = + new TreeMap(Bytes.BYTES_COMPARATOR); + regionLoadMap.put(Bytes.toBytes(REGION_KEY), rl); + when(sl.getRegionsLoad()).thenReturn(regionLoadMap); + + ClusterStatus clusterStatus = mock(ClusterStatus.class); + when(clusterStatus.getServers()).thenReturn(Arrays.asList(sn)); + when(clusterStatus.getLoad(sn)).thenReturn(sl); + + loadBalancer.setClusterStatus(clusterStatus); + } + assertTrue(loadBalancer.loads.get(REGION_KEY) != null); + assertTrue(loadBalancer.loads.get(REGION_KEY).size() == 15); + + Queue loads = loadBalancer.loads.get(REGION_KEY); + int i = 0; + while(loads.size() > 0) { + RegionLoad rl = loads.remove(); + assertEquals(i + (numClusterStatusToAdd - 15), rl.getStores()); + i ++; + } + } + + /** + * Test the load balancing algorithm. 
+ * + * Invariant is that all servers should be hosting either floor(average) or + * ceiling(average) + * + * @throws Exception + */ + @Test + public void testBalanceCluster() throws Exception { + + for (int[] mockCluster : clusterStateMocks) { + Map> servers = mockClusterServers(mockCluster); + List list = convertToList(servers); + LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list)); + List plans = loadBalancer.balanceCluster(servers); + List balancedCluster = reconcile(list, plans, servers); + LOG.info("Mock Balance : " + printMock(balancedCluster)); + assertClusterAsBalanced(balancedCluster); + List secondPlans = loadBalancer.balanceCluster(servers); + assertNull(secondPlans); + for (Map.Entry> entry : servers.entrySet()) { + returnRegions(entry.getValue()); + returnServer(entry.getKey()); + } + } + + } + + @Test + public void testMoveCost() throws Exception { + Configuration conf = HBaseConfiguration.create(); + StochasticLoadBalancer.CostFunction + costFunction = new StochasticLoadBalancer.MoveCostFunction(conf); + for (int[] mockCluster : clusterStateMocks) { + BaseLoadBalancer.Cluster cluster = mockCluster(mockCluster); + costFunction.init(cluster); + double cost = costFunction.cost(); + assertEquals(0.0f, cost, 0.001); + + // cluster region number is smaller than maxMoves=600 + cluster.setNumRegions(200); + cluster.setNumMovedRegions(10); + cost = costFunction.cost(); + assertEquals(0.05f, cost, 0.001); + cluster.setNumMovedRegions(100); + cost = costFunction.cost(); + assertEquals(0.5f, cost, 0.001); + cluster.setNumMovedRegions(200); + cost = costFunction.cost(); + assertEquals(1.0f, cost, 0.001); + + + // cluster region number is bigger than maxMoves=2500 + cluster.setNumRegions(10000); + cluster.setNumMovedRegions(250); + cost = costFunction.cost(); + assertEquals(0.1f, cost, 0.001); + cluster.setNumMovedRegions(1250); + cost = costFunction.cost(); + assertEquals(0.5f, cost, 0.001); + cluster.setNumMovedRegions(2500); + cost = costFunction.cost(); + assertEquals(1.0f, cost, 0.01); + } + } + + @Test + public void testSkewCost() { + Configuration conf = HBaseConfiguration.create(); + StochasticLoadBalancer.CostFunction + costFunction = new StochasticLoadBalancer.RegionCountSkewCostFunction(conf); + for (int[] mockCluster : clusterStateMocks) { + costFunction.init(mockCluster(mockCluster)); + double cost = costFunction.cost(); + assertTrue(cost >= 0); + assertTrue(cost <= 1.01); + } + + costFunction.init(mockCluster(new int[]{0, 0, 0, 0, 1})); + assertEquals(0,costFunction.cost(), 0.01); + costFunction.init(mockCluster(new int[]{0, 0, 0, 1, 1})); + assertEquals(0, costFunction.cost(), 0.01); + costFunction.init(mockCluster(new int[]{0, 0, 1, 1, 1})); + assertEquals(0, costFunction.cost(), 0.01); + costFunction.init(mockCluster(new int[]{0, 1, 1, 1, 1})); + assertEquals(0, costFunction.cost(), 0.01); + costFunction.init(mockCluster(new int[]{1, 1, 1, 1, 1})); + assertEquals(0, costFunction.cost(), 0.01); + costFunction.init(mockCluster(new int[]{10000, 0, 0, 0, 0})); + assertEquals(1, costFunction.cost(), 0.01); + } + + @Test + public void testTableSkewCost() { + Configuration conf = HBaseConfiguration.create(); + StochasticLoadBalancer.CostFunction + costFunction = new StochasticLoadBalancer.TableSkewCostFunction(conf); + for (int[] mockCluster : clusterStateMocks) { + BaseLoadBalancer.Cluster cluster = mockCluster(mockCluster); + costFunction.init(cluster); + double cost = costFunction.cost(); + assertTrue(cost >= 0); + assertTrue(cost <= 1.01); + } + } + + 
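  // An illustrative sketch, not part of the patch above: the invariant that
  // testBalanceCluster relies on (via assertClusterAsBalanced, presumably inherited
  // from BalancerTestBase) is that after balancing every server hosts either
  // floor(totalRegions / numServers) or ceil(totalRegions / numServers) regions.
  // The helper name below is hypothetical and only spells out that arithmetic.
  private static boolean hostsFloorOrCeilingOfAverage(int[] regionsPerServer) {
    int servers = regionsPerServer.length;
    if (servers == 0) {
      return true; // nothing to check on an empty cluster
    }
    int totalRegions = 0;
    for (int count : regionsPerServer) {
      totalRegions += count;
    }
    int floor = totalRegions / servers;
    int ceiling = (totalRegions + servers - 1) / servers;
    for (int count : regionsPerServer) {
      if (count < floor || count > ceiling) {
        return false;
      }
    }
    return true;
  }
  // Example: 20 regions over 3 servers gives floor=6, ceiling=7, so a 7/7/6 split
  // satisfies the invariant while 8/6/6 does not.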
@Test + public void testCostFromArray() { + Configuration conf = HBaseConfiguration.create(); + StochasticLoadBalancer.CostFromRegionLoadFunction + costFunction = new StochasticLoadBalancer.MemstoreSizeCostFunction(conf); + costFunction.init(mockCluster(new int[]{0, 0, 0, 0, 1})); + + double[] statOne = new double[100]; + for (int i =0; i < 100; i++) { + statOne[i] = 10; + } + assertEquals(0, costFunction.costFromArray(statOne), 0.01); + + double[] statTwo= new double[101]; + for (int i =0; i < 100; i++) { + statTwo[i] = 0; + } + statTwo[100] = 100; + assertEquals(1, costFunction.costFromArray(statTwo), 0.01); + + double[] statThree = new double[200]; + for (int i =0; i < 100; i++) { + statThree[i] = (0); + statThree[i+100] = 100; + } + assertEquals(0.5, costFunction.costFromArray(statThree), 0.01); + } + + @Test(timeout = 60000) + public void testLosingRs() throws Exception { + int numNodes = 3; + int numRegions = 20; + int numRegionsPerServer = 3; //all servers except one + int replication = 1; + int numTables = 2; + + Map> serverMap = + createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); + List list = convertToList(serverMap); + + + List plans = loadBalancer.balanceCluster(serverMap); + assertNotNull(plans); + + // Apply the plan to the mock cluster. + List balancedCluster = reconcile(list, plans, serverMap); + + assertClusterAsBalanced(balancedCluster); + + ServerName sn = serverMap.keySet().toArray(new ServerName[serverMap.size()])[0]; + + ServerName deadSn = ServerName.valueOf(sn.getHostname(), sn.getPort(), sn.getStartcode() - 100); + + serverMap.put(deadSn, new ArrayList(0)); + + plans = loadBalancer.balanceCluster(serverMap); + assertNull(plans); + } + + @Test + public void testReplicaCost() { + Configuration conf = HBaseConfiguration.create(); + StochasticLoadBalancer.CostFunction + costFunction = new StochasticLoadBalancer.RegionReplicaHostCostFunction(conf); + for (int[] mockCluster : clusterStateMocks) { + BaseLoadBalancer.Cluster cluster = mockCluster(mockCluster); + costFunction.init(cluster); + double cost = costFunction.cost(); + assertTrue(cost >= 0); + assertTrue(cost <= 1.01); + } + } + + @Test + public void testReplicaCostForReplicas() { + Configuration conf = HBaseConfiguration.create(); + StochasticLoadBalancer.CostFunction + costFunction = new StochasticLoadBalancer.RegionReplicaHostCostFunction(conf); + + int [] servers = new int[] {3,3,3,3,3}; + TreeMap> clusterState = mockClusterServers(servers); + + BaseLoadBalancer.Cluster cluster; + + cluster = new BaseLoadBalancer.Cluster(clusterState, null, null, null); + costFunction.init(cluster); + double costWithoutReplicas = costFunction.cost(); + assertEquals(0, costWithoutReplicas, 0); + + // replicate the region from first server to the last server + HRegionInfo replica1 = RegionReplicaUtil.getRegionInfoForReplica( + clusterState.firstEntry().getValue().get(0),1); + clusterState.lastEntry().getValue().add(replica1); + + cluster = new BaseLoadBalancer.Cluster(clusterState, null, null, null); + costFunction.init(cluster); + double costWith1ReplicaDifferentServer = costFunction.cost(); + + assertEquals(0, costWith1ReplicaDifferentServer, 0); + + // add a third replica to the last server + HRegionInfo replica2 = RegionReplicaUtil.getRegionInfoForReplica(replica1, 2); + clusterState.lastEntry().getValue().add(replica2); + + cluster = new BaseLoadBalancer.Cluster(clusterState, null, null, null); + costFunction.init(cluster); + double costWith1ReplicaSameServer = costFunction.cost(); + + 
assertTrue(costWith1ReplicaDifferentServer < costWith1ReplicaSameServer); + + // test with replication = 4 for following: + + HRegionInfo replica3; + Iterator>> it; + Entry> entry; + + clusterState = mockClusterServers(servers); + it = clusterState.entrySet().iterator(); + entry = it.next(); //first server + HRegionInfo hri = entry.getValue().get(0); + replica1 = RegionReplicaUtil.getRegionInfoForReplica(hri, 1); + replica2 = RegionReplicaUtil.getRegionInfoForReplica(hri, 2); + replica3 = RegionReplicaUtil.getRegionInfoForReplica(hri, 3); + entry.getValue().add(replica1); + entry.getValue().add(replica2); + it.next().getValue().add(replica3); //2nd server + + cluster = new BaseLoadBalancer.Cluster(clusterState, null, null, null); + costFunction.init(cluster); + double costWith3ReplicasSameServer = costFunction.cost(); + + clusterState = mockClusterServers(servers); + hri = clusterState.firstEntry().getValue().get(0); + replica1 = RegionReplicaUtil.getRegionInfoForReplica(hri, 1); + replica2 = RegionReplicaUtil.getRegionInfoForReplica(hri, 2); + replica3 = RegionReplicaUtil.getRegionInfoForReplica(hri, 3); + + clusterState.firstEntry().getValue().add(replica1); + clusterState.lastEntry().getValue().add(replica2); + clusterState.lastEntry().getValue().add(replica3); + + cluster = new BaseLoadBalancer.Cluster(clusterState, null, null, null); + costFunction.init(cluster); + double costWith2ReplicasOnTwoServers = costFunction.cost(); + + assertTrue(costWith2ReplicasOnTwoServers < costWith3ReplicasSameServer); + } + + @Test + public void testNeedsBalanceForColocatedReplicas() { + // check for the case where there are two hosts and with one rack, and where + // both the replicas are hosted on the same server + List regions = randomRegions(1); + ServerName s1 = ServerName.valueOf("host1", 1000, 11111); + ServerName s2 = ServerName.valueOf("host11", 1000, 11111); + Map> map = new HashMap>(); + map.put(s1, regions); + regions.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(0), 1)); + // until the step above s1 holds two replicas of a region + regions = randomRegions(1); + map.put(s2, regions); + assertTrue(loadBalancer.needsBalance(new Cluster(map, null, null, null))); + // check for the case where there are two hosts on the same rack and there are two racks + // and both the replicas are on the same rack + map.clear(); + regions = randomRegions(1); + List regionsOnS2 = new ArrayList(1); + regionsOnS2.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(0), 1)); + map.put(s1, regions); + map.put(s2, regionsOnS2); + // add another server so that the cluster has some host on another rack + map.put(ServerName.valueOf("host2", 1000, 11111), randomRegions(1)); + assertTrue(loadBalancer.needsBalance(new Cluster(map, null, null, + new ForTestRackManagerOne()))); + } + + @Test (timeout = 60000) + public void testSmallCluster() { + int numNodes = 10; + int numRegions = 1000; + int numRegionsPerServer = 40; //all servers except one + int replication = 1; + int numTables = 10; + testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true); + } + + @Test (timeout = 60000) + public void testSmallCluster2() { + int numNodes = 20; + int numRegions = 2000; + int numRegionsPerServer = 40; //all servers except one + int replication = 1; + int numTables = 10; + testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true); + } + + @Test (timeout = 60000) + public void testSmallCluster3() { + int numNodes = 20; + int numRegions = 2000; + 
int numRegionsPerServer = 1; // all servers except one + int replication = 1; + int numTables = 10; + /* fails because of max moves */ + testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, false, false); + } + + @Test (timeout = 800000) + public void testMidCluster() { + int numNodes = 100; + int numRegions = 10000; + int numRegionsPerServer = 60; // all servers except one + int replication = 1; + int numTables = 40; + testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true); + } + + @Test (timeout = 800000) + public void testMidCluster2() { + int numNodes = 200; + int numRegions = 100000; + int numRegionsPerServer = 40; // all servers except one + int replication = 1; + int numTables = 400; + testWithCluster(numNodes, + numRegions, + numRegionsPerServer, + replication, + numTables, + false, /* num large num regions means may not always get to best balance with one run */ + false); + } + + + @Test (timeout = 800000) + public void testMidCluster3() { + int numNodes = 100; + int numRegions = 2000; + int numRegionsPerServer = 9; // all servers except one + int replication = 1; + int numTables = 110; + testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true); + // TODO(eclark): Make sure that the tables are well distributed. + } + + @Test + public void testLargeCluster() { + int numNodes = 1000; + int numRegions = 100000; //100 regions per RS + int numRegionsPerServer = 80; //all servers except one + int numTables = 100; + int replication = 1; + testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true); + } + + @Test (timeout = 800000) + public void testRegionReplicasOnSmallCluster() { + int numNodes = 10; + int numRegions = 1000; + int replication = 3; // 3 replicas per region + int numRegionsPerServer = 80; //all regions are mostly balanced + int numTables = 10; + testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true); + } + + @Test (timeout = 800000) + public void testRegionReplicationOnMidClusterSameHosts() { + conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L); + conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 90 * 1000); // 90 sec + conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); + loadBalancer.setConf(conf); + int numHosts = 100; + int numRegions = 100 * 100; + int replication = 3; // 3 replicas per region + int numRegionsPerServer = 5; + int numTables = 10; + Map> serverMap = + createServerMap(numHosts, numRegions, numRegionsPerServer, replication, numTables); + int numNodesPerHost = 4; + + // create a new map with 4 RS per host. + Map> newServerMap = new TreeMap>(serverMap); + for (Map.Entry> entry : serverMap.entrySet()) { + for (int i=1; i < numNodesPerHost; i++) { + ServerName s1 = entry.getKey(); + ServerName s2 = ServerName.valueOf(s1.getHostname(), s1.getPort() + i, 1); // create an RS for the same host + newServerMap.put(s2, new ArrayList()); + } + } + + testWithCluster(newServerMap, null, true, true); + } + + private static class ForTestRackManager extends RackManager { + int numRacks; + public ForTestRackManager(int numRacks) { + this.numRacks = numRacks; + } + @Override + public String getRack(ServerName server) { + return "rack_" + (server.hashCode() % numRacks); + } + } + + private static class ForTestRackManagerOne extends RackManager { + @Override + public String getRack(ServerName server) { + return server.getHostname().endsWith("1") ? 
"rack1" : "rack2"; + } + } + + @Test (timeout = 800000) + public void testRegionReplicationOnMidClusterWithRacks() { + conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 10000000L); + conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); + conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 120 * 1000); // 120 sec + loadBalancer.setConf(conf); + int numNodes = 30; + int numRegions = numNodes * 30; + int replication = 3; // 3 replicas per region + int numRegionsPerServer = 28; + int numTables = 10; + int numRacks = 4; // all replicas should be on a different rack + Map> serverMap = + createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); + RackManager rm = new ForTestRackManager(numRacks); + + testWithCluster(serverMap, rm, false, true); + } + +} diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java new file mode 100644 index 00000000000..976ba45078d --- /dev/null +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java @@ -0,0 +1,39 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; + +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.jruby.embed.PathType; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ ClientTests.class, LargeTests.class }) +public class TestShell extends AbstractTestShell { + + @Test + public void testRunShellTests() throws IOException { + System.setProperty("shell.test.exclude", "replication_admin_test.rb"); + // Start all ruby tests + jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); + } + +} From c4173777dd776b23c9a09b3eca827690fc6ee840 Mon Sep 17 00:00:00 2001 From: tedyu Date: Tue, 29 Dec 2015 06:25:14 -0800 Subject: [PATCH 67/72] HBASE-14867 SimpleRegionNormalizer needs to have better heuristics to trigger merge operation --- .../normalizer/SimpleRegionNormalizer.java | 108 ++++++++---------- .../TestSimpleRegionNormalizer.java | 43 ++++++- 2 files changed, 91 insertions(+), 60 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index 659b3dc02d1..fe6034b4deb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -27,8 +27,11 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.Triple; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; import java.util.List; /** @@ -66,6 +69,22 @@ public class SimpleRegionNormalizer implements RegionNormalizer { this.masterServices = masterServices; } + /* + * This comparator compares the region size. + * The second element in the triple is region size while the 3rd element + * is the index of the region in the underlying List + */ + private Comparator> regionSizeComparator = + new Comparator>() { + @Override + public int compare(Triple pair, + Triple pair2) { + long sz = pair.getSecond(); + long sz2 = pair2.getSecond(); + return (sz < sz2) ? -1 : ((sz == sz2) ? 0 : 1); + } + }; + /** * Computes next most "urgent" normalization action on the table. * Action may be either a split, or a merge, or no action. 
@@ -76,7 +95,7 @@ public class SimpleRegionNormalizer implements RegionNormalizer { @Override public NormalizationPlan computePlanForTable(TableName table) throws HBaseIOException { if (table == null || table.isSystemTable()) { - LOG.debug("Normalization of table " + table + " isn't allowed"); + LOG.debug("Normalization of system table " + table + " isn't allowed"); return EmptyNormalizationPlan.getInstance(); } @@ -95,57 +114,18 @@ public class SimpleRegionNormalizer implements RegionNormalizer { ", number of regions: " + tableRegions.size()); long totalSizeMb = 0; - Pair largestRegion = new Pair<>(); - - // A is a smallest region, B is it's smallest neighbor - Pair smallestRegion = new Pair<>(); - int smallestRegionIndex = 0; + ArrayList> regionsWithSize = + new ArrayList>(tableRegions.size()); for (int i = 0; i < tableRegions.size(); i++) { HRegionInfo hri = tableRegions.get(i); long regionSize = getRegionSize(hri); + regionsWithSize.add(new Triple(hri, regionSize, i)); totalSizeMb += regionSize; - - if (largestRegion.getFirst() == null || regionSize > largestRegion.getSecond()) { - largestRegion.setFirst(hri); - largestRegion.setSecond(regionSize); - } - - if (smallestRegion.getFirst() == null || regionSize < smallestRegion.getSecond()) { - smallestRegion.setFirst(hri); - smallestRegion.setSecond(regionSize); - smallestRegionIndex = i; - } } + Collections.sort(regionsWithSize, regionSizeComparator); - // now get smallest neighbor of smallest region - long leftNeighborSize = -1; - - if (smallestRegionIndex > 0) { - leftNeighborSize = getRegionSize(tableRegions.get(smallestRegionIndex - 1)); - } - - long rightNeighborSize = -1; - if (smallestRegionIndex < tableRegions.size() - 1) { - rightNeighborSize = getRegionSize(tableRegions.get(smallestRegionIndex + 1)); - } - - Pair smallestNeighborOfSmallestRegion; - if (leftNeighborSize == -1) { - smallestNeighborOfSmallestRegion = - new Pair<>(tableRegions.get(smallestRegionIndex + 1), rightNeighborSize); - } else if (rightNeighborSize == -1) { - smallestNeighborOfSmallestRegion = - new Pair<>(tableRegions.get(smallestRegionIndex - 1), leftNeighborSize); - } else { - if (leftNeighborSize < rightNeighborSize) { - smallestNeighborOfSmallestRegion = - new Pair<>(tableRegions.get(smallestRegionIndex - 1), leftNeighborSize); - } else { - smallestNeighborOfSmallestRegion = - new Pair<>(tableRegions.get(smallestRegionIndex + 1), rightNeighborSize); - } - } + Triple largestRegion = regionsWithSize.get(tableRegions.size()-1); double avgRegionSize = totalSizeMb / (double) tableRegions.size(); @@ -159,19 +139,31 @@ public class SimpleRegionNormalizer implements RegionNormalizer { + largestRegion.getFirst().getRegionNameAsString() + " has size " + largestRegion.getSecond() + ", more than 2 times than avg size, splitting"); return new SplitNormalizationPlan(largestRegion.getFirst(), null); - } else { - if (smallestRegion.getSecond() + smallestNeighborOfSmallestRegion.getSecond() - < avgRegionSize) { - LOG.debug("Table " + table + ", smallest region size: " + smallestRegion.getSecond() - + " and its smallest neighbor size: " + smallestNeighborOfSmallestRegion.getSecond() - + ", less than the avg size, merging them"); - return new MergeNormalizationPlan(smallestRegion.getFirst(), - smallestNeighborOfSmallestRegion.getFirst()); - } else { - LOG.debug("No normalization needed, regions look good for table: " + table); - return EmptyNormalizationPlan.getInstance(); - } } + int candidateIdx = 0; + // look for two successive entries whose indices are adjacent + 
while (candidateIdx < tableRegions.size()-1) { + if (Math.abs(regionsWithSize.get(candidateIdx).getThird() - + regionsWithSize.get(candidateIdx + 1).getThird()) == 1) { + break; + } + candidateIdx++; + } + if (candidateIdx == tableRegions.size()-1) { + LOG.debug("No neighboring regions found for table: " + table); + return EmptyNormalizationPlan.getInstance(); + } + Triple candidateRegion = regionsWithSize.get(candidateIdx); + Triple candidateRegion2 = regionsWithSize.get(candidateIdx+1); + if (candidateRegion.getSecond() + candidateRegion2.getSecond() < avgRegionSize) { + LOG.debug("Table " + table + ", smallest region size: " + candidateRegion.getSecond() + + " and its smallest neighbor size: " + candidateRegion2.getSecond() + + ", less than the avg size, merging them"); + return new MergeNormalizationPlan(candidateRegion.getFirst(), + candidateRegion2.getFirst()); + } + LOG.debug("No normalization needed, regions look good for table: " + table); + return EmptyNormalizationPlan.getInstance(); } private long getRegionSize(HRegionInfo hri) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java index a87fa29310e..e22532cc0b7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java @@ -141,8 +141,8 @@ public class TestSimpleRegionNormalizer { hris.add(hri4); regionSizes.put(hri4.getRegionName(), 15); - HRegionInfo hri5 = new HRegionInfo(testTable, Bytes.toBytes("ddd"), Bytes.toBytes("eee")); - hris.add(hri4); + HRegionInfo hri5 = new HRegionInfo(testTable, Bytes.toBytes("eee"), Bytes.toBytes("fff")); + hris.add(hri5); regionSizes.put(hri5.getRegionName(), 16); setupMocksForNormalizer(regionSizes, hris); @@ -153,6 +153,45 @@ public class TestSimpleRegionNormalizer { assertEquals(hri3, ((MergeNormalizationPlan) plan).getSecondRegion()); } + // Test for situation illustrated in HBASE-14867 + @Test + public void testMergeOfSecondSmallestRegions() throws HBaseIOException { + TableName testTable = TableName.valueOf("testMergeOfSmallRegions"); + List hris = new ArrayList<>(); + Map regionSizes = new HashMap<>(); + + HRegionInfo hri1 = new HRegionInfo(testTable, Bytes.toBytes("aaa"), Bytes.toBytes("bbb")); + hris.add(hri1); + regionSizes.put(hri1.getRegionName(), 1); + + HRegionInfo hri2 = new HRegionInfo(testTable, Bytes.toBytes("bbb"), Bytes.toBytes("ccc")); + hris.add(hri2); + regionSizes.put(hri2.getRegionName(), 10000); + + HRegionInfo hri3 = new HRegionInfo(testTable, Bytes.toBytes("ccc"), Bytes.toBytes("ddd")); + hris.add(hri3); + regionSizes.put(hri3.getRegionName(), 10000); + + HRegionInfo hri4 = new HRegionInfo(testTable, Bytes.toBytes("ddd"), Bytes.toBytes("eee")); + hris.add(hri4); + regionSizes.put(hri4.getRegionName(), 10000); + + HRegionInfo hri5 = new HRegionInfo(testTable, Bytes.toBytes("eee"), Bytes.toBytes("fff")); + hris.add(hri5); + regionSizes.put(hri5.getRegionName(), 2700); + + HRegionInfo hri6 = new HRegionInfo(testTable, Bytes.toBytes("fff"), Bytes.toBytes("ggg")); + hris.add(hri6); + regionSizes.put(hri6.getRegionName(), 2700); + + setupMocksForNormalizer(regionSizes, hris); + NormalizationPlan plan = normalizer.computePlanForTable(testTable); + + assertTrue(plan instanceof MergeNormalizationPlan); + assertEquals(hri5, ((MergeNormalizationPlan) 
plan).getFirstRegion()); + assertEquals(hri6, ((MergeNormalizationPlan) plan).getSecondRegion()); + } + @Test public void testMergeOfSmallNonAdjacentRegions() throws HBaseIOException { TableName testTable = TableName.valueOf("testMergeOfSmallRegions"); From 9cc01b4029d8159dd528b0dc413464285d07fbfc Mon Sep 17 00:00:00 2001 From: Sean Busbey Date: Sat, 26 Dec 2015 16:47:53 -0600 Subject: [PATCH 68/72] HBASE-15011 turn off the jdk8 javadoc linter. Conflicts: pom.xml --- pom.xml | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 95996502384..50dc90c85a1 100644 --- a/pom.xml +++ b/pom.xml @@ -431,6 +431,11 @@ + + org.apache.maven.plugins + maven-javadoc-plugin + ${javadoc.version} + org.apache.maven.plugins maven-remote-resources-plugin @@ -1744,6 +1749,26 @@ --> + + build-with-jdk8 + + 1.8 + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + + + -Xdoclint:none + + + + + + jacoco @@ -2432,7 +2457,7 @@ org.apache.maven.plugins maven-javadoc-plugin - 2.10.3 + ${javadoc.version} **/generated org.apache.hadoop.hbase.generated.master:org.apache.hadoop.hbase.protobuf.generated From 64553236a972f6a32dbff4c2275e99780b06d744 Mon Sep 17 00:00:00 2001 From: Ashish Singhi Date: Sat, 26 Dec 2015 22:21:33 +0530 Subject: [PATCH 69/72] HBASE-15018 Inconsistent way of handling TimeoutException in the rpc client implementations Signed-off-by: Sean Busbey --- .../hadoop/hbase/ipc/AbstractRpcClient.java | 39 ++++++- .../hadoop/hbase/ipc/AsyncRpcClient.java | 5 +- .../hadoop/hbase/ipc/RpcClientImpl.java | 102 +++++++----------- .../hadoop/hbase/ipc/AbstractTestIPC.java | 18 ++++ 4 files changed, 92 insertions(+), 72 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java index 6f5e78aeb7d..e33ef3a1479 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java @@ -24,6 +24,13 @@ import com.google.protobuf.Descriptors; import com.google.protobuf.Message; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; + +import java.io.IOException; +import java.net.ConnectException; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.net.SocketTimeoutException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -34,6 +41,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.MetricsConnection; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.codec.KeyValueCodec; +import org.apache.hadoop.hbase.exceptions.ConnectionClosingException; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -41,10 +49,6 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PoolMap; import org.apache.hadoop.io.compress.CompressionCodec; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.net.SocketAddress; - /** * Provides the basics for a RpcClient implementation like configuration and Logging. 
*/ @@ -257,6 +261,33 @@ public abstract class AbstractRpcClient implements RpcClient { return new BlockingRpcChannelImplementation(this, sn, ticket, defaultOperationTimeout); } + /** + * Takes an Exception and the address we were trying to connect to and return an IOException with + * the input exception as the cause. The new exception provides the stack trace of the place where + * the exception is thrown and some extra diagnostics information. If the exception is + * ConnectException or SocketTimeoutException, return a new one of the same type; Otherwise return + * an IOException. + * @param addr target address + * @param exception the relevant exception + * @return an exception to throw + */ + protected IOException wrapException(InetSocketAddress addr, Exception exception) { + if (exception instanceof ConnectException) { + // connection refused; include the host:port in the error + return (ConnectException) new ConnectException("Call to " + addr + + " failed on connection exception: " + exception).initCause(exception); + } else if (exception instanceof SocketTimeoutException) { + return (SocketTimeoutException) new SocketTimeoutException("Call to " + addr + + " failed because " + exception).initCause(exception); + } else if (exception instanceof ConnectionClosingException) { + return (ConnectionClosingException) new ConnectionClosingException("Call to " + addr + + " failed on local exception: " + exception).initCause(exception); + } else { + return (IOException) new IOException("Call to " + addr + " failed on local exception: " + + exception).initCause(exception); + } + } + /** * Blocking rpc channel that goes via hbase rpc. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java index 60e9add9f13..e12e298f682 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java @@ -251,10 +251,11 @@ public class AsyncRpcClient extends AbstractRpcClient { if (e.getCause() instanceof IOException) { throw (IOException) e.getCause(); } else { - throw new IOException(e.getCause()); + throw wrapException(addr, (Exception) e.getCause()); } } catch (TimeoutException e) { - throw new CallTimeoutException(promise.toString()); + CallTimeoutException cte = new CallTimeoutException(promise.toString()); + throw wrapException(addr, cte); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java index 21b257fa80d..1509f5414c8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java @@ -19,11 +19,37 @@ package org.apache.hadoop.hbase.ipc; -import com.google.common.annotations.VisibleForTesting; -import com.google.protobuf.Descriptors.MethodDescriptor; -import com.google.protobuf.Message; -import com.google.protobuf.Message.Builder; -import com.google.protobuf.RpcCallback; +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.Closeable; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InterruptedIOException; +import java.io.OutputStream; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.net.SocketAddress; +import 
java.net.SocketTimeoutException; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.security.PrivilegedExceptionAction; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import javax.net.SocketFactory; +import javax.security.sasl.SaslException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -70,37 +96,11 @@ import org.apache.htrace.Span; import org.apache.htrace.Trace; import org.apache.htrace.TraceScope; -import javax.net.SocketFactory; -import javax.security.sasl.SaslException; -import java.io.BufferedInputStream; -import java.io.BufferedOutputStream; -import java.io.Closeable; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.InterruptedIOException; -import java.io.OutputStream; -import java.net.ConnectException; -import java.net.InetSocketAddress; -import java.net.Socket; -import java.net.SocketAddress; -import java.net.SocketTimeoutException; -import java.net.UnknownHostException; -import java.nio.ByteBuffer; -import java.security.PrivilegedExceptionAction; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; +import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.Message; +import com.google.protobuf.Message.Builder; +import com.google.protobuf.RpcCallback; /** * Does RPC against a cluster. Manages connections per regionserver in the cluster. @@ -1266,36 +1266,6 @@ public class RpcClientImpl extends AbstractRpcClient { } - /** - * Take an IOException and the address we were trying to connect to - * and return an IOException with the input exception as the cause. - * The new exception provides the stack trace of the place where - * the exception is thrown and some extra diagnostics information. - * If the exception is ConnectException or SocketTimeoutException, - * return a new one of the same type; Otherwise return an IOException. 
- * - * @param addr target address - * @param exception the relevant exception - * @return an exception to throw - */ - protected IOException wrapException(InetSocketAddress addr, - IOException exception) { - if (exception instanceof ConnectException) { - //connection refused; include the host:port in the error - return (ConnectException)new ConnectException( - "Call to " + addr + " failed on connection exception: " + exception).initCause(exception); - } else if (exception instanceof SocketTimeoutException) { - return (SocketTimeoutException)new SocketTimeoutException("Call to " + addr + - " failed because " + exception).initCause(exception); - } else if (exception instanceof ConnectionClosingException){ - return (ConnectionClosingException) new ConnectionClosingException( - "Call to " + addr + " failed on local exception: " + exception).initCause(exception); - } else { - return (IOException)new IOException("Call to " + addr + " failed on local exception: " + - exception).initCause(exception); - } - } - /** * Interrupt the connections to the given ip:port server. This should be called if the server * is known as actually dead. This will not prevent current operation to be retried, and, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java index 5df1edc29c8..ffe4d40600b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java @@ -26,8 +26,10 @@ import static org.mockito.Mockito.verify; import static org.mockito.internal.verification.VerificationModeFactory.times; import java.io.IOException; +import java.net.ConnectException; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.SocketTimeoutException; import java.util.ArrayList; import java.util.List; @@ -42,6 +44,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.MetricsConnection; +import org.apache.hadoop.hbase.exceptions.ConnectionClosingException; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoRequestProto; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoResponseProto; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EmptyRequestProto; @@ -363,4 +366,19 @@ public abstract class AbstractTestIPC { rpcServer.stop(); } } + + @Test + public void testWrapException() throws Exception { + AbstractRpcClient client = + (AbstractRpcClient) RpcClientFactory.createClient(CONF, "AbstractTestIPC"); + final InetSocketAddress address = InetSocketAddress.createUnresolved("localhost", 0); + assertTrue(client.wrapException(address, new ConnectException()) instanceof ConnectException); + assertTrue(client.wrapException(address, + new SocketTimeoutException()) instanceof SocketTimeoutException); + assertTrue(client.wrapException(address, new ConnectionClosingException( + "Test AbstractRpcClient#wrapException")) instanceof ConnectionClosingException); + assertTrue(client + .wrapException(address, new CallTimeoutException("Test AbstractRpcClient#wrapException")) + .getCause() instanceof CallTimeoutException); + } } From bb744b4090168cce95ab59c58a44722683130477 Mon Sep 17 00:00:00 2001 From: Gary Helmling Date: Wed, 23 Dec 2015 18:49:58 -0800 Subject: [PATCH 70/72] HBASE-15038 ExportSnapshot should support separate configs for source 
and destination --- .../hadoop/hbase/snapshot/ExportSnapshot.java | 33 +++++++++++++------ 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java index c5f97c3cfab..3d75291bd9c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java @@ -87,6 +87,10 @@ import org.apache.hadoop.util.ToolRunner; @InterfaceStability.Evolving public class ExportSnapshot extends Configured implements Tool { public static final String NAME = "exportsnapshot"; + /** Configuration prefix for overrides for the source filesystem */ + public static final String CONF_SOURCE_PREFIX = NAME + ".from."; + /** Configuration prefix for overrides for the destination filesystem */ + public static final String CONF_DEST_PREFIX = NAME + ".to."; private static final Log LOG = LogFactory.getLog(ExportSnapshot.class); @@ -141,6 +145,9 @@ public class ExportSnapshot extends Configured implements Tool { @Override public void setup(Context context) throws IOException { Configuration conf = context.getConfiguration(); + Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX); + Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX); + verifyChecksum = conf.getBoolean(CONF_CHECKSUM_VERIFY, true); filesGroup = conf.get(CONF_FILES_GROUP); @@ -155,15 +162,15 @@ public class ExportSnapshot extends Configured implements Tool { testFailures = conf.getBoolean(CONF_TEST_FAILURE, false); try { - conf.setBoolean("fs." + inputRoot.toUri().getScheme() + ".impl.disable.cache", true); - inputFs = FileSystem.get(inputRoot.toUri(), conf); + srcConf.setBoolean("fs." + inputRoot.toUri().getScheme() + ".impl.disable.cache", true); + inputFs = FileSystem.get(inputRoot.toUri(), srcConf); } catch (IOException e) { throw new IOException("Could not get the input FileSystem with root=" + inputRoot, e); } try { - conf.setBoolean("fs." + outputRoot.toUri().getScheme() + ".impl.disable.cache", true); - outputFs = FileSystem.get(outputRoot.toUri(), conf); + destConf.setBoolean("fs." + outputRoot.toUri().getScheme() + ".impl.disable.cache", true); + outputFs = FileSystem.get(outputRoot.toUri(), destConf); } catch (IOException e) { throw new IOException("Could not get the output FileSystem with root="+ outputRoot, e); } @@ -780,8 +787,12 @@ public class ExportSnapshot extends Configured implements Tool { job.setNumReduceTasks(0); // Acquire the delegation Tokens + Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX); TokenCache.obtainTokensForNamenodes(job.getCredentials(), - new Path[] { inputRoot, outputRoot }, conf); + new Path[] { inputRoot }, srcConf); + Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX); + TokenCache.obtainTokensForNamenodes(job.getCredentials(), + new Path[] { outputRoot }, destConf); // Run the MR Job if (!job.waitForCompletion(true)) { @@ -904,11 +915,13 @@ public class ExportSnapshot extends Configured implements Tool { targetName = snapshotName; } - conf.setBoolean("fs." 
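Usage sketch for the new source/destination prefixes (illustrative only; the overridden property and cluster names below are assumptions, not taken from this patch). Any property set under exportsnapshot.from. or exportsnapshot.to. is picked up by HBaseConfiguration.createClusterConf, which strips the prefix and overlays the value onto the configuration handed to the source or destination FileSystem respectively:

    // Hedged example: route the copy to a different destination filesystem.
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical override and cluster name, shown only to illustrate the prefix handling.
    conf.set(ExportSnapshot.CONF_DEST_PREFIX + "fs.defaultFS", "hdfs://backup-cluster:8020");
    Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, ExportSnapshot.CONF_DEST_PREFIX);
    // destConf now resolves fs.defaultFS to hdfs://backup-cluster:8020; the source-side configuration is untouched.

The same kind of override can typically be passed on the command line as -Dexportsnapshot.to.fs.defaultFS=... when the tool is run through ToolRunner.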
+ inputRoot.toUri().getScheme() + ".impl.disable.cache", true); - FileSystem inputFs = FileSystem.get(inputRoot.toUri(), conf); + Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX); + srcConf.setBoolean("fs." + inputRoot.toUri().getScheme() + ".impl.disable.cache", true); + FileSystem inputFs = FileSystem.get(inputRoot.toUri(), srcConf); LOG.debug("inputFs=" + inputFs.getUri().toString() + " inputRoot=" + inputRoot); - conf.setBoolean("fs." + outputRoot.toUri().getScheme() + ".impl.disable.cache", true); - FileSystem outputFs = FileSystem.get(outputRoot.toUri(), conf); + Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX); + destConf.setBoolean("fs." + outputRoot.toUri().getScheme() + ".impl.disable.cache", true); + FileSystem outputFs = FileSystem.get(outputRoot.toUri(), destConf); LOG.debug("outputFs=" + outputFs.getUri().toString() + " outputRoot=" + outputRoot.toString()); boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false); @@ -995,7 +1008,7 @@ public class ExportSnapshot extends Configured implements Tool { // Step 4 - Verify snapshot integrity if (verifyTarget) { LOG.info("Verify snapshot integrity"); - verifySnapshot(conf, outputFs, outputRoot, outputSnapshotDir); + verifySnapshot(destConf, outputFs, outputRoot, outputSnapshotDir); } LOG.info("Export Completed: " + targetName); From 2797adf45f1e4de20fb017834f583ae390fdb5a2 Mon Sep 17 00:00:00 2001 From: tedyu Date: Mon, 4 Jan 2016 07:19:13 -0800 Subject: [PATCH 71/72] HBASE-14987 Compaction marker whose region name doesn't match current region's needs to be handled --- .../hadoop/hbase/protobuf/ProtobufUtil.java | 9 +++- .../hadoop/hbase/regionserver/HRegion.java | 44 +++++++++++++++---- .../hbase/regionserver/TestHRegion.java | 26 +++++++++-- 3 files changed, 65 insertions(+), 14 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 47305ffb9f5..5f5c3a32942 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -2609,12 +2609,19 @@ public final class ProtobufUtil { public static CompactionDescriptor toCompactionDescriptor(HRegionInfo info, byte[] family, List inputPaths, List outputPaths, Path storeDir) { + return toCompactionDescriptor(info, null, family, inputPaths, outputPaths, storeDir); + } + + @SuppressWarnings("deprecation") + public static CompactionDescriptor toCompactionDescriptor(HRegionInfo info, byte[] regionName, + byte[] family, List inputPaths, List outputPaths, Path storeDir) { // compaction descriptor contains relative paths. // input / output paths are relative to the store dir // store dir is relative to region dir CompactionDescriptor.Builder builder = CompactionDescriptor.newBuilder() .setTableName(ByteStringer.wrap(info.getTableName())) - .setEncodedRegionName(ByteStringer.wrap(info.getEncodedNameAsBytes())) + .setEncodedRegionName(ByteStringer.wrap( + regionName == null ? 
info.getEncodedNameAsBytes() : regionName)) .setFamilyName(ByteStringer.wrap(family)) .setStoreHomeDir(storeDir.getName()); //make relative for (Path inputPath : inputPaths) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 7d511018946..e4149309fde 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -4132,11 +4132,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi continue; } } + boolean checkRowWithinBoundary = false; // Check this edit is for this region. if (!Bytes.equals(key.getEncodedRegionName(), this.getRegionInfo().getEncodedNameAsBytes())) { - skippedEdits++; - continue; + checkRowWithinBoundary = true; } boolean flush = false; @@ -4144,11 +4144,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // Check this edit is for me. Also, guard against writing the special // METACOLUMN info such as HBASE::CACHEFLUSH entries if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) { - //this is a special edit, we should handle it - CompactionDescriptor compaction = WALEdit.getCompaction(cell); - if (compaction != null) { - //replay the compaction - replayWALCompactionMarker(compaction, false, true, Long.MAX_VALUE); + // if region names don't match, skipp replaying compaction marker + if (!checkRowWithinBoundary) { + //this is a special edit, we should handle it + CompactionDescriptor compaction = WALEdit.getCompaction(cell); + if (compaction != null) { + //replay the compaction + replayWALCompactionMarker(compaction, false, true, Long.MAX_VALUE); + } } skippedEdits++; continue; @@ -4164,6 +4167,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi skippedEdits++; continue; } + if (checkRowWithinBoundary && !rowIsInRange(this.getRegionInfo(), + cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) { + LOG.warn("Row of " + cell + " is not within region boundary"); + skippedEdits++; + continue; + } // Now, figure if we should skip this edit. 
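The boundary test added above relies on the rowIsInRange overload introduced later in this patch: the start key is inclusive, the end key is exclusive, and an empty start or end key leaves that side of the region unbounded. A minimal sketch of the semantics, assuming a hypothetical region spanning ["b", "d"):

    HRegionInfo hri = new HRegionInfo(TableName.valueOf("t1"), Bytes.toBytes("b"), Bytes.toBytes("d"));
    byte[] row = Bytes.toBytes("c1");
    boolean inside = HRegion.rowIsInRange(hri, row, 0, (short) row.length);      // true: "b" <= "c1" < "d"
    byte[] endKey = Bytes.toBytes("d");
    boolean atEnd = HRegion.rowIsInRange(hri, endKey, 0, (short) endKey.length); // false: the end key is exclusive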
if (key.getLogSeqNum() <= maxSeqIdInStores.get(store.getFamily() .getName())) { @@ -4234,8 +4243,16 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi void replayWALCompactionMarker(CompactionDescriptor compaction, boolean pickCompactionFiles, boolean removeFiles, long replaySeqId) throws IOException { - checkTargetRegion(compaction.getEncodedRegionName().toByteArray(), - "Compaction marker from WAL ", compaction); + try { + checkTargetRegion(compaction.getEncodedRegionName().toByteArray(), + "Compaction marker from WAL ", compaction); + } catch (WrongRegionException wre) { + if (RegionReplicaUtil.isDefaultReplica(this.getRegionInfo())) { + // skip the compaction marker since it is not for this region + return; + } + throw wre; + } synchronized (writestate) { if (replaySeqId < lastReplayedOpenRegionSeqId) { @@ -6663,6 +6680,15 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi (Bytes.compareTo(info.getEndKey(), row) > 0)); } + public static boolean rowIsInRange(HRegionInfo info, final byte [] row, final int offset, + final short length) { + return ((info.getStartKey().length == 0) || + (Bytes.compareTo(info.getStartKey(), 0, info.getStartKey().length, + row, offset, length) <= 0)) && + ((info.getEndKey().length == 0) || + (Bytes.compareTo(info.getEndKey(), 0, info.getEndKey().length, row, offset, length) > 0)); + } + /** * Merge two HRegions. The regions must be adjacent and must not overlap. * diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 52447e72ea9..1edc2518eab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -828,6 +828,10 @@ public class TestHRegion { @Test public void testRecoveredEditsReplayCompaction() throws Exception { + testRecoveredEditsReplayCompaction(false); + testRecoveredEditsReplayCompaction(true); + } + public void testRecoveredEditsReplayCompaction(boolean mismatchedRegionName) throws Exception { String method = name.getMethodName(); TableName tableName = TableName.valueOf(method); byte[] family = Bytes.toBytes("family"); @@ -873,9 +877,17 @@ public class TestHRegion { Path newFile = region.getRegionFileSystem().commitStoreFile(Bytes.toString(family), files[0].getPath()); + byte[] encodedNameAsBytes = this.region.getRegionInfo().getEncodedNameAsBytes(); + byte[] fakeEncodedNameAsBytes = new byte [encodedNameAsBytes.length]; + for (int i=0; i < encodedNameAsBytes.length; i++) { + // Mix the byte array to have a new encodedName + fakeEncodedNameAsBytes[i] = (byte) (encodedNameAsBytes[i] + 1); + } + CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(this.region - .getRegionInfo(), family, storeFiles, Lists.newArrayList(newFile), region - .getRegionFileSystem().getStoreDir(Bytes.toString(family))); + .getRegionInfo(), mismatchedRegionName ? 
fakeEncodedNameAsBytes : null, family, + storeFiles, Lists.newArrayList(newFile), + region.getRegionFileSystem().getStoreDir(Bytes.toString(family))); WALUtil.writeCompactionMarker(region.getWAL(), this.region.getTableDesc(), this.region.getRegionInfo(), compactionDescriptor, region.getMVCC()); @@ -897,14 +909,20 @@ public class TestHRegion { region.getTableDesc(); region.getRegionInfo(); region.close(); - region = HRegion.openHRegion(region, null); + try { + region = HRegion.openHRegion(region, null); + } catch (WrongRegionException wre) { + fail("Matching encoded region name should not have produced WrongRegionException"); + } // now check whether we have only one store file, the compacted one Collection sfs = region.getStore(family).getStorefiles(); for (StoreFile sf : sfs) { LOG.info(sf.getPath()); } - assertEquals(1, region.getStore(family).getStorefilesCount()); + if (!mismatchedRegionName) { + assertEquals(1, region.getStore(family).getStorefilesCount()); + } files = FSUtils.listStatus(fs, tmpDir); assertTrue("Expected to find 0 files inside " + tmpDir, files == null || files.length == 0); From 407cda4a0c4bc1338af175299372a4b1976bb44a Mon Sep 17 00:00:00 2001 From: tedyu Date: Tue, 5 Jan 2016 06:24:42 -0800 Subject: [PATCH 72/72] HBASE-15058 AssignmentManager should account for unsuccessful split correctly which initially passes quota check --- .../hbase/master/AssignmentManager.java | 46 ++++++++++++++----- 1 file changed, 35 insertions(+), 11 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index fd5d192e1a2..83f9d722404 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -3720,7 +3720,7 @@ public class AssignmentManager extends ZooKeeperListener { invokeAssign(hri, false); } - private String onRegionSplit(ServerName sn, TransitionCode code, + private String checkInStateForSplit(ServerName sn, final HRegionInfo p, final HRegionInfo a, final HRegionInfo b) { final RegionState rs_p = regionStates.getRegionState(p); RegionState rs_a = regionStates.getRegionState(a); @@ -3730,6 +3730,32 @@ public class AssignmentManager extends ZooKeeperListener { && (rs_b == null || rs_b.isOpenOrSplittingNewOnServer(sn)))) { return "Not in state good for split"; } + return ""; + } + + private String onRegionSplitReverted(ServerName sn, + final HRegionInfo p, final HRegionInfo a, final HRegionInfo b) { + String s = checkInStateForSplit(sn, p, a, b); + if (!org.apache.commons.lang.StringUtils.isEmpty(s)) { + return s; + } + regionOnline(p, sn); + regionOffline(a); + regionOffline(b); + + if (getTableStateManager().isTableState(p.getTable(), + ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) { + invokeUnAssign(p); + } + return null; + } + + private String onRegionSplit(ServerName sn, TransitionCode code, + final HRegionInfo p, final HRegionInfo a, final HRegionInfo b) { + String s = checkInStateForSplit(sn, p, a, b); + if (!org.apache.commons.lang.StringUtils.isEmpty(s)) { + return s; + } regionStates.updateRegionState(a, State.SPLITTING_NEW, sn); regionStates.updateRegionState(b, State.SPLITTING_NEW, sn); @@ -3765,15 +3791,6 @@ public class AssignmentManager extends ZooKeeperListener { LOG.info("Failed to record split region " + p.getShortNameToLog()); return "Failed to record the splitting in meta"; } - } 
else if (code == TransitionCode.SPLIT_REVERTED) { - regionOnline(p, sn); - regionOffline(a); - regionOffline(b); - - if (getTableStateManager().isTableState(p.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) { - invokeUnAssign(p); - } } return null; } @@ -4338,11 +4355,18 @@ public class AssignmentManager extends ZooKeeperListener { } catch (IOException exp) { errorMsg = StringUtils.stringifyException(exp); } + break; case SPLIT_PONR: case SPLIT: + errorMsg = + onRegionSplit(serverName, code, hri, HRegionInfo.convert(transition.getRegionInfo(1)), + HRegionInfo.convert(transition.getRegionInfo(2))); + break; + case SPLIT_REVERTED: errorMsg = - onRegionSplit(serverName, code, hri, HRegionInfo.convert(transition.getRegionInfo(1)), + onRegionSplitReverted(serverName, hri, + HRegionInfo.convert(transition.getRegionInfo(1)), HRegionInfo.convert(transition.getRegionInfo(2))); if (org.apache.commons.lang.StringUtils.isEmpty(errorMsg)) { try {
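For reference, a reduced sketch of how the split transition codes are routed after this change (simplified; the surrounding cases and error handling are omitted, and the variable names are placeholders). SPLIT and SPLIT_PONR still go through onRegionSplit, while a reverted split now takes its own path, which puts the parent region back online and the never-opened daughters offline:

    switch (code) {
      case SPLIT_PONR:
      case SPLIT:
        errorMsg = onRegionSplit(serverName, code, parent, daughterA, daughterB);
        break;
      case SPLIT_REVERTED:
        // parent returns to service; the SPLITTING_NEW daughters are marked offline
        errorMsg = onRegionSplitReverted(serverName, parent, daughterA, daughterB);
        break;
      default:
        break;
    }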