From a46134bffc21da91853c4288375e9d4bd00a177d Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Wed, 12 Oct 2016 12:13:41 -0700 Subject: [PATCH] HBASE-16731 Inconsistent results from the Get/Scan if we use the empty FilterList (ChiaPing Tsai) --- .../org/apache/hadoop/hbase/client/Get.java | 5 + .../org/apache/hadoop/hbase/client/Query.java | 37 +++- .../org/apache/hadoop/hbase/client/Scan.java | 36 +--- .../hadoop/hbase/protobuf/ProtobufUtil.java | 10 +- .../hbase/shaded/protobuf/ProtobufUtil.java | 9 +- .../apache/hadoop/hbase/client/TestScan.java | 42 +++- .../protobuf/generated/ClientProtos.java | 183 ++++++++++++++--- .../src/main/protobuf/Client.proto | 1 + .../protobuf/generated/ClientProtos.java | 185 +++++++++++++++--- hbase-protocol/src/main/protobuf/Client.proto | 1 + .../hadoop/hbase/regionserver/HRegion.java | 4 +- .../hbase/regionserver/RSRpcServices.java | 4 +- .../hbase/client/TestFromClientSide.java | 43 +++- .../hbase/protobuf/TestProtobufUtil.java | 2 +- 14 files changed, 456 insertions(+), 106 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java index efb437f0132..c19a6a93323 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java @@ -110,6 +110,7 @@ public class Get extends Query this.storeOffset = get.getRowOffsetPerColumnFamily(); this.tr = get.getTimeRange(); this.checkExistenceOnly = get.isCheckExistenceOnly(); + this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue(); Map> fams = get.getFamilyMap(); for (Map.Entry> entry : fams.entrySet()) { byte [] fam = entry.getKey(); @@ -251,6 +252,10 @@ public class Get extends Query return this; } + public Get setLoadColumnFamiliesOnDemand(boolean value) { + return (Get) super.setLoadColumnFamiliesOnDemand(value); + } + /** * Set the maximum number of values to return per row per Column Family * @param limit the maximum number of values returned / row / CF diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java index d6befb7a573..a2874991023 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java @@ -44,7 +44,7 @@ public abstract class Query extends OperationWithAttributes { protected int targetReplicaId = -1; protected Consistency consistency = Consistency.STRONG; protected Map colFamTimeRangeMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); - + protected Boolean loadColumnFamiliesOnDemand = null; /** * @return Filter */ @@ -179,6 +179,41 @@ public abstract class Query extends OperationWithAttributes { IsolationLevel.fromBytes(attr); } + /** + * Set the value indicating whether loading CFs on demand should be allowed (cluster + * default is false). On-demand CF loading doesn't load column families until necessary, e.g. + * if you filter on one column, the other column family data will be loaded only for the rows + * that are included in result, not all rows like in normal case. 
+ * With column-specific filters, like SingleColumnValueFilter w/filterIfMissing == true, + * this can deliver huge perf gains when there's a cf with lots of data; however, it can + * also lead to some inconsistent results, as follows: + * - if someone does a concurrent update to both column families in question you may get a row + * that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" } } + * someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, concurrent scan + * filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 }, + * { video => "my dog" } }. + * - if there's a concurrent split and you have more than 2 column families, some rows may be + * missing some column families. + */ + public Query setLoadColumnFamiliesOnDemand(boolean value) { + this.loadColumnFamiliesOnDemand = value; + return this; + } + + /** + * Get the raw loadColumnFamiliesOnDemand setting; if it's not set, can be null. + */ + public Boolean getLoadColumnFamiliesOnDemandValue() { + return this.loadColumnFamiliesOnDemand; + } + + /** + * Get the logical value indicating whether on-demand CF loading should be allowed. + */ + public boolean doLoadColumnFamiliesOnDemand() { + return (this.loadColumnFamiliesOnDemand != null) + && this.loadColumnFamiliesOnDemand; + } /** * Get versions of columns only within the specified timestamp range, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java index 71a31db63ea..4f2728b0a41 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -137,7 +137,6 @@ public class Scan extends Query { private TimeRange tr = new TimeRange(); private Map> familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); - private Boolean loadColumnFamiliesOnDemand = null; private Boolean asyncPrefetch = null; /** @@ -273,6 +272,7 @@ public class Scan extends Query { this.asyncPrefetch = false; this.consistency = get.getConsistency(); this.setIsolationLevel(get.getIsolationLevel()); + this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue(); for (Map.Entry attr : get.getAttributesMap().entrySet()) { setAttribute(attr.getKey(), attr.getValue()); } @@ -753,40 +753,8 @@ public class Scan extends Query { return allowPartialResults; } - /** - * Set the value indicating whether loading CFs on demand should be allowed (cluster - * default is false). On-demand CF loading doesn't load column families until necessary, e.g. - * if you filter on one column, the other column family data will be loaded only for the rows - * that are included in result, not all rows like in normal case. - * With column-specific filters, like SingleColumnValueFilter w/filterIfMissing == true, - * this can deliver huge perf gains when there's a cf with lots of data; however, it can - * also lead to some inconsistent results, as follows: - * - if someone does a concurrent update to both column families in question you may get a row - * that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" } } - * someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, concurrent scan - * filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 }, - * { video => "my dog" } }. - * - if there's a concurrent split and you have more than 2 column families, some rows may be - * missing some column families. 
- */ public Scan setLoadColumnFamiliesOnDemand(boolean value) { - this.loadColumnFamiliesOnDemand = value; - return this; - } - - /** - * Get the raw loadColumnFamiliesOnDemand setting; if it's not set, can be null. - */ - public Boolean getLoadColumnFamiliesOnDemandValue() { - return this.loadColumnFamiliesOnDemand; - } - - /** - * Get the logical value indicating whether on-demand CF loading should be allowed. - */ - public boolean doLoadColumnFamiliesOnDemand() { - return (this.loadColumnFamiliesOnDemand != null) - && this.loadColumnFamiliesOnDemand.booleanValue(); + return (Scan) super.setLoadColumnFamiliesOnDemand(value); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 2d7853457a0..330348d14b5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -417,6 +417,9 @@ public final class ProtobufUtil { if (proto.hasConsistency()) { get.setConsistency(toConsistency(proto.getConsistency())); } + if (proto.hasLoadColumnFamiliesOnDemand()) { + get.setLoadColumnFamiliesOnDemand(proto.getLoadColumnFamiliesOnDemand()); + } return get; } @@ -837,7 +840,7 @@ public final class ProtobufUtil { } Boolean loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue(); if (loadColumnFamiliesOnDemand != null) { - scanBuilder.setLoadColumnFamiliesOnDemand(loadColumnFamiliesOnDemand.booleanValue()); + scanBuilder.setLoadColumnFamiliesOnDemand(loadColumnFamiliesOnDemand); } scanBuilder.setMaxVersions(scan.getMaxVersions()); for (Entry cftr : scan.getColumnFamilyTimeRange().entrySet()) { @@ -1062,6 +1065,11 @@ public final class ProtobufUtil { builder.setConsistency(toConsistency(get.getConsistency())); } + Boolean loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue(); + if (loadColumnFamiliesOnDemand != null) { + builder.setLoadColumnFamiliesOnDemand(loadColumnFamiliesOnDemand); + } + return builder.build(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 2f72eaa7338..53101de8cf6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -524,6 +524,9 @@ public final class ProtobufUtil { if (proto.hasConsistency()) { get.setConsistency(toConsistency(proto.getConsistency())); } + if (proto.hasLoadColumnFamiliesOnDemand()) { + get.setLoadColumnFamiliesOnDemand(proto.getLoadColumnFamiliesOnDemand()); + } return get; } @@ -944,7 +947,7 @@ public final class ProtobufUtil { } Boolean loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue(); if (loadColumnFamiliesOnDemand != null) { - scanBuilder.setLoadColumnFamiliesOnDemand(loadColumnFamiliesOnDemand.booleanValue()); + scanBuilder.setLoadColumnFamiliesOnDemand(loadColumnFamiliesOnDemand); } scanBuilder.setMaxVersions(scan.getMaxVersions()); for (Entry cftr : scan.getColumnFamilyTimeRange().entrySet()) { @@ -1169,6 +1172,10 @@ public final class ProtobufUtil { builder.setConsistency(toConsistency(get.getConsistency())); } + Boolean loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue(); + if (loadColumnFamiliesOnDemand != null) { + 
builder.setLoadColumnFamiliesOnDemand(loadColumnFamiliesOnDemand); + } return builder.build(); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java index fc1e5266e66..a687384deb2 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java @@ -24,18 +24,22 @@ import static org.junit.Assert.fail; import java.io.IOException; import java.util.Arrays; import java.util.Set; - import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.filter.FilterList; +import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Assert; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import org.junit.Test; import org.junit.experimental.categories.Category; + + // TODO: cover more test cases @Category({ClientTests.class, SmallTests.class}) public class TestScan { @@ -57,6 +61,40 @@ public class TestScan { Assert.assertEquals(3, scan2.getAttributesMap().size()); } + @Test + public void testGetToScan() throws IOException { + Get get = new Get(Bytes.toBytes(1)); + get.setCacheBlocks(true) + .setConsistency(Consistency.TIMELINE) + .setFilter(new FilterList()) + .setId("get") + .setIsolationLevel(IsolationLevel.READ_COMMITTED) + .setLoadColumnFamiliesOnDemand(false) + .setMaxResultsPerColumnFamily(1000) + .setMaxVersions(9999) + .setRowOffsetPerColumnFamily(5) + .setTimeRange(0, 13) + .setAttribute("att_v0", Bytes.toBytes("att_v0")) + .setColumnFamilyTimeRange(Bytes.toBytes("cf"), 0, 123); + Scan scan = new Scan(get); + assertEquals(get.getCacheBlocks(), scan.getCacheBlocks()); + assertEquals(get.getConsistency(), scan.getConsistency()); + assertEquals(get.getFilter(), scan.getFilter()); + assertEquals(get.getId(), scan.getId()); + assertEquals(get.getIsolationLevel(), scan.getIsolationLevel()); + assertEquals(get.getLoadColumnFamiliesOnDemandValue(), scan.getLoadColumnFamiliesOnDemandValue()); + assertEquals(get.getMaxResultsPerColumnFamily(), scan.getMaxResultsPerColumnFamily()); + assertEquals(get.getMaxVersions(), scan.getMaxVersions()); + assertEquals(get.getRowOffsetPerColumnFamily(), scan.getRowOffsetPerColumnFamily()); + assertEquals(get.getTimeRange().getMin(), scan.getTimeRange().getMin()); + assertEquals(get.getTimeRange().getMax(), scan.getTimeRange().getMax()); + assertTrue(Bytes.equals(get.getAttribute("att_v0"), scan.getAttribute("att_v0"))); + assertEquals(get.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMin(), + scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMin()); + assertEquals(get.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMax(), + scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMax()); + } + @Test public void testScanAttributes() { Scan scan = new Scan(); diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java index 
80b858ffa69..bfd196e9906 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java @@ -2058,6 +2058,23 @@ public final class ClientProtos { */ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilyTimeRangeOrBuilder getCfTimeRangeOrBuilder( int index); + + /** + *
+     * DO NOT add defaults to load_column_families_on_demand.
+     *
+     *
+     * optional bool load_column_families_on_demand = 14;
+     */
+    boolean hasLoadColumnFamiliesOnDemand();
+    /**
+     *
+     * DO NOT add defaults to load_column_families_on_demand.
+     *
+     *
+     * optional bool load_column_families_on_demand = 14;
+     */
+    boolean getLoadColumnFamiliesOnDemand();
   }
   /**
    *
@@ -2088,6 +2105,7 @@ public final class ClientProtos {
       existenceOnly_ = false;
       consistency_ = 0;
       cfTimeRange_ = java.util.Collections.emptyList();
+      loadColumnFamiliesOnDemand_ = false;
     }
 
     @java.lang.Override
@@ -2212,6 +2230,11 @@ public final class ClientProtos {
                   input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.PARSER, extensionRegistry));
               break;
             }
+            case 112: {
+              bitField0_ |= 0x00000200;
+              loadColumnFamiliesOnDemand_ = input.readBool();
+              break;
+            }
           }
         }
       } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
@@ -2509,6 +2532,29 @@ public final class ClientProtos {
       return cfTimeRange_.get(index);
     }
 
+    public static final int LOAD_COLUMN_FAMILIES_ON_DEMAND_FIELD_NUMBER = 14;
+    private boolean loadColumnFamiliesOnDemand_;
+    /**
+     *
+     * DO NOT add defaults to load_column_families_on_demand.
+     *
+     *
+     * optional bool load_column_families_on_demand = 14;
+     */
+    public boolean hasLoadColumnFamiliesOnDemand() {
+      return ((bitField0_ & 0x00000200) == 0x00000200);
+    }
+    /**
+     *
+     * DO NOT add defaults to load_column_families_on_demand.
+     *
+ * + * optional bool load_column_families_on_demand = 14; + */ + public boolean getLoadColumnFamiliesOnDemand() { + return loadColumnFamiliesOnDemand_; + } + private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; @@ -2585,6 +2631,9 @@ public final class ClientProtos { for (int i = 0; i < cfTimeRange_.size(); i++) { output.writeMessage(13, cfTimeRange_.get(i)); } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + output.writeBool(14, loadColumnFamiliesOnDemand_); + } unknownFields.writeTo(output); } @@ -2641,6 +2690,10 @@ public final class ClientProtos { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream .computeMessageSize(13, cfTimeRange_.get(i)); } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeBoolSize(14, loadColumnFamiliesOnDemand_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -2708,6 +2761,11 @@ public final class ClientProtos { } result = result && getCfTimeRangeList() .equals(other.getCfTimeRangeList()); + result = result && (hasLoadColumnFamiliesOnDemand() == other.hasLoadColumnFamiliesOnDemand()); + if (hasLoadColumnFamiliesOnDemand()) { + result = result && (getLoadColumnFamiliesOnDemand() + == other.getLoadColumnFamiliesOnDemand()); + } result = result && unknownFields.equals(other.unknownFields); return result; } @@ -2769,6 +2827,11 @@ public final class ClientProtos { hash = (37 * hash) + CF_TIME_RANGE_FIELD_NUMBER; hash = (53 * hash) + getCfTimeRangeList().hashCode(); } + if (hasLoadColumnFamiliesOnDemand()) { + hash = (37 * hash) + LOAD_COLUMN_FAMILIES_ON_DEMAND_FIELD_NUMBER; + hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean( + getLoadColumnFamiliesOnDemand()); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -2943,6 +3006,8 @@ public final class ClientProtos { } else { cfTimeRangeBuilder_.clear(); } + loadColumnFamiliesOnDemand_ = false; + bitField0_ = (bitField0_ & ~0x00001000); return this; } @@ -3038,6 +3103,10 @@ public final class ClientProtos { } else { result.cfTimeRange_ = cfTimeRangeBuilder_.build(); } + if (((from_bitField0_ & 0x00001000) == 0x00001000)) { + to_bitField0_ |= 0x00000200; + } + result.loadColumnFamiliesOnDemand_ = loadColumnFamiliesOnDemand_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -3185,6 +3254,9 @@ public final class ClientProtos { } } } + if (other.hasLoadColumnFamiliesOnDemand()) { + setLoadColumnFamiliesOnDemand(other.getLoadColumnFamiliesOnDemand()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -4442,6 +4514,54 @@ public final class ClientProtos { } return cfTimeRangeBuilder_; } + + private boolean loadColumnFamiliesOnDemand_ ; + /** + *
+       * DO NOT add defaults to load_column_families_on_demand.
+       *
+       *
+       * optional bool load_column_families_on_demand = 14;
+       */
+      public boolean hasLoadColumnFamiliesOnDemand() {
+        return ((bitField0_ & 0x00001000) == 0x00001000);
+      }
+      /**
+       *
+       * DO NOT add defaults to load_column_families_on_demand.
+       *
+       *
+       * optional bool load_column_families_on_demand = 14;
+       */
+      public boolean getLoadColumnFamiliesOnDemand() {
+        return loadColumnFamiliesOnDemand_;
+      }
+      /**
+       *
+       * DO NOT add defaults to load_column_families_on_demand.
+       *
+       *
+       * optional bool load_column_families_on_demand = 14;
+       */
+      public Builder setLoadColumnFamiliesOnDemand(boolean value) {
+        bitField0_ |= 0x00001000;
+        loadColumnFamiliesOnDemand_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       *
+       * DO NOT add defaults to load_column_families_on_demand.
+       *
+ * + * optional bool load_column_families_on_demand = 14; + */ + public Builder clearLoadColumnFamiliesOnDemand() { + bitField0_ = (bitField0_ & ~0x00001000); + loadColumnFamiliesOnDemand_ = false; + onChanged(); + return this; + } public final Builder setUnknownFields( final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); @@ -40264,7 +40384,7 @@ public final class ClientProtos { "o\032\017MapReduce.proto\"\037\n\016Authorizations\022\r\n\005" + "label\030\001 \003(\t\"$\n\016CellVisibility\022\022\n\nexpress" + "ion\030\001 \002(\t\"+\n\006Column\022\016\n\006family\030\001 \002(\014\022\021\n\tq" + - "ualifier\030\002 \003(\014\"\226\003\n\003Get\022\013\n\003row\030\001 \002(\014\022 \n\006c" + + "ualifier\030\002 \003(\014\"\276\003\n\003Get\022\013\n\003row\030\001 \002(\014\022 \n\006c" + "olumn\030\002 \003(\0132\020.hbase.pb.Column\022*\n\tattribu" + "te\030\003 \003(\0132\027.hbase.pb.NameBytesPair\022 \n\006fil" + "ter\030\004 \001(\0132\020.hbase.pb.Filter\022\'\n\ntime_rang" + @@ -40274,11 +40394,12 @@ public final class ClientProtos { " \001(\r\022\035\n\016existence_only\030\n \001(\010:\005false\0222\n\013c" + "onsistency\030\014 \001(\0162\025.hbase.pb.Consistency:" + "\006STRONG\0226\n\rcf_time_range\030\r \003(\0132\037.hbase.p" + - "b.ColumnFamilyTimeRange\"\203\001\n\006Result\022\034\n\004ce" + + "b.ColumnFamilyTimeRange\022&\n\036load_column_f" + + "amilies_on_demand\030\016 \001(\010\"\203\001\n\006Result\022\034\n\004ce" + "ll\030\001 \003(\0132\016.hbase.pb.Cell\022\035\n\025associated_c" + "ell_count\030\002 \001(\005\022\016\n\006exists\030\003 \001(\010\022\024\n\005stale" + - "\030\004 \001(\010:\005false\022\026\n\007partial\030\005 \001(\010:\005false\"S\n" + - "\nGetRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R", + "\030\004 \001(\010:\005false\022\026\n\007partial\030\005 \001(\010:\005false\"S\n", + "\nGetRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R" + "egionSpecifier\022\032\n\003get\030\002 \002(\0132\r.hbase.pb.G" + "et\"/\n\013GetResponse\022 \n\006result\030\001 \001(\0132\020.hbas" + "e.pb.Result\"\222\001\n\tCondition\022\013\n\003row\030\001 \002(\014\022\016" + @@ -40287,8 +40408,8 @@ public final class ClientProtos { "\n\ncomparator\030\005 \002(\0132\024.hbase.pb.Comparator" + "\"\364\006\n\rMutationProto\022\013\n\003row\030\001 \001(\014\0229\n\013mutat" + "e_type\030\002 \001(\0162$.hbase.pb.MutationProto.Mu" + - "tationType\0229\n\014column_value\030\003 \003(\0132#.hbase" + - ".pb.MutationProto.ColumnValue\022\021\n\ttimesta", + "tationType\0229\n\014column_value\030\003 \003(\0132#.hbase", + ".pb.MutationProto.ColumnValue\022\021\n\ttimesta" + "mp\030\004 \001(\004\022*\n\tattribute\030\005 \003(\0132\027.hbase.pb.N" + "ameBytesPair\022C\n\ndurability\030\006 \001(\0162\".hbase" + ".pb.MutationProto.Durability:\013USE_DEFAUL" + @@ -40297,8 +40418,8 @@ public final class ClientProtos { "ce\030\t \001(\004\032\371\001\n\013ColumnValue\022\016\n\006family\030\001 \002(\014" + "\022K\n\017qualifier_value\030\002 \003(\01322.hbase.pb.Mut" + "ationProto.ColumnValue.QualifierValue\032\214\001" + - "\n\016QualifierValue\022\021\n\tqualifier\030\001 \001(\014\022\r\n\005v" + - "alue\030\002 \001(\014\022\021\n\ttimestamp\030\003 \001(\004\0227\n\013delete_", + "\n\016QualifierValue\022\021\n\tqualifier\030\001 \001(\014\022\r\n\005v", + "alue\030\002 \001(\014\022\021\n\ttimestamp\030\003 \001(\004\0227\n\013delete_" + "type\030\004 
\001(\0162\".hbase.pb.MutationProto.Dele" + "teType\022\014\n\004tags\030\005 \001(\014\"W\n\nDurability\022\017\n\013US" + "E_DEFAULT\020\000\022\014\n\010SKIP_WAL\020\001\022\r\n\tASYNC_WAL\020\002" + @@ -40307,8 +40428,8 @@ public final class ClientProtos { "\002\022\n\n\006DELETE\020\003\"p\n\nDeleteType\022\026\n\022DELETE_ON" + "E_VERSION\020\000\022\034\n\030DELETE_MULTIPLE_VERSIONS\020" + "\001\022\021\n\rDELETE_FAMILY\020\002\022\031\n\025DELETE_FAMILY_VE" + - "RSION\020\003\"\242\001\n\rMutateRequest\022)\n\006region\030\001 \002(" + - "\0132\031.hbase.pb.RegionSpecifier\022)\n\010mutation", + "RSION\020\003\"\242\001\n\rMutateRequest\022)\n\006region\030\001 \002(", + "\0132\031.hbase.pb.RegionSpecifier\022)\n\010mutation" + "\030\002 \002(\0132\027.hbase.pb.MutationProto\022&\n\tcondi" + "tion\030\003 \001(\0132\023.hbase.pb.Condition\022\023\n\013nonce" + "_group\030\004 \001(\004\"E\n\016MutateResponse\022 \n\006result" + @@ -40317,8 +40438,8 @@ public final class ClientProtos { "Column\022*\n\tattribute\030\002 \003(\0132\027.hbase.pb.Nam" + "eBytesPair\022\021\n\tstart_row\030\003 \001(\014\022\020\n\010stop_ro" + "w\030\004 \001(\014\022 \n\006filter\030\005 \001(\0132\020.hbase.pb.Filte" + - "r\022\'\n\ntime_range\030\006 \001(\0132\023.hbase.pb.TimeRan" + - "ge\022\027\n\014max_versions\030\007 \001(\r:\0011\022\032\n\014cache_blo", + "r\022\'\n\ntime_range\030\006 \001(\0132\023.hbase.pb.TimeRan", + "ge\022\027\n\014max_versions\030\007 \001(\r:\0011\022\032\n\014cache_blo" + "cks\030\010 \001(\010:\004true\022\022\n\nbatch_size\030\t \001(\r\022\027\n\017m" + "ax_result_size\030\n \001(\004\022\023\n\013store_limit\030\013 \001(" + "\r\022\024\n\014store_offset\030\014 \001(\r\022&\n\036load_column_f" + @@ -40327,8 +40448,8 @@ public final class ClientProtos { " \001(\0162\025.hbase.pb.Consistency:\006STRONG\022\017\n\007c" + "aching\030\021 \001(\r\022\035\n\025allow_partial_results\030\022 " + "\001(\010\0226\n\rcf_time_range\030\023 \003(\0132\037.hbase.pb.Co" + - "lumnFamilyTimeRange\"\246\002\n\013ScanRequest\022)\n\006r" + - "egion\030\001 \001(\0132\031.hbase.pb.RegionSpecifier\022\034", + "lumnFamilyTimeRange\"\246\002\n\013ScanRequest\022)\n\006r", + "egion\030\001 \001(\0132\031.hbase.pb.RegionSpecifier\022\034" + "\n\004scan\030\002 \001(\0132\016.hbase.pb.Scan\022\022\n\nscanner_" + "id\030\003 \001(\004\022\026\n\016number_of_rows\030\004 \001(\r\022\025\n\rclos" + "e_scanner\030\005 \001(\010\022\025\n\rnext_call_seq\030\006 \001(\004\022\037" + @@ -40337,8 +40458,8 @@ public final class ClientProtos { "_metrics\030\t \001(\010\022\024\n\005renew\030\n \001(\010:\005false\"\232\002\n" + "\014ScanResponse\022\030\n\020cells_per_result\030\001 \003(\r\022" + "\022\n\nscanner_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(" + - "\010\022\013\n\003ttl\030\004 \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase." 
+ - "pb.Result\022\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag", + "\010\022\013\n\003ttl\030\004 \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.", + "pb.Result\022\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag" + "_per_result\030\007 \003(\010\022\036\n\026more_results_in_reg" + "ion\030\010 \001(\010\022\031\n\021heartbeat_message\030\t \001(\010\022+\n\014" + "scan_metrics\030\n \001(\0132\025.hbase.pb.ScanMetric" + @@ -40347,8 +40468,8 @@ public final class ClientProtos { "path\030\002 \003(\0132).hbase.pb.BulkLoadHFileReque" + "st.FamilyPath\022\026\n\016assign_seq_num\030\003 \001(\010\022+\n" + "\010fs_token\030\004 \001(\0132\031.hbase.pb.DelegationTok" + - "en\022\022\n\nbulk_token\030\005 \001(\t\022\030\n\tcopy_file\030\006 \001(" + - "\010:\005false\032*\n\nFamilyPath\022\016\n\006family\030\001 \002(\014\022\014", + "en\022\022\n\nbulk_token\030\005 \001(\t\022\030\n\tcopy_file\030\006 \001(", + "\010:\005false\032*\n\nFamilyPath\022\016\n\006family\030\001 \002(\014\022\014" + "\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFileResponse\022\016\n" + "\006loaded\030\001 \002(\010\"V\n\017DelegationToken\022\022\n\niden" + "tifier\030\001 \001(\014\022\020\n\010password\030\002 \001(\014\022\014\n\004kind\030\003" + @@ -40357,8 +40478,8 @@ public final class ClientProtos { "ableName\022)\n\006region\030\002 \001(\0132\031.hbase.pb.Regi" + "onSpecifier\"-\n\027PrepareBulkLoadResponse\022\022" + "\n\nbulk_token\030\001 \002(\t\"W\n\026CleanupBulkLoadReq" + - "uest\022\022\n\nbulk_token\030\001 \002(\t\022)\n\006region\030\002 \001(\013" + - "2\031.hbase.pb.RegionSpecifier\"\031\n\027CleanupBu", + "uest\022\022\n\nbulk_token\030\001 \002(\t\022)\n\006region\030\002 \001(\013", + "2\031.hbase.pb.RegionSpecifier\"\031\n\027CleanupBu" + "lkLoadResponse\"a\n\026CoprocessorServiceCall" + "\022\013\n\003row\030\001 \002(\014\022\024\n\014service_name\030\002 \002(\t\022\023\n\013m" + "ethod_name\030\003 \002(\t\022\017\n\007request\030\004 \002(\014\"B\n\030Cop" + @@ -40367,8 +40488,8 @@ public final class ClientProtos { "iceRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Re" + "gionSpecifier\022.\n\004call\030\002 \002(\0132 .hbase.pb.C" + "oprocessorServiceCall\"o\n\032CoprocessorServ" + - "iceResponse\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R" + - "egionSpecifier\022&\n\005value\030\002 \002(\0132\027.hbase.pb", + "iceResponse\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R", + "egionSpecifier\022&\n\005value\030\002 \002(\0132\027.hbase.pb" + ".NameBytesPair\"\226\001\n\006Action\022\r\n\005index\030\001 \001(\r" + "\022)\n\010mutation\030\002 \001(\0132\027.hbase.pb.MutationPr" + "oto\022\032\n\003get\030\003 \001(\0132\r.hbase.pb.Get\0226\n\014servi" + @@ -40377,8 +40498,8 @@ public final class ClientProtos { "\031.hbase.pb.RegionSpecifier\022\016\n\006atomic\030\002 \001" + "(\010\022 \n\006action\030\003 \003(\0132\020.hbase.pb.Action\"c\n\017" + "RegionLoadStats\022\027\n\014memstoreLoad\030\001 \001(\005:\0010" + - "\022\030\n\rheapOccupancy\030\002 \001(\005:\0010\022\035\n\022compaction" + - "Pressure\030\003 \001(\005:\0010\"j\n\024MultiRegionLoadStat", + "\022\030\n\rheapOccupancy\030\002 \001(\005:\0010\022\035\n\022compaction", + "Pressure\030\003 \001(\005:\0010\"j\n\024MultiRegionLoadStat" + "s\022)\n\006region\030\001 \003(\0132\031.hbase.pb.RegionSpeci" + "fier\022\'\n\004stat\030\002 \003(\0132\031.hbase.pb.RegionLoad" + "Stats\"\336\001\n\021ResultOrException\022\r\n\005index\030\001 \001" + @@ -40387,8 +40508,8 @@ 
public final class ClientProtos { "\022:\n\016service_result\030\004 \001(\0132\".hbase.pb.Copr" + "ocessorServiceResult\0220\n\tloadStats\030\005 \001(\0132" + "\031.hbase.pb.RegionLoadStatsB\002\030\001\"x\n\022Region" + - "ActionResult\0226\n\021resultOrException\030\001 \003(\0132" + - "\033.hbase.pb.ResultOrException\022*\n\texceptio", + "ActionResult\0226\n\021resultOrException\030\001 \003(\0132", + "\033.hbase.pb.ResultOrException\022*\n\texceptio" + "n\030\002 \001(\0132\027.hbase.pb.NameBytesPair\"x\n\014Mult" + "iRequest\022,\n\014regionAction\030\001 \003(\0132\026.hbase.p" + "b.RegionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tco" + @@ -40397,8 +40518,8 @@ public final class ClientProtos { "2\034.hbase.pb.RegionActionResult\022\021\n\tproces" + "sed\030\002 \001(\010\0228\n\020regionStatistics\030\003 \001(\0132\036.hb" + "ase.pb.MultiRegionLoadStats*\'\n\013Consisten" + - "cy\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\0012\263\005\n\rClientS" + - "ervice\0222\n\003Get\022\024.hbase.pb.GetRequest\032\025.hb", + "cy\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\0012\263\005\n\rClientS", + "ervice\0222\n\003Get\022\024.hbase.pb.GetRequest\032\025.hb" + "ase.pb.GetResponse\022;\n\006Mutate\022\027.hbase.pb." + "MutateRequest\032\030.hbase.pb.MutateResponse\022" + "5\n\004Scan\022\025.hbase.pb.ScanRequest\032\026.hbase.p" + @@ -40407,8 +40528,8 @@ public final class ClientProtos { "oadHFileResponse\022V\n\017PrepareBulkLoad\022 .hb" + "ase.pb.PrepareBulkLoadRequest\032!.hbase.pb" + ".PrepareBulkLoadResponse\022V\n\017CleanupBulkL" + - "oad\022 .hbase.pb.CleanupBulkLoadRequest\032!." + - "hbase.pb.CleanupBulkLoadResponse\022X\n\013Exec", + "oad\022 .hbase.pb.CleanupBulkLoadRequest\032!.", + "hbase.pb.CleanupBulkLoadResponse\022X\n\013Exec" + "Service\022#.hbase.pb.CoprocessorServiceReq" + "uest\032$.hbase.pb.CoprocessorServiceRespon" + "se\022d\n\027ExecRegionServerService\022#.hbase.pb" + @@ -40458,7 +40579,7 @@ public final class ClientProtos { internal_static_hbase_pb_Get_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_Get_descriptor, - new java.lang.String[] { "Row", "Column", "Attribute", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "StoreLimit", "StoreOffset", "ExistenceOnly", "Consistency", "CfTimeRange", }); + new java.lang.String[] { "Row", "Column", "Attribute", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "StoreLimit", "StoreOffset", "ExistenceOnly", "Consistency", "CfTimeRange", "LoadColumnFamiliesOnDemand", }); internal_static_hbase_pb_Result_descriptor = getDescriptor().getMessageTypes().get(4); internal_static_hbase_pb_Result_fieldAccessorTable = new diff --git a/hbase-protocol-shaded/src/main/protobuf/Client.proto b/hbase-protocol-shaded/src/main/protobuf/Client.proto index bdd28df2573..2feaa261a36 100644 --- a/hbase-protocol-shaded/src/main/protobuf/Client.proto +++ b/hbase-protocol-shaded/src/main/protobuf/Client.proto @@ -83,6 +83,7 @@ message Get { optional Consistency consistency = 12 [default = STRONG]; repeated ColumnFamilyTimeRange cf_time_range = 13; + optional bool load_column_families_on_demand = 14; /* DO NOT add defaults to load_column_families_on_demand. 
*/ } message Result { diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java index f0141df886d..dc050e8de83 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java @@ -1960,6 +1960,24 @@ public final class ClientProtos { */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRangeOrBuilder getCfTimeRangeOrBuilder( int index); + + // optional bool load_column_families_on_demand = 14; + /** + * optional bool load_column_families_on_demand = 14; + * + *
+     * DO NOT add defaults to load_column_families_on_demand.
+     *
+     */
+    boolean hasLoadColumnFamiliesOnDemand();
+    /**
+     * optional bool load_column_families_on_demand = 14;
+     *
+     *
+     * DO NOT add defaults to load_column_families_on_demand.
+     *
+ */ + boolean getLoadColumnFamiliesOnDemand(); } /** * Protobuf type {@code hbase.pb.Get} @@ -2110,6 +2128,11 @@ public final class ClientProtos { cfTimeRange_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.PARSER, extensionRegistry)); break; } + case 112: { + bitField0_ |= 0x00000200; + loadColumnFamiliesOnDemand_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -2433,6 +2456,30 @@ public final class ClientProtos { return cfTimeRange_.get(index); } + // optional bool load_column_families_on_demand = 14; + public static final int LOAD_COLUMN_FAMILIES_ON_DEMAND_FIELD_NUMBER = 14; + private boolean loadColumnFamiliesOnDemand_; + /** + * optional bool load_column_families_on_demand = 14; + * + *
+     * DO NOT add defaults to load_column_families_on_demand.
+     *
+     */
+    public boolean hasLoadColumnFamiliesOnDemand() {
+      return ((bitField0_ & 0x00000200) == 0x00000200);
+    }
+    /**
+     * optional bool load_column_families_on_demand = 14;
+     *
+     *
+     * DO NOT add defaults to load_column_families_on_demand.
+     *
+ */ + public boolean getLoadColumnFamiliesOnDemand() { + return loadColumnFamiliesOnDemand_; + } + private void initFields() { row_ = com.google.protobuf.ByteString.EMPTY; column_ = java.util.Collections.emptyList(); @@ -2446,6 +2493,7 @@ public final class ClientProtos { existenceOnly_ = false; consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG; cfTimeRange_ = java.util.Collections.emptyList(); + loadColumnFamiliesOnDemand_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -2523,6 +2571,9 @@ public final class ClientProtos { for (int i = 0; i < cfTimeRange_.size(); i++) { output.writeMessage(13, cfTimeRange_.get(i)); } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + output.writeBool(14, loadColumnFamiliesOnDemand_); + } getUnknownFields().writeTo(output); } @@ -2580,6 +2631,10 @@ public final class ClientProtos { size += com.google.protobuf.CodedOutputStream .computeMessageSize(13, cfTimeRange_.get(i)); } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(14, loadColumnFamiliesOnDemand_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -2654,6 +2709,11 @@ public final class ClientProtos { } result = result && getCfTimeRangeList() .equals(other.getCfTimeRangeList()); + result = result && (hasLoadColumnFamiliesOnDemand() == other.hasLoadColumnFamiliesOnDemand()); + if (hasLoadColumnFamiliesOnDemand()) { + result = result && (getLoadColumnFamiliesOnDemand() + == other.getLoadColumnFamiliesOnDemand()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -2715,6 +2775,10 @@ public final class ClientProtos { hash = (37 * hash) + CF_TIME_RANGE_FIELD_NUMBER; hash = (53 * hash) + getCfTimeRangeList().hashCode(); } + if (hasLoadColumnFamiliesOnDemand()) { + hash = (37 * hash) + LOAD_COLUMN_FAMILIES_ON_DEMAND_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getLoadColumnFamiliesOnDemand()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -2880,6 +2944,8 @@ public final class ClientProtos { } else { cfTimeRangeBuilder_.clear(); } + loadColumnFamiliesOnDemand_ = false; + bitField0_ = (bitField0_ & ~0x00001000); return this; } @@ -2979,6 +3045,10 @@ public final class ClientProtos { } else { result.cfTimeRange_ = cfTimeRangeBuilder_.build(); } + if (((from_bitField0_ & 0x00001000) == 0x00001000)) { + to_bitField0_ |= 0x00000200; + } + result.loadColumnFamiliesOnDemand_ = loadColumnFamiliesOnDemand_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -3100,6 +3170,9 @@ public final class ClientProtos { } } } + if (other.hasLoadColumnFamiliesOnDemand()) { + setLoadColumnFamiliesOnDemand(other.getLoadColumnFamiliesOnDemand()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -4366,6 +4439,55 @@ public final class ClientProtos { return cfTimeRangeBuilder_; } + // optional bool load_column_families_on_demand = 14; + private boolean loadColumnFamiliesOnDemand_ ; + /** + * optional bool load_column_families_on_demand = 14; + * + *
+       * DO NOT add defaults to load_column_families_on_demand.
+       *
+       */
+      public boolean hasLoadColumnFamiliesOnDemand() {
+        return ((bitField0_ & 0x00001000) == 0x00001000);
+      }
+      /**
+       * optional bool load_column_families_on_demand = 14;
+       *
+       *
+       * DO NOT add defaults to load_column_families_on_demand.
+       *
+       */
+      public boolean getLoadColumnFamiliesOnDemand() {
+        return loadColumnFamiliesOnDemand_;
+      }
+      /**
+       * optional bool load_column_families_on_demand = 14;
+       *
+       *
+       * DO NOT add defaults to load_column_families_on_demand.
+       *
+       */
+      public Builder setLoadColumnFamiliesOnDemand(boolean value) {
+        bitField0_ |= 0x00001000;
+        loadColumnFamiliesOnDemand_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * optional bool load_column_families_on_demand = 14;
+       *
+       *
+       * DO NOT add defaults to load_column_families_on_demand.
+       *
+ */ + public Builder clearLoadColumnFamiliesOnDemand() { + bitField0_ = (bitField0_ & ~0x00001000); + loadColumnFamiliesOnDemand_ = false; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:hbase.pb.Get) } @@ -39332,7 +39454,7 @@ public final class ClientProtos { "o\032\017MapReduce.proto\"\037\n\016Authorizations\022\r\n\005" + "label\030\001 \003(\t\"$\n\016CellVisibility\022\022\n\nexpress" + "ion\030\001 \002(\t\"+\n\006Column\022\016\n\006family\030\001 \002(\014\022\021\n\tq" + - "ualifier\030\002 \003(\014\"\226\003\n\003Get\022\013\n\003row\030\001 \002(\014\022 \n\006c" + + "ualifier\030\002 \003(\014\"\276\003\n\003Get\022\013\n\003row\030\001 \002(\014\022 \n\006c" + "olumn\030\002 \003(\0132\020.hbase.pb.Column\022*\n\tattribu" + "te\030\003 \003(\0132\027.hbase.pb.NameBytesPair\022 \n\006fil" + "ter\030\004 \001(\0132\020.hbase.pb.Filter\022\'\n\ntime_rang" + @@ -39342,11 +39464,12 @@ public final class ClientProtos { " \001(\r\022\035\n\016existence_only\030\n \001(\010:\005false\0222\n\013c" + "onsistency\030\014 \001(\0162\025.hbase.pb.Consistency:" + "\006STRONG\0226\n\rcf_time_range\030\r \003(\0132\037.hbase.p" + - "b.ColumnFamilyTimeRange\"\203\001\n\006Result\022\034\n\004ce" + + "b.ColumnFamilyTimeRange\022&\n\036load_column_f" + + "amilies_on_demand\030\016 \001(\010\"\203\001\n\006Result\022\034\n\004ce" + "ll\030\001 \003(\0132\016.hbase.pb.Cell\022\035\n\025associated_c" + "ell_count\030\002 \001(\005\022\016\n\006exists\030\003 \001(\010\022\024\n\005stale" + - "\030\004 \001(\010:\005false\022\026\n\007partial\030\005 \001(\010:\005false\"S\n" + - "\nGetRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R", + "\030\004 \001(\010:\005false\022\026\n\007partial\030\005 \001(\010:\005false\"S\n", + "\nGetRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R" + "egionSpecifier\022\032\n\003get\030\002 \002(\0132\r.hbase.pb.G" + "et\"/\n\013GetResponse\022 \n\006result\030\001 \001(\0132\020.hbas" + "e.pb.Result\"\222\001\n\tCondition\022\013\n\003row\030\001 \002(\014\022\016" + @@ -39355,8 +39478,8 @@ public final class ClientProtos { "\n\ncomparator\030\005 \002(\0132\024.hbase.pb.Comparator" + "\"\364\006\n\rMutationProto\022\013\n\003row\030\001 \001(\014\0229\n\013mutat" + "e_type\030\002 \001(\0162$.hbase.pb.MutationProto.Mu" + - "tationType\0229\n\014column_value\030\003 \003(\0132#.hbase" + - ".pb.MutationProto.ColumnValue\022\021\n\ttimesta", + "tationType\0229\n\014column_value\030\003 \003(\0132#.hbase", + ".pb.MutationProto.ColumnValue\022\021\n\ttimesta" + "mp\030\004 \001(\004\022*\n\tattribute\030\005 \003(\0132\027.hbase.pb.N" + "ameBytesPair\022C\n\ndurability\030\006 \001(\0162\".hbase" + ".pb.MutationProto.Durability:\013USE_DEFAUL" + @@ -39365,8 +39488,8 @@ public final class ClientProtos { "ce\030\t \001(\004\032\371\001\n\013ColumnValue\022\016\n\006family\030\001 \002(\014" + "\022K\n\017qualifier_value\030\002 \003(\01322.hbase.pb.Mut" + "ationProto.ColumnValue.QualifierValue\032\214\001" + - "\n\016QualifierValue\022\021\n\tqualifier\030\001 \001(\014\022\r\n\005v" + - "alue\030\002 \001(\014\022\021\n\ttimestamp\030\003 \001(\004\0227\n\013delete_", + "\n\016QualifierValue\022\021\n\tqualifier\030\001 \001(\014\022\r\n\005v", + "alue\030\002 \001(\014\022\021\n\ttimestamp\030\003 \001(\004\0227\n\013delete_" + "type\030\004 \001(\0162\".hbase.pb.MutationProto.Dele" + "teType\022\014\n\004tags\030\005 \001(\014\"W\n\nDurability\022\017\n\013US" + 
"E_DEFAULT\020\000\022\014\n\010SKIP_WAL\020\001\022\r\n\tASYNC_WAL\020\002" + @@ -39375,8 +39498,8 @@ public final class ClientProtos { "\002\022\n\n\006DELETE\020\003\"p\n\nDeleteType\022\026\n\022DELETE_ON" + "E_VERSION\020\000\022\034\n\030DELETE_MULTIPLE_VERSIONS\020" + "\001\022\021\n\rDELETE_FAMILY\020\002\022\031\n\025DELETE_FAMILY_VE" + - "RSION\020\003\"\242\001\n\rMutateRequest\022)\n\006region\030\001 \002(" + - "\0132\031.hbase.pb.RegionSpecifier\022)\n\010mutation", + "RSION\020\003\"\242\001\n\rMutateRequest\022)\n\006region\030\001 \002(", + "\0132\031.hbase.pb.RegionSpecifier\022)\n\010mutation" + "\030\002 \002(\0132\027.hbase.pb.MutationProto\022&\n\tcondi" + "tion\030\003 \001(\0132\023.hbase.pb.Condition\022\023\n\013nonce" + "_group\030\004 \001(\004\"E\n\016MutateResponse\022 \n\006result" + @@ -39385,8 +39508,8 @@ public final class ClientProtos { "Column\022*\n\tattribute\030\002 \003(\0132\027.hbase.pb.Nam" + "eBytesPair\022\021\n\tstart_row\030\003 \001(\014\022\020\n\010stop_ro" + "w\030\004 \001(\014\022 \n\006filter\030\005 \001(\0132\020.hbase.pb.Filte" + - "r\022\'\n\ntime_range\030\006 \001(\0132\023.hbase.pb.TimeRan" + - "ge\022\027\n\014max_versions\030\007 \001(\r:\0011\022\032\n\014cache_blo", + "r\022\'\n\ntime_range\030\006 \001(\0132\023.hbase.pb.TimeRan", + "ge\022\027\n\014max_versions\030\007 \001(\r:\0011\022\032\n\014cache_blo" + "cks\030\010 \001(\010:\004true\022\022\n\nbatch_size\030\t \001(\r\022\027\n\017m" + "ax_result_size\030\n \001(\004\022\023\n\013store_limit\030\013 \001(" + "\r\022\024\n\014store_offset\030\014 \001(\r\022&\n\036load_column_f" + @@ -39395,8 +39518,8 @@ public final class ClientProtos { " \001(\0162\025.hbase.pb.Consistency:\006STRONG\022\017\n\007c" + "aching\030\021 \001(\r\022\035\n\025allow_partial_results\030\022 " + "\001(\010\0226\n\rcf_time_range\030\023 \003(\0132\037.hbase.pb.Co" + - "lumnFamilyTimeRange\"\246\002\n\013ScanRequest\022)\n\006r" + - "egion\030\001 \001(\0132\031.hbase.pb.RegionSpecifier\022\034", + "lumnFamilyTimeRange\"\246\002\n\013ScanRequest\022)\n\006r", + "egion\030\001 \001(\0132\031.hbase.pb.RegionSpecifier\022\034" + "\n\004scan\030\002 \001(\0132\016.hbase.pb.Scan\022\022\n\nscanner_" + "id\030\003 \001(\004\022\026\n\016number_of_rows\030\004 \001(\r\022\025\n\rclos" + "e_scanner\030\005 \001(\010\022\025\n\rnext_call_seq\030\006 \001(\004\022\037" + @@ -39405,8 +39528,8 @@ public final class ClientProtos { "_metrics\030\t \001(\010\022\024\n\005renew\030\n \001(\010:\005false\"\232\002\n" + "\014ScanResponse\022\030\n\020cells_per_result\030\001 \003(\r\022" + "\022\n\nscanner_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(" + - "\010\022\013\n\003ttl\030\004 \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase." 
+ - "pb.Result\022\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag", + "\010\022\013\n\003ttl\030\004 \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.", + "pb.Result\022\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag" + "_per_result\030\007 \003(\010\022\036\n\026more_results_in_reg" + "ion\030\010 \001(\010\022\031\n\021heartbeat_message\030\t \001(\010\022+\n\014" + "scan_metrics\030\n \001(\0132\025.hbase.pb.ScanMetric" + @@ -39415,8 +39538,8 @@ public final class ClientProtos { "path\030\002 \003(\0132).hbase.pb.BulkLoadHFileReque" + "st.FamilyPath\022\026\n\016assign_seq_num\030\003 \001(\010\022+\n" + "\010fs_token\030\004 \001(\0132\031.hbase.pb.DelegationTok" + - "en\022\022\n\nbulk_token\030\005 \001(\t\022\030\n\tcopy_file\030\006 \001(" + - "\010:\005false\032*\n\nFamilyPath\022\016\n\006family\030\001 \002(\014\022\014", + "en\022\022\n\nbulk_token\030\005 \001(\t\022\030\n\tcopy_file\030\006 \001(", + "\010:\005false\032*\n\nFamilyPath\022\016\n\006family\030\001 \002(\014\022\014" + "\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFileResponse\022\016\n" + "\006loaded\030\001 \002(\010\"V\n\017DelegationToken\022\022\n\niden" + "tifier\030\001 \001(\014\022\020\n\010password\030\002 \001(\014\022\014\n\004kind\030\003" + @@ -39425,8 +39548,8 @@ public final class ClientProtos { "ableName\022)\n\006region\030\002 \001(\0132\031.hbase.pb.Regi" + "onSpecifier\"-\n\027PrepareBulkLoadResponse\022\022" + "\n\nbulk_token\030\001 \002(\t\"W\n\026CleanupBulkLoadReq" + - "uest\022\022\n\nbulk_token\030\001 \002(\t\022)\n\006region\030\002 \001(\013" + - "2\031.hbase.pb.RegionSpecifier\"\031\n\027CleanupBu", + "uest\022\022\n\nbulk_token\030\001 \002(\t\022)\n\006region\030\002 \001(\013", + "2\031.hbase.pb.RegionSpecifier\"\031\n\027CleanupBu" + "lkLoadResponse\"a\n\026CoprocessorServiceCall" + "\022\013\n\003row\030\001 \002(\014\022\024\n\014service_name\030\002 \002(\t\022\023\n\013m" + "ethod_name\030\003 \002(\t\022\017\n\007request\030\004 \002(\014\"B\n\030Cop" + @@ -39435,8 +39558,8 @@ public final class ClientProtos { "iceRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Re" + "gionSpecifier\022.\n\004call\030\002 \002(\0132 .hbase.pb.C" + "oprocessorServiceCall\"o\n\032CoprocessorServ" + - "iceResponse\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R" + - "egionSpecifier\022&\n\005value\030\002 \002(\0132\027.hbase.pb", + "iceResponse\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R", + "egionSpecifier\022&\n\005value\030\002 \002(\0132\027.hbase.pb" + ".NameBytesPair\"\226\001\n\006Action\022\r\n\005index\030\001 \001(\r" + "\022)\n\010mutation\030\002 \001(\0132\027.hbase.pb.MutationPr" + "oto\022\032\n\003get\030\003 \001(\0132\r.hbase.pb.Get\0226\n\014servi" + @@ -39445,8 +39568,8 @@ public final class ClientProtos { "\031.hbase.pb.RegionSpecifier\022\016\n\006atomic\030\002 \001" + "(\010\022 \n\006action\030\003 \003(\0132\020.hbase.pb.Action\"c\n\017" + "RegionLoadStats\022\027\n\014memstoreLoad\030\001 \001(\005:\0010" + - "\022\030\n\rheapOccupancy\030\002 \001(\005:\0010\022\035\n\022compaction" + - "Pressure\030\003 \001(\005:\0010\"j\n\024MultiRegionLoadStat", + "\022\030\n\rheapOccupancy\030\002 \001(\005:\0010\022\035\n\022compaction", + "Pressure\030\003 \001(\005:\0010\"j\n\024MultiRegionLoadStat" + "s\022)\n\006region\030\001 \003(\0132\031.hbase.pb.RegionSpeci" + "fier\022\'\n\004stat\030\002 \003(\0132\031.hbase.pb.RegionLoad" + "Stats\"\336\001\n\021ResultOrException\022\r\n\005index\030\001 \001" + @@ -39455,8 +39578,8 @@ 
public final class ClientProtos { "\022:\n\016service_result\030\004 \001(\0132\".hbase.pb.Copr" + "ocessorServiceResult\0220\n\tloadStats\030\005 \001(\0132" + "\031.hbase.pb.RegionLoadStatsB\002\030\001\"x\n\022Region" + - "ActionResult\0226\n\021resultOrException\030\001 \003(\0132" + - "\033.hbase.pb.ResultOrException\022*\n\texceptio", + "ActionResult\0226\n\021resultOrException\030\001 \003(\0132", + "\033.hbase.pb.ResultOrException\022*\n\texceptio" + "n\030\002 \001(\0132\027.hbase.pb.NameBytesPair\"x\n\014Mult" + "iRequest\022,\n\014regionAction\030\001 \003(\0132\026.hbase.p" + "b.RegionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tco" + @@ -39465,8 +39588,8 @@ public final class ClientProtos { "2\034.hbase.pb.RegionActionResult\022\021\n\tproces" + "sed\030\002 \001(\010\0228\n\020regionStatistics\030\003 \001(\0132\036.hb" + "ase.pb.MultiRegionLoadStats*\'\n\013Consisten" + - "cy\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\0012\263\005\n\rClientS" + - "ervice\0222\n\003Get\022\024.hbase.pb.GetRequest\032\025.hb", + "cy\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\0012\263\005\n\rClientS", + "ervice\0222\n\003Get\022\024.hbase.pb.GetRequest\032\025.hb" + "ase.pb.GetResponse\022;\n\006Mutate\022\027.hbase.pb." + "MutateRequest\032\030.hbase.pb.MutateResponse\022" + "5\n\004Scan\022\025.hbase.pb.ScanRequest\032\026.hbase.p" + @@ -39475,8 +39598,8 @@ public final class ClientProtos { "oadHFileResponse\022V\n\017PrepareBulkLoad\022 .hb" + "ase.pb.PrepareBulkLoadRequest\032!.hbase.pb" + ".PrepareBulkLoadResponse\022V\n\017CleanupBulkL" + - "oad\022 .hbase.pb.CleanupBulkLoadRequest\032!." + - "hbase.pb.CleanupBulkLoadResponse\022X\n\013Exec", + "oad\022 .hbase.pb.CleanupBulkLoadRequest\032!.", + "hbase.pb.CleanupBulkLoadResponse\022X\n\013Exec" + "Service\022#.hbase.pb.CoprocessorServiceReq" + "uest\032$.hbase.pb.CoprocessorServiceRespon" + "se\022d\n\027ExecRegionServerService\022#.hbase.pb" + @@ -39514,7 +39637,7 @@ public final class ClientProtos { internal_static_hbase_pb_Get_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_Get_descriptor, - new java.lang.String[] { "Row", "Column", "Attribute", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "StoreLimit", "StoreOffset", "ExistenceOnly", "Consistency", "CfTimeRange", }); + new java.lang.String[] { "Row", "Column", "Attribute", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "StoreLimit", "StoreOffset", "ExistenceOnly", "Consistency", "CfTimeRange", "LoadColumnFamiliesOnDemand", }); internal_static_hbase_pb_Result_descriptor = getDescriptor().getMessageTypes().get(4); internal_static_hbase_pb_Result_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto index 6c0c00c1a03..237b9320c64 100644 --- a/hbase-protocol/src/main/protobuf/Client.proto +++ b/hbase-protocol/src/main/protobuf/Client.proto @@ -83,6 +83,7 @@ message Get { optional Consistency consistency = 12 [default = STRONG]; repeated ColumnFamilyTimeRange cf_time_range = 13; + optional bool load_column_families_on_demand = 14; /* DO NOT add defaults to load_column_families_on_demand. 
*/ } message Result { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index d6ec5c637c9..ccc92d1eff8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -6872,7 +6872,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } long before = EnvironmentEdgeManager.currentTime(); Scan scan = new Scan(get); - + if (scan.getLoadColumnFamiliesOnDemandValue() == null) { + scan.setLoadColumnFamiliesOnDemand(isLoadingCfsOnDemandDefault()); + } RegionScanner scanner = null; try { scanner = getScanner(scan, null, nonceGroup, nonce); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index a7eb6062ada..8f57710bd05 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -2250,7 +2250,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } long before = EnvironmentEdgeManager.currentTime(); Scan scan = new Scan(get); - + if (scan.getLoadColumnFamiliesOnDemandValue() == null) { + scan.setLoadColumnFamiliesOnDemand(region.isLoadingCfsOnDemandDefault()); + } RegionScanner scanner = null; try { scanner = region.getScanner(scan); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index 80337a2c886..f7d3a5e2ec6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -107,8 +107,6 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -5420,6 +5418,47 @@ public class TestFromClientSide { TEST_UTIL.deleteTable(TABLE); } + @Test + public void testEmptyFilterList() throws Exception { + // Test Initialization. 
+ TableName TABLE = TableName.valueOf("testEmptyFilterList"); + Table table = TEST_UTIL.createTable(TABLE, FAMILY); + + // Insert one row each region + Put put = new Put(Bytes.toBytes("row")); + put.addColumn(FAMILY, QUALIFIER, VALUE); + table.put(put); + + List scanResults = new LinkedList<>(); + Scan scan = new Scan(); + scan.setFilter(new FilterList()); + try (ResultScanner scanner = table.getScanner(scan)) { + for (Result r : scanner) { + scanResults.add(r); + } + } + + Get g = new Get(Bytes.toBytes("row")); + g.setFilter(new FilterList()); + Result getResult = table.get(g); + if (scanResults.isEmpty()) { + assertTrue(getResult.isEmpty()); + } else if(scanResults.size() == 1) { + Result scanResult = scanResults.get(0); + assertEquals(scanResult.rawCells().length, getResult.rawCells().length); + for (int i = 0; i != scanResult.rawCells().length; ++i) { + Cell scanCell = scanResult.rawCells()[i]; + Cell getCell = getResult.rawCells()[i]; + assertEquals(0, Bytes.compareTo(CellUtil.cloneRow(scanCell), CellUtil.cloneRow(getCell))); + assertEquals(0, Bytes.compareTo(CellUtil.cloneFamily(scanCell), CellUtil.cloneFamily(getCell))); + assertEquals(0, Bytes.compareTo(CellUtil.cloneQualifier(scanCell), CellUtil.cloneQualifier(getCell))); + assertEquals(0, Bytes.compareTo(CellUtil.cloneValue(scanCell), CellUtil.cloneValue(getCell))); + } + } else { + fail("The result retrieved from SCAN and Get should be same"); + } + } + @Test public void testSmallScan() throws Exception { // Test Initialization. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java index 11d5eda16e7..b002b575793 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java @@ -82,7 +82,7 @@ public class TestProtobufUtil { columnBuilder.clear(); columnBuilder.setFamily(ByteString.copyFromUtf8("f2")); getBuilder.addColumn(columnBuilder.build()); - + getBuilder.setLoadColumnFamiliesOnDemand(true); ClientProtos.Get proto = getBuilder.build(); // default fields assertEquals(1, proto.getMaxVersions());
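
The behavior this patch targets is easiest to see from the client side. Below is a minimal, illustrative Java sketch (mirroring the new testEmptyFilterList test) that issues a Get and an equivalent Scan, both carrying an empty FilterList, and checks that they agree on whether the row is returned. The connection setup, table name "t1", and row bytes are placeholders, not values taken from the patch.

// Illustrative sketch only (not part of the patch): exercises the consistency the
// patch establishes between Get and Scan when an empty FilterList is attached.
// The configuration, table name "t1", and row bytes are assumptions.
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.util.Bytes;

public class EmptyFilterListSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("t1"))) {
      byte[] row = Bytes.toBytes("row");

      // Get carrying an empty FilterList.
      Get get = new Get(row);
      get.setFilter(new FilterList());
      Result viaGet = table.get(get);

      // Equivalent Scan built from the same Get, so it carries the same filter.
      Scan scan = new Scan(get);
      Result viaScan;
      try (ResultScanner scanner = table.getScanner(scan)) {
        viaScan = scanner.next();
      }

      // With HBASE-16731 applied, both paths agree on whether the row is returned.
      boolean getEmpty = viaGet.isEmpty();
      boolean scanEmpty = viaScan == null || viaScan.isEmpty();
      System.out.println("get empty=" + getEmpty + ", scan empty=" + scanEmpty);
    }
  }
}

The server-side part of the fix (HRegion.get and RSRpcServices) makes this agreement possible by applying the region's on-demand-loading default to the internally built Scan only when the Get left the value unset.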
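
A second illustrative sketch: because setLoadColumnFamiliesOnDemand now lives on Query, it can be set on a Get as well (for example alongside a SingleColumnValueFilter with filterIfMissing, the case the moved javadoc calls out), and a Scan built from that Get carries the raw value along, as the new TestScan#testGetToScan asserts. The family, qualifier, and value bytes here are placeholders.

// Illustrative sketch only (not part of the patch): the on-demand column family
// loading switch now lives on Query, so a Get can carry it too, and Scan(Get)
// preserves the raw (nullable) value. Family/qualifier/value bytes are assumptions.
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class OnDemandCfLoadingSketch {
  public static void main(String[] args) {
    // Column-specific filter of the kind the Query javadoc cites as the motivating case.
    SingleColumnValueFilter filter = new SingleColumnValueFilter(
        Bytes.toBytes("essential"), Bytes.toBytes("q"), CompareOp.EQUAL, Bytes.toBytes("v"));
    filter.setFilterIfMissing(true);

    Get get = new Get(Bytes.toBytes("row"));
    get.setFilter(filter);
    get.setLoadColumnFamiliesOnDemand(true); // Get-level override added by this patch

    // Scan(Get) copies the raw Boolean, which TestScan#testGetToScan verifies.
    Scan scan = new Scan(get);
    System.out.println(scan.getLoadColumnFamiliesOnDemandValue()); // true
    System.out.println(scan.doLoadColumnFamiliesOnDemand());       // true
  }
}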