HBASE-14755 Fix some broken links and HTML problems
parent 68b94886a5
commit bfa3689190
@@ -44,7 +44,7 @@ import org.apache.hadoop.mapreduce.Partitioner;
  * <p>This class is not suitable as partitioner creating hfiles
  * for incremental bulk loads as region spread will likely change between time of
  * hfile creation and load time. See {@link LoadIncrementalHFiles}
- * and <a href="http://hbase.apache.org/docs/current/bulk-loads.html">Bulk Load</a>.
+ * and <a href="http://hbase.apache.org/book.html#arch.bulk.load">Bulk Load</a>.
  *
  * @param <KEY> The type of the key.
  * @param <VALUE> The type of the value.
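For readers following the corrected link, a complete bulk load is a small amount of driver code. A minimal sketch, assuming the HBase 1.x client API; the table name `my_table` and the HFile directory `/tmp/hfiles` are hypothetical placeholders:

[source,java]
----
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

public class BulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("my_table"); // hypothetical table
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(name);
         RegionLocator locator = conn.getRegionLocator(name);
         Admin admin = conn.getAdmin()) {
      // Hand the HFiles produced by the MapReduce job over to the regions
      // that currently own the corresponding key ranges.
      new LoadIncrementalHFiles(conf).doBulkLoad(
          new Path("/tmp/hfiles"), admin, table, locator);
    }
  }
}
----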
pom.xml
@@ -2754,6 +2754,9 @@
           <javadocDir>${project.reporting.outputDirectory}/devapidocs</javadocDir>
           <testJavadocDir>${project.reporting.outputDirectory}/testdevapidocs</testJavadocDir>
           <destDir>${project.reporting.outputDirectory}/xref</destDir>
+          <excludes>
+            <exclude>**/generated/*</exclude>
+          </excludes>
         </configuration>
       </plugin>
@@ -2767,7 +2770,6 @@
           <id>devapi</id>
           <reports>
             <report>aggregate</report>
-            <report>test-aggregate</report>
          </reports>
           <configuration>
             <destDir>devapidocs</destDir>
@@ -2778,7 +2780,8 @@
             <exclude>**/protobuf/*</exclude>
             <exclude>**/*.scala</exclude>
           </sourceFileExcludes>
-          <excludePackageNames>*.generated.master:*.generated:org.apache.hadoop.hbase.tmpl.common:com.google.protobuf:org.apache.hadoop.hbase.spark</excludePackageNames>
+          <excludePackageNames>org.apache.hadoop.hbase.tmpl.common:com.google.protobuf:org.apache.hadoop.hbase.spark:org.apache.hadoop.hbase.generated*</excludePackageNames>
           <show>private</show> <!-- (shows all classes and members) -->
           <quiet>true</quiet>
           <linksource>true</linksource>
           <sourcetab>2</sourcetab>
@@ -2814,13 +2817,14 @@
         <configuration>
           <destDir>testdevapidocs</destDir>
           <name>Developer API</name>
-          <description>The full HBase API, including private and unstable APIs</description>
+          <description>The full HBase API test code, including private and unstable APIs</description>
           <sourceFileExcludes>
             <exclude>**/generated/*</exclude>
             <exclude>**/protobuf/*</exclude>
             <exclude>**/*.scala</exclude>
           </sourceFileExcludes>
-          <excludePackageNames>*.generated.master:*.generated:org.apache.hadoop.hbase.tmpl.common:com.google.protobuf:org.apache.hadoop.hbase.spark</excludePackageNames>
+          <excludePackageNames>org.apache.hadoop.hbase.tmpl.common:com.google.protobuf:org.apache.hadoop.hbase.spark:org.apache.hadoop.hbase.generated*</excludePackageNames>
           <show>private</show> <!-- (shows all classes and members) -->
           <quiet>true</quiet>
           <linksource>true</linksource>
           <sourcetab>2</sourcetab>
@@ -2869,7 +2873,7 @@
           <name>User API</name>
           <description>The HBase Application Programmer's API</description>
           <excludePackageNames>
-            org.apache.hadoop.hbase.backup*:org.apache.hadoop.hbase.catalog:org.apache.hadoop.hbase.client.coprocessor:org.apache.hadoop.hbase.client.metrics:org.apache.hadoop.hbase.codec*:org.apache.hadoop.hbase.constraint:org.apache.hadoop.hbase.coprocessor.*:org.apache.hadoop.hbase.executor:org.apache.hadoop.hbase.fs:*.generated.*:org.apache.hadoop.hbase.io.hfile.*:org.apache.hadoop.hbase.mapreduce.hadoopbackport:org.apache.hadoop.hbase.mapreduce.replication:org.apache.hadoop.hbase.master.*:org.apache.hadoop.hbase.metrics*:org.apache.hadoop.hbase.migration:org.apache.hadoop.hbase.monitoring:org.apache.hadoop.hbase.p*:org.apache.hadoop.hbase.regionserver.compactions:org.apache.hadoop.hbase.regionserver.handler:org.apache.hadoop.hbase.regionserver.snapshot:org.apache.hadoop.hbase.replication.*:org.apache.hadoop.hbase.rest.filter:org.apache.hadoop.hbase.rest.model:org.apache.hadoop.hbase.rest.p*:org.apache.hadoop.hbase.security.*:org.apache.hadoop.hbase.thrift*:org.apache.hadoop.hbase.tmpl.*:org.apache.hadoop.hbase.tool:org.apache.hadoop.hbase.trace:org.apache.hadoop.hbase.util.byterange*:org.apache.hadoop.hbase.util.test:org.apache.hadoop.hbase.util.vint:org.apache.hadoop.hbase.zookeeper.lock:org.apache.hadoop.metrics2*
+            org.apache.hadoop.hbase.backup*:org.apache.hadoop.hbase.catalog:org.apache.hadoop.hbase.client.coprocessor:org.apache.hadoop.hbase.client.metrics:org.apache.hadoop.hbase.codec*:org.apache.hadoop.hbase.constraint:org.apache.hadoop.hbase.coprocessor.*:org.apache.hadoop.hbase.executor:org.apache.hadoop.hbase.fs:*.generated.*:org.apache.hadoop.hbase.io.hfile.*:org.apache.hadoop.hbase.mapreduce.hadoopbackport:org.apache.hadoop.hbase.mapreduce.replication:org.apache.hadoop.hbase.master.*:org.apache.hadoop.hbase.metrics*:org.apache.hadoop.hbase.migration:org.apache.hadoop.hbase.monitoring:org.apache.hadoop.hbase.p*:org.apache.hadoop.hbase.regionserver.compactions:org.apache.hadoop.hbase.regionserver.handler:org.apache.hadoop.hbase.regionserver.snapshot:org.apache.hadoop.hbase.replication.*:org.apache.hadoop.hbase.rest.filter:org.apache.hadoop.hbase.rest.model:org.apache.hadoop.hbase.rest.p*:org.apache.hadoop.hbase.security.*:org.apache.hadoop.hbase.thrift*:org.apache.hadoop.hbase.tmpl.*:org.apache.hadoop.hbase.tool:org.apache.hadoop.hbase.trace:org.apache.hadoop.hbase.util.byterange*:org.apache.hadoop.hbase.util.test:org.apache.hadoop.hbase.util.vint:org.apache.hadoop.hbase.zookeeper.lock:org.apache.hadoop.metrics2*:org.apache.hadoop.hbase.io.compress*
           </excludePackageNames>
           <!-- switch on dependency-driven aggregation -->
           <includeDependencySources>false</includeDependencySources>
@@ -2877,11 +2881,8 @@
           <!-- include ONLY dependencies I control -->
           <dependencySourceInclude>org.apache.hbase:hbase-annotations</dependencySourceInclude>
         </dependencySourceIncludes>
           <outputDirectory>${project.reporting.outputDirectory}/devapidocs</outputDirectory>
           <name>Developer API</name>
           <description>The full HBase API, including private and unstable APIs</description>
           <sourceFilesExclude>**/generated/*</sourceFilesExclude>
           <excludePackageNames>org.apache.hadoop.hbase.generated.master:org.apache.hadoop.hbase.protobuf.generated:org.apache.hadoop.hbase.tmpl.common</excludePackageNames>
           <show>protected</show> <!-- (shows only public and protected classes and members) -->
           <quiet>true</quiet>
           <linksource>true</linksource>
           <sourcetab>2</sourcetab>
@@ -2909,6 +2910,7 @@
           <inherited>false</inherited>
         </configuration>
       </reportSet>
+      <!-- User Test API -->
       <reportSet>
         <id>testuserapi</id>
         <reports>
@@ -2928,7 +2930,7 @@
           <name>User API</name>
           <description>The HBase Application Programmer's API</description>
           <excludePackageNames>
-            org.apache.hadoop.hbase.backup*:org.apache.hadoop.hbase.catalog:org.apache.hadoop.hbase.client.coprocessor:org.apache.hadoop.hbase.client.metrics:org.apache.hadoop.hbase.codec*:org.apache.hadoop.hbase.constraint:org.apache.hadoop.hbase.coprocessor.*:org.apache.hadoop.hbase.executor:org.apache.hadoop.hbase.fs:*.generated.*:org.apache.hadoop.hbase.io.hfile.*:org.apache.hadoop.hbase.mapreduce.hadoopbackport:org.apache.hadoop.hbase.mapreduce.replication:org.apache.hadoop.hbase.master.*:org.apache.hadoop.hbase.metrics*:org.apache.hadoop.hbase.migration:org.apache.hadoop.hbase.monitoring:org.apache.hadoop.hbase.p*:org.apache.hadoop.hbase.regionserver.compactions:org.apache.hadoop.hbase.regionserver.handler:org.apache.hadoop.hbase.regionserver.snapshot:org.apache.hadoop.hbase.replication.*:org.apache.hadoop.hbase.rest.filter:org.apache.hadoop.hbase.rest.model:org.apache.hadoop.hbase.rest.p*:org.apache.hadoop.hbase.security.*:org.apache.hadoop.hbase.thrift*:org.apache.hadoop.hbase.tmpl.*:org.apache.hadoop.hbase.tool:org.apache.hadoop.hbase.trace:org.apache.hadoop.hbase.util.byterange*:org.apache.hadoop.hbase.util.test:org.apache.hadoop.hbase.util.vint:org.apache.hadoop.hbase.zookeeper.lock:org.apache.hadoop.metrics2*
+            org.apache.hadoop.hbase.backup*:org.apache.hadoop.hbase.catalog:org.apache.hadoop.hbase.client.coprocessor:org.apache.hadoop.hbase.client.metrics:org.apache.hadoop.hbase.codec*:org.apache.hadoop.hbase.constraint:org.apache.hadoop.hbase.coprocessor.*:org.apache.hadoop.hbase.executor:org.apache.hadoop.hbase.fs:*.generated.*:org.apache.hadoop.hbase.io.hfile.*:org.apache.hadoop.hbase.mapreduce.hadoopbackport:org.apache.hadoop.hbase.mapreduce.replication:org.apache.hadoop.hbase.master.*:org.apache.hadoop.hbase.metrics*:org.apache.hadoop.hbase.migration:org.apache.hadoop.hbase.monitoring:org.apache.hadoop.hbase.p*:org.apache.hadoop.hbase.regionserver.compactions:org.apache.hadoop.hbase.regionserver.handler:org.apache.hadoop.hbase.regionserver.snapshot:org.apache.hadoop.hbase.replication.*:org.apache.hadoop.hbase.rest.filter:org.apache.hadoop.hbase.rest.model:org.apache.hadoop.hbase.rest.p*:org.apache.hadoop.hbase.security.*:org.apache.hadoop.hbase.thrift*:org.apache.hadoop.hbase.tmpl.*:org.apache.hadoop.hbase.tool:org.apache.hadoop.hbase.trace:org.apache.hadoop.hbase.util.byterange*:org.apache.hadoop.hbase.util.test:org.apache.hadoop.hbase.util.vint:org.apache.hadoop.hbase.zookeeper.lock:org.apache.hadoop.metrics2*:org.apache.hadoop.hbase.io.compress*
           </excludePackageNames>
           <!-- switch on dependency-driven aggregation -->
           <includeDependencySources>false</includeDependencySources>
@@ -2936,11 +2938,8 @@
           <!-- include ONLY dependencies I control -->
           <dependencySourceInclude>org.apache.hbase:hbase-annotations</dependencySourceInclude>
         </dependencySourceIncludes>
           <outputDirectory>${project.reporting.outputDirectory}/devapidocs</outputDirectory>
           <name>Developer API</name>
           <description>The full HBase API, including private and unstable APIs</description>
           <sourceFilesExclude>**/generated/*</sourceFilesExclude>
           <excludePackageNames>org.apache.hadoop.hbase.generated.master:org.apache.hadoop.hbase.protobuf.generated:org.apache.hadoop.hbase.tmpl.common</excludePackageNames>
           <show>protected</show> <!-- (shows only public and protected classes and members) -->
           <quiet>true</quiet>
           <linksource>true</linksource>
           <sourcetab>2</sourcetab>
@@ -222,7 +222,7 @@ In contrast with version 1, in a version 2 HFile Bloom filter metadata is stored
 
 ==== File Info format in versions 1 and 2
 
-The file info block is a serialized link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/io/HbaseMapWritable.html[HbaseMapWritable] (essentially a map from byte arrays to byte arrays) with the following keys, among others.
+The file info block is a serialized link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/HbaseMapWritable.html[HbaseMapWritable] (essentially a map from byte arrays to byte arrays) with the following keys, among others.
 StoreFile-level logic adds more keys to this.
 
 [cols="1,1", frame="all"]
@@ -138,7 +138,10 @@ A region with an empty start key is the first region in a table.
 If a region has both an empty start and an empty end key, it is the only region in the table
 ====
 
-In the (hopefully unlikely) event that programmatic processing of catalog metadata is required, see the link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/util/Writables.html#getHRegionInfo%28byte[]%29[Writables] utility.
+In the (hopefully unlikely) event that programmatic processing of catalog metadata
+is required, see the
+link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/util/Writables.html#getHRegionInfo%28byte[]%29[Writables]
+utility.
 
 [[arch.catalog.startup]]
 === Startup Sequencing
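If that unlikely need arises, the call itself is small. A sketch, assuming the pre-1.0 `Writables`-based API the link points at; the byte array is whatever raw `info:regioninfo` value you read from the catalog table:

[source,java]
----
import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.util.Writables;

public class CatalogSketch {
  /** Decode the raw info:regioninfo cell value read from the catalog table. */
  static HRegionInfo toRegionInfo(byte[] regionInfoBytes) throws IOException {
    return Writables.getHRegionInfo(regionInfoBytes);
  }
}
----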
@@ -235,11 +238,11 @@ Please use link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/C
 [[client.writebuffer]]
 === WriteBuffer and Batch Methods
 
-In HBase 1.0 and later, link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/HTable.html[HTable] is deprecated in favor of link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html[Table]. `Table` does not use autoflush. To do buffered writes, use the BufferedMutator class.
+In HBase 1.0 and later, link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/HTable.html[HTable] is deprecated in favor of link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html[Table]. `Table` does not use autoflush. To do buffered writes, use the BufferedMutator class.
 
 Before a `Table` or `HTable` instance is discarded, invoke either `close()` or `flushCommits()`, so `Put`s will not be lost.
 
-For additional information on write durability, review the link:../acid-semantics.html[ACID semantics] page.
+For additional information on write durability, review the link:/acid-semantics.html[ACID semantics] page.
 
 For fine-grained control of batching of ``Put``s or ``Delete``s, see the link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html#batch%28java.util.List%29[batch] methods on Table.
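A minimal sketch of the buffered-write path described above, assuming the HBase 1.0+ client API; the table, family, and qualifier names are placeholders:

[source,java]
----
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         // Mutations are buffered client-side and flushed in batches.
         BufferedMutator mutator =
             connection.getBufferedMutator(TableName.valueOf("my_table"))) {
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      mutator.mutate(put);
      // close(), here via try-with-resources, flushes any remaining buffered writes.
    }
  }
}
----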
@@ -759,7 +762,7 @@ When we go to look for a cached block, we look first in L1 and if none found, th
 Let us call this deploy format, _Raw L1+L2_.
 
 Other BucketCache configs include: specifying a location to persist cache to across restarts, how many threads to use writing the cache, etc.
-See the link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.html[CacheConfig.html] class for configuration options and descriptions.
+See the link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.html[CacheConfig.html] class for configuration options and descriptions.
 
@@ -1368,8 +1371,10 @@ The RegionServer splits a region, offlines the split region and then adds the da
 See <<disable.splitting>> for how to manually manage splits (and for why you might do this).
 
 ==== Custom Split Policies
-You can override the default split policy using a custom link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.html[RegionSplitPolicy](HBase 0.94+). Typically a custom split policy should extend
-HBase's default split policy: link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.html[IncreasingToUpperBoundRegionSplitPolicy].
+You can override the default split policy using a custom
+link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.html[RegionSplitPolicy] (HBase 0.94+).
+Typically a custom split policy should extend HBase's default split policy:
+link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.html[IncreasingToUpperBoundRegionSplitPolicy].
 
 The policy can be set globally through the HBase configuration or on a per-table
 basis.
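A custom policy of this shape might look like the following sketch. It assumes the 1.x server-side API, where `region` is a protected field inherited from `RegionSplitPolicy`; the table name `static_lookup` is a hypothetical example:

[source,java]
----
import org.apache.hadoop.hbase.regionserver.IncreasingToUpperBoundRegionSplitPolicy;

// Sketch: never split regions of one (hypothetical) lookup table,
// and defer to the default size-based behavior everywhere else.
public class NoSplitForLookupTablePolicy
    extends IncreasingToUpperBoundRegionSplitPolicy {

  @Override
  protected boolean shouldSplit() {
    String table = region.getTableDesc().getTableName().getNameAsString();
    if ("static_lookup".equals(table)) {
      return false;
    }
    return super.shouldSplit();
  }
}
----

Such a class would then be named in the `hbase.regionserver.region.split.policy` configuration property for a global default, or set per table as the shell example in the next hunk shows.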
@@ -1398,7 +1403,10 @@ hbase> create 'test', {METHOD => 'table_att', CONFIG => {'SPLIT_POLICY' => 'org.
 {NAME => 'cf1'}
 ----
 
-The default split policy can be overwritten using a custom link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.html[RegionSplitPolicy(HBase 0.94+)]. Typically a custom split policy should extend HBase's default split policy: link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.html[ConstantSizeRegionSplitPolicy].
+The default split policy can be overwritten using a custom
+link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.html[RegionSplitPolicy] (HBase 0.94+).
+Typically a custom split policy should extend HBase's default split policy:
+link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.html[ConstantSizeRegionSplitPolicy].
 
 The policy can be set globally through the HBaseConfiguration used or on a per table basis:
 [source,java]
@@ -1445,9 +1453,15 @@ Using a Custom Algorithm::
 The RegionSplitter tool is provided with HBase, and uses a _SplitAlgorithm_ to determine split points for you.
 As parameters, you give it the algorithm, desired number of regions, and column families.
 It includes two split algorithms.
-The first is the `link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/util/RegionSplitter.HexStringSplit.html[HexStringSplit]` algorithm, which assumes the row keys are hexadecimal strings.
-The second, `link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html[UniformSplit]`, assumes the row keys are random byte arrays.
-You will probably need to develop your own `link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/util/RegionSplitter.SplitAlgorithm.html[SplitAlgorithm]`, using the provided ones as models.
+The first is the
+`link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.HexStringSplit.html[HexStringSplit]`
+algorithm, which assumes the row keys are hexadecimal strings.
+The second,
+`link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html[UniformSplit]`,
+assumes the row keys are random byte arrays.
+You will probably need to develop your own
+`link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.SplitAlgorithm.html[SplitAlgorithm]`,
+using the provided ones as models.
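For a concrete sense of the tool, the sketch below pre-splits an assumed new table into 16 regions with `HexStringSplit` through the Java API, where `split(n)` returns the n-1 boundary keys; on the command line the equivalent is roughly `./bin/hbase org.apache.hadoop.hbase.util.RegionSplitter my_table HexStringSplit -c 16 -f cf` (table and family names hypothetical):

[source,java]
----
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.RegionSplitter;

public class PreSplitSketch {
  public static void main(String[] args) throws Exception {
    // HexStringSplit assumes hex-string row keys; split(16) returns the
    // 15 boundary keys that carve the key space into 16 regions.
    byte[][] splits = new RegionSplitter.HexStringSplit().split(16);
    try (Connection conn =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("my_table"));
      desc.addFamily(new HColumnDescriptor("cf"));
      admin.createTable(desc, splits);
    }
  }
}
----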
 
 === Online Region Merges
 
@@ -515,8 +515,7 @@ To implement this functionality we will take the help of Observer Coprocessor.
 Following are the implementation steps:
 
 . Write a class that extends the
-// Below URL is more than 100 characters long.
-link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.html[BaseRegionObserver]
+link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.html[BaseRegionObserver]
 class.
 
 . Override the 'preGetOp()' method (Note that 'preGet()' method is now deprecated). The reason for
@@ -552,7 +552,7 @@ hash-joins). So which is the best approach? It depends on what you are trying to
 
 == ACID
 
-See link:http://hbase.apache.org/acid-semantics.html[ACID Semantics].
+See link:/acid-semantics.html[ACID Semantics].
 Lars Hofhansl has also written a note on link:http://hadoop-hbase.blogspot.com/2012/03/acid-in-hbase.html[ACID in HBase].
 
 ifdef::backend-docbook[]
@@ -637,10 +637,14 @@ See link:https://issues.apache.org/jira/browse/HBASE-4391[HBASE-4391 Add ability
 [[compaction.tool]]
 === Offline Compaction Tool
 
-See the usage for the link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/regionserver/CompactionTool.html[Compaction
-Tool].
-Run it like this +./bin/hbase
-org.apache.hadoop.hbase.regionserver.CompactionTool+
+See the usage for the
+link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/regionserver/CompactionTool.html[CompactionTool].
+Run it like:
+
+[source, bash]
+----
+$ ./bin/hbase org.apache.hadoop.hbase.regionserver.CompactionTool
+----
 
 === `hbase clean`
 
@@ -196,7 +196,8 @@ tableDesc.addFamily(cfDesc);
 ----
 ====
 
-See the API documentation for link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.html[CacheConfig].
+See the API documentation for
+link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.html[CacheConfig].
 
 [[perf.rs.memstore.size]]
 === `hbase.regionserver.global.memstore.size`
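Tying the hunk's context lines together, per-family cache tuning along these lines is only a few calls. A sketch using 1.x `HColumnDescriptor` setters; the table and family names are hypothetical, and the two settings shown are alternatives, not a pair to combine:

[source,java]
----
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf("my_table"));
HColumnDescriptor cfDesc = new HColumnDescriptor("cf");
cfDesc.setBlockCacheEnabled(false); // bypass the block cache for scan-mostly data
cfDesc.setInMemory(true);           // or instead: prefer the in-memory tier for hot data
tableDesc.addFamily(cfDesc);
----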
@@ -1334,7 +1334,7 @@ static Table createTableAndWriteDataWithLabels(TableName tableName, String... la
 
 [[reading_cells_with_labels]]
 ==== Reading Cells with Labels
-When you issue a Scan or Get, HBase uses your default set of authorizations to filter out cells that you do not have access to. A superuser can set the default set of authorizations for a given user by using the `set_auths` HBase Shell command or the link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityClient.html#setAuths(org.apache.hadoop.conf.Configuration,%20java.lang.String\[\],%20java.lang.String)[VisibilityClient.setAuths()] method.
+When you issue a Scan or Get, HBase uses your default set of authorizations to filter out cells that you do not have access to. A superuser can set the default set of authorizations for a given user by using the `set_auths` HBase Shell command or the link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityClient.html#setAuths(org.apache.hadoop.hbase.client.Connection,%20java.lang.String\[\],%20java.lang.String)[VisibilityClient.setAuths()] method.
 
 You can specify a different authorization during the Scan or Get, by passing the AUTHORIZATIONS option in HBase Shell, or the link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html#setAuthorizations%28org.apache.hadoop.hbase.security.visibility.Authorizations%29[setAuthorizations()] method if you use the API. This authorization will be combined with your default set as an additional filter. It will further filter your results, rather than giving you additional authorization.
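As a hedged sketch of the client-side call involved, using real classes from the visibility-labels API; the label strings are hypothetical:

[source,java]
----
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.security.visibility.Authorizations;

// Restrict this scan to cells visible under the given (hypothetical) labels.
// The server intersects these with the user's allowed set, so extra labels
// never grant additional access.
Scan scan = new Scan();
scan.setAuthorizations(new Authorizations("secret", "topsecret"));
----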
@@ -1582,7 +1582,8 @@ Rotate the Master Key::
 === Secure Bulk Load
 
 Bulk loading in secure mode is a bit more involved than normal setup, since the client has to transfer the ownership of the files generated from the MapReduce job to HBase.
-Secure bulk loading is implemented by a coprocessor, named link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.html[SecureBulkLoadEndpoint], which uses a staging directory configured by the configuration property `hbase.bulkload.staging.dir`, which defaults to _/tmp/hbase-staging/_.
+Secure bulk loading is implemented by a coprocessor, named
+link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.html[SecureBulkLoadEndpoint], which uses a staging directory configured by the configuration property `hbase.bulkload.staging.dir`, which defaults to _/tmp/hbase-staging/_.
 
 .Secure Bulk Load Algorithm
@@ -282,7 +282,7 @@ under the License.
 use HBase as a map-reduce data source to overcome traditional query speed limits
 in MySQL.</dd>
 
-<dt><a href=">http://www.tokenizer.org">Shopping Engine at Tokenizer</a></dt>
+<dt><a href="http://www.tokenizer.org">Shopping Engine at Tokenizer</a></dt>
 <dd>Shopping Engine at Tokenizer is a web crawler; it uses HBase to store URLs
 and Outlinks (AnchorText + LinkedURL): more than a billion. It was initially
 designed as Nutch-Hadoop extension, then (due to very specific 'shopping'