diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java index ecb0826b5ed..d012863a545 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -118,7 +118,7 @@ public class HColumnDescriptor implements WritableComparable /** * Default number of versions of a record to keep. */ - public static final int DEFAULT_VERSIONS = 3; + public static final int DEFAULT_VERSIONS = 1; /** * Default is not to keep a minimum of versions. @@ -151,7 +151,7 @@ public class HColumnDescriptor implements WritableComparable * is enabled. */ public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false; - + /** * Default setting for whether to cache index blocks on write if block * caching is enabled. @@ -166,7 +166,7 @@ public class HColumnDescriptor implements WritableComparable /** * Default setting for whether or not to use bloomfilters. */ - public static final String DEFAULT_BLOOMFILTER = BloomType.NONE.toString(); + public static final String DEFAULT_BLOOMFILTER = BloomType.ROW.toString(); /** * Default setting for whether to cache bloom filter blocks on write if block @@ -543,7 +543,7 @@ public class HColumnDescriptor implements WritableComparable return Compression.Algorithm.valueOf(n.toUpperCase()); } - /** @return compression type being used for the column family for major + /** @return compression type being used for the column family for major compression */ public Compression.Algorithm getCompactionCompression() { String n = getValue(COMPRESSION_COMPACT); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 8d2afcf36ef..ce9a0a586cd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema; +import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Writables; @@ -1287,6 +1288,8 @@ public class HTableDescriptor implements WritableComparable { .setInMemory(true) .setBlocksize(8 * 1024) .setScope(HConstants.REPLICATION_SCOPE_LOCAL) + // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore. + .setBloomFilterType(BloomType.NONE) }); static { diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index f0a19e8a3a9..e7cc7fd7f58 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -19,25 +19,51 @@ * limitations under the License. */ --> + + + + + + hbase.tmp.dir + ${java.io.tmpdir}/hbase-${user.name} + Temporary directory on the local filesystem. + Change this setting to point to a location more permanent + than '/tmp', the usual resolve for java.io.tmpdir, as the + '/tmp' directory is cleared on machine restart. 
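With DEFAULT_VERSIONS dropping from 3 to 1 and DEFAULT_BLOOMFILTER moving from NONE to ROW, any caller that silently relied on the old defaults now has to opt in explicitly. A minimal sketch of the opt-in (table and family names are hypothetical, not from this patch):

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.regionserver.BloomType;

    // Recreate the pre-change behavior for one column family.
    HTableDescriptor desc = new HTableDescriptor("example_table");
    desc.addFamily(new HColumnDescriptor("cf")
        .setMaxVersions(3)                      // old default was 3; new default keeps 1
        .setBloomFilterType(BloomType.NONE));   // new default is ROW

The test changes later in this patch follow exactly this pattern.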
+ + hbase.rootdir - file:///tmp/hbase-${user.name}/hbase + file://${hbase.tmp.dir}/hbase The directory shared by region servers and into which HBase persists. The URL should be 'fully-qualified' to include the filesystem scheme. For example, to specify the HDFS directory '/hbase' where the HDFS instance's namenode is running at namenode.example.org on port 9000, set this value to: - hdfs://namenode.example.org:9000/hbase. By default HBase writes - into /tmp. Change this configuration else all data will be lost - on machine restart. + hdfs://namenode.example.org:9000/hbase. By default, we write + to whatever ${hbase.tmp.dir} is set to -- usually /tmp -- + so change this configuration or else all data will be lost on + machine restart. - - hbase.master.port - 60000 - The port the HBase Master should bind to. - hbase.cluster.distributed false @@ -48,14 +74,20 @@ - hbase.tmp.dir - ${java.io.tmpdir}/hbase-${user.name} - Temporary directory on the local filesystem. - Change this setting to point to a location more permanent - than '/tmp' (The '/tmp' directory is often cleared on - machine restart). + hbase.zookeeper.quorum + localhost + Comma separated list of servers in the ZooKeeper Quorum. + For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com". + By default this is set to localhost for local and pseudo-distributed modes + of operation. For a fully-distributed setup, this should be set to a full + list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh + this is the list of servers which hbase will start/stop ZooKeeper on as + part of cluster start/stop. + + hbase.local.dir ${hbase.tmp.dir}/local/ @@ -63,6 +95,13 @@ as a local storage. + + + + hbase.master.port + 60000 + The port the HBase Master should bind to. + hbase.master.info.port 60010 @@ -77,16 +116,57 @@ - hbase.client.write.buffer - 2097152 - Default size of the HTable clien write buffer in bytes. - A bigger buffer takes more memory -- on both the client and server - side since server instantiates the passed write buffer to process - it -- but a larger buffer size reduces the number of RPCs made. - For an estimate of server-side memory-used, evaluate - hbase.client.write.buffer * hbase.regionserver.handler.count + hbase.master.logcleaner.plugins + org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner + A comma-separated list of LogCleanerDelegate invoked by + the LogsCleaner service. These WAL/HLog cleaners are called in order, + so put the HLog cleaner that prunes the most HLog files in front. To + implement your own LogCleanerDelegate, just put it in HBase's classpath + and add the fully qualified class name here. Always add the above + default log cleaners in the list. + + hbase.master.logcleaner.ttl + 600000 + Maximum time an HLog can stay in the .oldlogdir directory, + after which it will be cleaned by a Master thread. + + + + hbase.master.hfilecleaner.plugins + org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner + A comma-separated list of HFileCleanerDelegate invoked by + the HFileCleaner service. These HFile cleaners are called in order, + so put the cleaner that prunes the most files in front. To + implement your own HFileCleanerDelegate, just put it in HBase's classpath + and add the fully qualified class name here. Always add the above + default log cleaners in the list as they will be overwritten in hbase-site.xml. + + + + hbase.master.catalog.timeout + 600000 + Timeout value for the Catalog Janitor from the master to META.
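The cleaner-plugin descriptions above say to keep the shipped cleaner first in the list and append your own delegate. A sketch of that wiring from code, where com.example.MyLogCleaner stands in for a hypothetical LogCleanerDelegate implementation:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    Configuration conf = HBaseConfiguration.create();
    // Default cleaner stays first in the list, per the description; custom one is appended.
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner," +
        "com.example.MyLogCleaner");

The same comma-separated pattern applies to hbase.master.hfilecleaner.plugins.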
+ + + + hbase.master.dns.interface + default + The name of the Network Interface from which a master + should report its IP address. + + + + hbase.master.dns.nameserver + default + The host name or IP address of the name server (DNS) + which a master should use to determine the host name used + for communication and display purposes. + + + + hbase.regionserver.port 60020 @@ -100,6 +180,12 @@ Set to -1 if you do not want the RegionServer UI to run. + + hbase.regionserver.info.bindAddress + 0.0.0.0 + The address for the HBase RegionServer web UI + + hbase.regionserver.info.port.auto false @@ -109,69 +195,12 @@ Useful for testing, turned off by default. - - hbase.regionserver.info.bindAddress - 0.0.0.0 - The address for the HBase RegionServer web UI - - - - hbase.client.pause - 1000 - General client pause value. Used mostly as value to wait - before running a retry of a failed get, region lookup, etc. - - - hbase.client.retries.number - 10 - Maximum retries. Used as maximum for all retryable - operations such as fetching of the root region from root region - server, getting a cell's value, starting a row update, etc. - Default: 10. - - - - hbase.bulkload.retries.number - 0 - Maximum retries. This is maximum number of iterations - to atomic bulk loads are attempted in the face of splitting operations - 0 means never give up. Default: 0. - - - - hbase.client.scanner.caching - 100 - Number of rows that will be fetched when calling next - on a scanner if it is not served from (local, client) memory. Higher - caching values will enable faster scanners but will eat up more memory - and some calls of next may take longer and longer times when the cache is empty. - Do not set this value such that the time between invocations is greater - than the scanner timeout; i.e. hbase.client.scanner.timeout.period - - - - hbase.client.keyvalue.maxsize - 10485760 - Specifies the combined maximum allowed size of a KeyValue - instance. This is to set an upper boundary for a single entry saved in a - storage file. Since they cannot be split it helps avoiding that a region - cannot be split any further because the data is too large. It seems wise - to set this to a fraction of the maximum region size. Setting it to zero - or less disables the check. - - - - hbase.client.scanner.timeout.period - 60000 - Client scanner lease period in milliseconds. Default is - 60 seconds. - hbase.regionserver.handler.count - 10 + 30 Count of RPC Listener instances spun up on RegionServers. Same property is used by the Master for count of master handlers. - Default is 10. + Default is 30. @@ -224,27 +253,37 @@ The HLog file writer implementation. - hbase.regionserver.nbreservationblocks - 4 - The number of resevoir blocks of memory release on - OOME so we can cleanup properly before server shutdown. + hbase.regionserver.global.memstore.upperLimit + 0.4 + Maximum size of all memstores in a region server before new + updates are blocked and flushes are forced. Defaults to 40% of heap. + Updates are blocked and flushes are forced until size of all memstores + in a region server hits hbase.regionserver.global.memstore.lowerLimit. - hbase.zookeeper.dns.interface - default - The name of the Network Interface from which a ZooKeeper server - should report its IP address. + hbase.regionserver.global.memstore.lowerLimit + 0.38 + Maximum size of all memstores in a region server before + flushes are forced. Defaults to 38% of heap. 
This value equal to hbase.regionserver.global.memstore.upperLimit causes + the minimum possible flushing to occur when updates are blocked due to + memstore limiting. + + + hbase.regionserver.optionalcacheflushinterval + 3600000 + + Maximum amount of time an edit lives in memory before being automatically flushed. + Default 1 hour. Set it to 0 to disable automatic flushing. + + + hbase.regionserver.catalog.timeout + 600000 + Timeout value for the Catalog Janitor from the regionserver to META. + hbase.regionserver.dns.interface default @@ -260,19 +299,212 @@ master for communication and display purposes. + + - hbase.master.dns.interface + zookeeper.session.timeout + 90000 + ZooKeeper session timeout. + HBase passes this to the zk quorum as suggested maximum time for a + session (This setting becomes zookeeper's 'maxSessionTimeout'). See + http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions + "The client sends a requested timeout, the server responds with the + timeout that it can give the client. " In milliseconds. + + + + zookeeper.znode.parent + /hbase + Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper + files that are configured with a relative path will go under this node. + By default, all of HBase's ZooKeeper file paths are configured with a + relative path, so they will all go under this directory unless changed. + + + + zookeeper.znode.rootserver + root-region-server + Path to ZNode holding root region location. This is written by + the master and read by clients and region servers. If a relative path is + given, the parent folder will be ${zookeeper.znode.parent}. By default, + this means the root location is stored at /hbase/root-region-server. + + + + zookeeper.znode.acl.parent + acl + Root ZNode for access control lists. + + + hbase.zookeeper.dns.interface default - The name of the Network Interface from which a master + The name of the Network Interface from which a ZooKeeper server should report its IP address. - hbase.master.dns.nameserver + hbase.zookeeper.dns.nameserver default The host name or IP address of the name server (DNS) - which a master should use to determine the host name used - for communication and display purposes. + which a ZooKeeper server should use to determine the host name used by the + master for communication and display purposes. + + + + + hbase.zookeeper.peerport + 2888 + Port used by ZooKeeper peers to talk to each other. + See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper + for more information. + + + + hbase.zookeeper.leaderport + 3888 + Port used by ZooKeeper for leader election. + See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper + for more information. + + + + + hbase.zookeeper.useMulti + false + Instructs HBase to make use of ZooKeeper's multi-update functionality. + This allows certain ZooKeeper operations to complete more quickly and prevents some issues + with rare Replication failure scenarios (see the release note of HBASE-2611 for an example). + IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+ + and will not be downgraded.
ZooKeeper versions before 3.4 do not support multi-update and + will not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495). + + + + hbase.config.read.zookeeper.config + false + + Set to true to allow HBaseConfiguration to read the + zoo.cfg file for ZooKeeper properties. Switching this to true + is not recommended, since the functionality of reading ZK + properties from a zoo.cfg file has been deprecated. + + + + + hbase.zookeeper.property.initLimit + 10 + Property from ZooKeeper's config zoo.cfg. + The number of ticks that the initial synchronization phase can take. + + + + hbase.zookeeper.property.syncLimit + 5 + Property from ZooKeeper's config zoo.cfg. + The number of ticks that can pass between sending a request and getting an + acknowledgment. + + + + hbase.zookeeper.property.dataDir + ${hbase.tmp.dir}/zookeeper + Property from ZooKeeper's config zoo.cfg. + The directory where the snapshot is stored. + + + + hbase.zookeeper.property.clientPort + 2181 + Property from ZooKeeper's config zoo.cfg. + The port at which the clients will connect. + + + + hbase.zookeeper.property.maxClientCnxns + 300 + Property from ZooKeeper's config zoo.cfg. + Limit on number of concurrent connections (at the socket level) that a + single client, identified by IP address, may make to a single member of + the ZooKeeper ensemble. Set high to avoid zk connection issues running + standalone and pseudo-distributed. + + + + + + + hbase.client.write.buffer + 2097152 + Default size of the HTable client write buffer in bytes. + A bigger buffer takes more memory -- on both the client and server + side since server instantiates the passed write buffer to process + it -- but a larger buffer size reduces the number of RPCs made. + For an estimate of server-side memory used, evaluate + hbase.client.write.buffer * hbase.regionserver.handler.count + + + + hbase.client.pause + 100 + General client pause value. Used mostly as value to wait + before running a retry of a failed get, region lookup, etc. + + + hbase.client.retries.number + 20 + Maximum retries. Used as maximum for all retryable + operations such as the getting of a cell's value, starting a row update, etc. + Default: 20. + + + + hbase.client.scanner.caching + 100 + Number of rows that will be fetched when calling next + on a scanner if it is not served from (local, client) memory. Higher + caching values will enable faster scanners but will eat up more memory + and some calls of next may take longer and longer times when the cache is empty. + Do not set this value such that the time between invocations is greater + than the scanner timeout; i.e. hbase.client.scanner.timeout.period + + + + hbase.client.keyvalue.maxsize + 10485760 + Specifies the combined maximum allowed size of a KeyValue + instance. This is to set an upper boundary for a single entry saved in a + storage file. Since they cannot be split it helps avoiding that a region + cannot be split any further because the data is too large. It seems wise + to set this to a fraction of the maximum region size. Setting it to zero + or less disables the check. + + + + hbase.client.scanner.timeout.period + 60000 + Client scanner lease period in milliseconds. Default is + 60 seconds. + + + + + hbase.bulkload.retries.number + 0 + Maximum retries. This is the maximum number of times + atomic bulk loads are attempted in the face of splitting operations; + 0 means never give up. Default: 0. @@ -289,43 +521,6 @@ Default is 20% slop.
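Working the write-buffer description's server-side memory estimate through with the handler default raised to 30 (arithmetic only; the numbers are the defaults quoted above):

    long writeBuffer = 2097152L;              // hbase.client.write.buffer: 2 MB
    int handlers = 30;                        // hbase.regionserver.handler.count: new default
    long perServer = writeBuffer * handlers;  // 62914560 bytes, i.e. 60 MB per region server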
- - hbase.master.logcleaner.ttl - 600000 - Maximum time a HLog can stay in the .oldlogdir directory, - after which it will be cleaned by a Master thread. - - - - hbase.master.logcleaner.plugins - org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner - A comma-separated list of LogCleanerDelegate invoked by - the LogsCleaner service. These WAL/HLog cleaners are called in order, - so put the HLog cleaner that prunes the most HLog files in front. To - implement your own LogCleanerDelegate, just put it in HBase's classpath - and add the fully qualified class name here. Always add the above - default log cleaners in the list. - - - - hbase.regionserver.global.memstore.upperLimit - 0.4 - Maximum size of all memstores in a region server before new - updates are blocked and flushes are forced. Defaults to 40% of heap. - Updates are blocked and flushes are forced until size of all memstores - in a region server hits hbase.regionserver.global.memstore.lowerLimit. - - - - hbase.regionserver.global.memstore.lowerLimit - 0.35 - Maximum size of all memstores in a region server before - flushes are forced. Defaults to 35% of heap. - This value equal to hbase.regionserver.global.memstore.upperLimit causes - the minimum possible flushing to occur when updates are blocked due to - memstore limiting. - - hbase.server.thread.wakefrequency 10000 @@ -342,14 +537,6 @@ hbase.server.thread.wakefrequency milliseconds. - - hbase.regionserver.optionalcacheflushinterval - 3600000 - - Maximum amount of time an edit lives in memory before being automatically flushed. - Default 1 hour. Set it to 0 to disable automatic flushing. - - hbase.hregion.memstore.flush.size 134217728 @@ -405,6 +592,29 @@ Default: 10G. + + hbase.hregion.majorcompaction + 604800000 + The time (in milliseconds) between 'major' compactions of all + HStoreFiles in a region. Default: 7 days. Major compactions tend to + happen exactly when you need them least, so schedule them to run at + off-peak times for your deploy; or, since this setting is on a periodicity that is + unlikely to match your loading, run the compactions via an external + invocation out of a cron job or some such. + + + + hbase.hregion.majorcompaction.jitter + 0.50 + Jitter outer bound for major compactions. + On each regionserver, we multiply the hbase.hregion.majorcompaction + interval by some random fraction that is inside the bounds of this + maximum. We then add this + or - product to when the next + major compaction is to run. The idea is that major compaction + does not happen on every regionserver at exactly the same time. The + smaller this number, the closer the compactions come together. + + hbase.hstore.compactionThreshold 3 @@ -417,7 +627,7 @@ hbase.hstore.blockingStoreFiles - 7 + 10 If more than this number of StoreFiles in any one Store (one StoreFile is written per flush of MemStore) then updates are @@ -441,14 +651,6 @@ Max number of HStoreFiles to compact per 'minor' compaction. - - hbase.hregion.majorcompaction - 86400000 - The time (in miliseconds) between 'major' compactions of all - HStoreFiles in a region. Default: 1 day. - Set to 0 to disable automated major compactions. - - hbase.storescanner.parallel.seek.enable false @@ -479,19 +681,12 @@ hfile.block.cache.size - 0.25 + 0.4 Percentage of maximum heap (-Xmx setting) to allocate to block cache - used by HFile/StoreFile. Default of 0.25 means allocate 25%. - Set to 0 to disable but it's not recommended. - - - - hbase.hash.type - murmur - The hashing algorithm for use in HashFunction.
Two values are - supported now: murmur (MurmurHash) and jenkins (JenkinsHash). - Used by bloom filters. + used by HFile/StoreFile. Default of 0.4 means allocate 40%. + Set to 0 to disable but it's not recommended; you need at least + enough cache to hold the storefile indices. @@ -520,6 +715,13 @@ consistent with FixedFileTrailer.MAX_VERSION. + + hfile.block.bloom.cacheonwrite + false + + Enables cache-on-write for inline blocks of a compound Bloom filter. + + io.storefile.bloom.block.size 131072 @@ -530,13 +732,6 @@ block varies. - - hfile.block.bloom.cacheonwrite - false - - Enables cache-on-write for inline blocks of a compound Bloom filter. - - hbase.rs.cacheblocksonwrite false @@ -545,7 +740,6 @@ block is finished. - hbase.rpc.server.engine org.apache.hadoop.hbase.ipc.ProtobufRpcServerEngine @@ -553,6 +747,16 @@ used for server RPC call marshalling. + + hbase.rpc.timeout + 60000 + + This is for the RPC layer to define how long HBase client applications + take for a remote call to time out. It uses pings to check connections + but will eventually throw a TimeoutException. + The default value is 60000ms(60s). + + hbase.ipc.client.tcpnodelay true @@ -560,7 +764,6 @@ http://docs.oracle.com/javase/1.5.0/docs/api/java/net/Socket.html#getTcpNoDelay() - @@ -599,7 +802,6 @@ specified in hbase.regionserver.keytab.file - hadoop.policy.file @@ -631,43 +833,6 @@ authentication token expires. Only used when HBase security is enabled. - - - zookeeper.session.timeout - 180000 - ZooKeeper session timeout. - HBase passes this to the zk quorum as suggested maximum time for a - session (This setting becomes zookeeper's 'maxSessionTimeout'). See - http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions - "The client sends a requested timeout, the server responds with the - timeout that it can give the client. " In milliseconds. - - - - zookeeper.znode.parent - /hbase - Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper - files that are configured with a relative path will go under this node. - By default, all of HBase's ZooKeeper file path are configured with a - relative path, so they will all go under this directory unless changed. - - - - zookeeper.znode.rootserver - root-region-server - Path to ZNode holding root region location. This is written by - the master and read by clients and region servers. If a relative path is - given, the parent folder will be ${zookeeper.znode.parent}. By default, - this means the root location is stored at /hbase/root-region-server. - - - - - zookeeper.znode.acl.parent - acl - Root ZNode for access control lists. - - hbase.coprocessor.region.classes @@ -678,114 +843,6 @@ A coprocessor can also be loaded on demand by setting HTableDescriptor. - - - hbase.coprocessor.master.classes - - A comma-separated list of - org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are - loaded by default on the active HMaster process. For any implemented - coprocessor methods, the listed classes will be called in order. After - implementing your own MasterObserver, just put it in HBase's classpath - and add the fully qualified class name here. - - - - - - hbase.zookeeper.quorum - localhost - Comma separated list of servers in the ZooKeeper Quorum. - For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com". - By default this is set to localhost for local and pseudo-distributed modes - of operation. For a fully-distributed setup, this should be set to a full - list of ZooKeeper quorum servers.
If HBASE_MANAGES_ZK is set in hbase-env.sh - this is the list of servers which we will start/stop ZooKeeper on. - - - - hbase.zookeeper.peerport - 2888 - Port used by ZooKeeper peers to talk to each other. - See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper - for more information. - - - - hbase.zookeeper.leaderport - 3888 - Port used by ZooKeeper for leader election. - See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper - for more information. - - - - hbase.zookeeper.useMulti - false - Instructs HBase to make use of ZooKeeper's multi-update functionality. - This allows certain ZooKeeper operations to complete more quickly and prevents some issues - with rare Replication failure scenarios (see the release note of HBASE-2611 for an example). - IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+ - and will not be downgraded. ZooKeeper versions before 3.4 do not support multi-update and will - not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495). - - - - - - - - hbase.zookeeper.property.initLimit - 10 - Property from ZooKeeper's config zoo.cfg. - The number of ticks that the initial synchronization phase can take. - - - - hbase.zookeeper.property.syncLimit - 5 - Property from ZooKeeper's config zoo.cfg. - The number of ticks that can pass between sending a request and getting an - acknowledgment. - - - - hbase.zookeeper.property.dataDir - ${hbase.tmp.dir}/zookeeper - Property from ZooKeeper's config zoo.cfg. - The directory where the snapshot is stored. - - - - hbase.zookeeper.property.clientPort - 2181 - Property from ZooKeeper's config zoo.cfg. - The port at which the clients will connect. - - - - hbase.zookeeper.property.maxClientCnxns - 300 - Property from ZooKeeper's config zoo.cfg. - Limit on number of concurrent connections (at the socket level) that a - single client, identified by IP address, may make to a single member of - the ZooKeeper ensemble. Set high to avoid zk connection issues running - standalone and pseudo-distributed. - - - hbase.rest.port 8080 @@ -800,7 +857,29 @@ true: Only the GET method is permitted. - + + hbase.rest.threads.max + 100 + + The maximum number of threads of the REST server thread pool. + Threads in the pool are reused to process REST requests. This + controls the maximum number of requests processed concurrently. + It may help to control the memory used by the REST server to + avoid OOM issues. If the thread pool is full, incoming requests + will be queued up and wait for some free threads. The default + is 100. + + + + hbase.rest.threads.min + 2 + + The minimum number of threads of the REST server thread pool. + The thread pool always has at least these number of threads so + the REST server is ready to serve incoming requests. The default + is 2. + + hbase.defaults.for.version @@@VERSION@@@ @@ -823,6 +902,17 @@ version is X.X.X-SNAPSHOT" + + hbase.coprocessor.master.classes + + A comma-separated list of + org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are + loaded by default on the active HMaster process. For any implemented + coprocessor methods, the listed classes will be called in order. After + implementing your own MasterObserver, just put it in HBase's classpath + and add the fully qualified class name here. + + hbase.coprocessor.abortonerror false @@ -853,15 +943,6 @@ state. - - dfs.support.append - true - Does HDFS allow appends to files? - This is an hdfs config. 
set in here so the hdfs client will do append support. - You must ensure that this config. is true serverside too when running hbase - (You will have to restart your cluster after setting it). - - hbase.thrift.minWorkerThreads 16 @@ -929,37 +1010,6 @@ have their times exposed through Hadoop metrics per CF and per region. - - hbase.master.hfilecleaner.plugins - org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner - A comma-separated list of HFileCleanerDelegate invoked by - the HFileCleaner service. These HFiles cleaners are called in order, - so put the cleaner that prunes the most files in front. To - implement your own HFileCleanerDelegate, just put it in HBase's classpath - and add the fully qualified class name here. Always add the above - default log cleaners in the list as they will be overwritten in hbase-site.xml. - - - - hbase.regionserver.catalog.timeout - 600000 - Timeout value for the Catalog Janitor from the regionserver to META. - - - hbase.master.catalog.timeout - 600000 - Timeout value for the Catalog Janitor from the master to META. - - - hbase.config.read.zookeeper.config - false - - Set to true to allow HBaseConfiguration to read the - zoo.cfg file for ZooKeeper properties. Switching this to true - is not recommended, since the functionality of reading ZK - properties from a zoo.cfg file has been deprecated. - - hbase.snapshot.enabled true @@ -967,39 +1017,6 @@ Set to true to allow snapshots to be taken / restored / cloned. - - hbase.rest.threads.max - 100 - - The maximum number of threads of the REST server thread pool. - Threads in the pool are reused to process REST requests. This - controls the maximum number of requests processed concurrently. - It may help to control the memory used by the REST server to - avoid OOM issues. If the thread pool is full, incoming requests - will be queued up and wait for some free threads. The default - is 100. - - - - hbase.rest.threads.min - 2 - - The minimum number of threads of the REST server thread pool. - The thread pool always has at least these number of threads so - the REST server is ready to serve incoming requests. The default - is 2. - - - - hbase.rpc.timeout - 60000 - - This is for the RPC layer to define how long HBase client applications - take for a remote call to time out. It uses pings to check connections - but will eventually throw a TimeoutException. - The default value is 60000ms(60s). 
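Clients that need a longer RPC deadline than the 60000 ms default can override hbase.rpc.timeout on their Configuration; a sketch (the two-minute value is an arbitrary example, not a recommendation from this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.rpc.timeout", 120000);  // milliseconds; e.g. for slow server-side calls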
- - hbase.server.compactchecker.interval.multiplier 1000 diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java index 81cc3c091f3..edb57cb3aee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java @@ -24,7 +24,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; /** @@ -79,8 +78,9 @@ public class CompactionConfiguration { throttlePoint = conf.getLong("hbase.regionserver.thread.compaction.throttle", 2 * maxFilesToCompact * storeConfigInfo.getMemstoreFlushSize()); shouldDeleteExpired = conf.getBoolean("hbase.store.delete.expired.storefile", true); - majorCompactionPeriod = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24); - majorCompactionJitter = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.20F); + majorCompactionPeriod = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24*7); + // Make it 0.5 so jitter has us fall evenly either side of when the compaction should run + majorCompactionJitter = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.50F); LOG.info("Compaction configuration " + this.toString()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 4ae00966be4..9d3fb3089d9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -1064,7 +1064,12 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); for(byte[] family : families) { - desc.addFamily(new HColumnDescriptor(family)); + HColumnDescriptor hcd = new HColumnDescriptor(family); + // Disable blooms (they are on by default as of 0.95) but we disable them here because + // tests have hard coded counts of what to expect in block cache, etc., and blooms being + // on is interfering. 
+ hcd.setBloomFilterType(BloomType.NONE); + desc.addFamily(hcd); } getHBaseAdmin().createTable(desc); return new HTable(c, tableName); @@ -1118,8 +1123,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); for (byte[] family : families) { - HColumnDescriptor hcd = new HColumnDescriptor(family) - .setMaxVersions(numVersions); + HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions); desc.addFamily(hcd); } getHBaseAdmin().createTable(desc); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java index a7c44ab9e91..9529a454bad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java @@ -95,7 +95,9 @@ public class TestMultiVersions { @Test public void testTimestamps() throws Exception { HTableDescriptor desc = new HTableDescriptor("testTimestamps"); - desc.addFamily(new HColumnDescriptor(TimestampTestBase.FAMILY_NAME)); + HColumnDescriptor hcd = new HColumnDescriptor(TimestampTestBase.FAMILY_NAME); + hcd.setMaxVersions(3); + desc.addFamily(hcd); this.admin.createTable(desc); HTable table = new HTable(UTIL.getConfiguration(), desc.getName()); // TODO: Remove these deprecated classes or pull them in here if this is @@ -134,7 +136,9 @@ public class TestMultiVersions { final long timestamp1 = 100L; final long timestamp2 = 200L; final HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(contents)); + HColumnDescriptor hcd = new HColumnDescriptor(contents); + hcd.setMaxVersions(3); + desc.addFamily(hcd); this.admin.createTable(desc); Put put = new Put(row, timestamp1); put.add(contents, contents, value1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index a6dd7463c47..ce94325b6bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -171,7 +171,7 @@ public class TestFromClientSide { final byte[] T2 = Bytes.toBytes("T2"); final byte[] T3 = Bytes.toBytes("T3"); HColumnDescriptor hcd = new HColumnDescriptor(FAMILY) - .setKeepDeletedCells(true); + .setKeepDeletedCells(true).setMaxVersions(3); HTableDescriptor desc = new HTableDescriptor(TABLENAME); desc.addFamily(hcd); @@ -1730,7 +1730,7 @@ public class TestFromClientSide { byte [][] VALUES = makeN(VALUE, 5); long [] ts = {1000, 2000, 3000, 4000, 5000}; - HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES); + HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES, 3); Put put = new Put(ROW); put.add(FAMILIES[0], QUALIFIER, ts[0], VALUES[0]); @@ -4459,7 +4459,7 @@ public class TestFromClientSide { conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, poolSize); final HTable table = TEST_UTIL.createTable(tableName, - new byte[][] { FAMILY }, conf); + new byte[][] { FAMILY }, conf, 3); table.setAutoFlush(true); final long ts = EnvironmentEdgeManager.currentTimeMillis(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java index c4edcaf1d6b..6540d4aa629 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java @@ -47,7 +47,7 @@ public class TestColumnPrefixFilter { public void testColumnPrefixFilter() throws IOException { String family = "Family"; HTableDescriptor htd = new HTableDescriptor("TestColumnPrefixFilter"); - htd.addFamily(new HColumnDescriptor(family)); + htd.addFamily((new HColumnDescriptor(family)).setMaxVersions(3)); HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); HRegion region = HRegion.createHRegion(info, TEST_UTIL. getDataTestDir(), TEST_UTIL.getConfiguration(), htd); @@ -109,7 +109,7 @@ public class TestColumnPrefixFilter { public void testColumnPrefixFilterWithFilterList() throws IOException { String family = "Family"; HTableDescriptor htd = new HTableDescriptor("TestColumnPrefixFilter"); - htd.addFamily(new HColumnDescriptor(family)); + htd.addFamily((new HColumnDescriptor(family)).setMaxVersions(3)); HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); HRegion region = HRegion.createHRegion(info, TEST_UTIL. getDataTestDir(), TEST_UTIL.getConfiguration(), htd); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java index dbfdfb09708..54d9731b580 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java @@ -71,8 +71,12 @@ public class TestDependentColumnFilter { testVals = makeTestVals(); HTableDescriptor htd = new HTableDescriptor(this.getClass().getName()); - htd.addFamily(new HColumnDescriptor(FAMILIES[0])); - htd.addFamily(new HColumnDescriptor(FAMILIES[1])); + HColumnDescriptor hcd0 = new HColumnDescriptor(FAMILIES[0]); + hcd0.setMaxVersions(3); + htd.addFamily(hcd0); + HColumnDescriptor hcd1 = new HColumnDescriptor(FAMILIES[1]); + hcd1.setMaxVersions(3); + htd.addFamily(hcd1); HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); this.region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java index dc014286b01..d360da6d6e5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java @@ -47,7 +47,9 @@ public class TestMultipleColumnPrefixFilter { public void testMultipleColumnPrefixFilter() throws IOException { String family = "Family"; HTableDescriptor htd = new HTableDescriptor("TestMultipleColumnPrefixFilter"); - htd.addFamily(new HColumnDescriptor(family)); + HColumnDescriptor hcd = new HColumnDescriptor(family); + hcd.setMaxVersions(3); + htd.addFamily(hcd); // HRegionInfo info = new HRegionInfo(htd, null, null, false); HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); HRegion region = HRegion.createHRegion(info, TEST_UTIL. 
@@ -109,8 +111,12 @@ public class TestMultipleColumnPrefixFilter { String family1 = "Family1"; String family2 = "Family2"; HTableDescriptor htd = new HTableDescriptor("TestMultipleColumnPrefixFilter"); - htd.addFamily(new HColumnDescriptor(family1)); - htd.addFamily(new HColumnDescriptor(family2)); + HColumnDescriptor hcd1 = new HColumnDescriptor(family1); + hcd1.setMaxVersions(3); + htd.addFamily(hcd1); + HColumnDescriptor hcd2 = new HColumnDescriptor(family2); + hcd2.setMaxVersions(3); + htd.addFamily(hcd2); HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); HRegion region = HRegion.createHRegion(info, TEST_UTIL. getDataTestDir(), TEST_UTIL.getConfiguration(), htd); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java index a82594bc1f5..428f304f5d8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java @@ -27,6 +27,7 @@ import java.util.Map; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.client.Get; @@ -34,9 +35,9 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.LruBlockCache; +import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.MultiThreadedWriter; import org.apache.hadoop.hbase.util.Strings; import org.apache.hadoop.hbase.util.test.LoadTestKVGenerator; import org.junit.Test; @@ -91,17 +92,16 @@ public class TestEncodedSeekers { @Test public void testEncodedSeeker() throws IOException { System.err.println("Testing encoded seekers for encoding " + encoding); - LruBlockCache cache = (LruBlockCache) - new CacheConfig(testUtil.getConfiguration()).getBlockCache(); + LruBlockCache cache = + (LruBlockCache)new CacheConfig(testUtil.getConfiguration()).getBlockCache(); cache.clearCache(); - - HRegion region = testUtil.createTestRegion( - TABLE_NAME, new HColumnDescriptor(CF_NAME) - .setMaxVersions(MAX_VERSIONS) - .setDataBlockEncoding(encoding) - .setEncodeOnDisk(encodeOnDisk) - .setBlocksize(BLOCK_SIZE) - ); + // Need to disable default row bloom filter for this test to pass. + HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS). + setDataBlockEncoding(encoding). + setEncodeOnDisk(encodeOnDisk). + setBlocksize(BLOCK_SIZE). 
+ setBloomFilterType(BloomType.NONE); + HRegion region = testUtil.createTestRegion(TABLE_NAME, hcd); //write the data, but leave some in the memstore doPuts(region); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 23c14c8ccc5..20937a32568 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -133,7 +133,7 @@ public class TestImportExport { @Test public void testSimpleCase() throws Exception { String EXPORT_TABLE = "exportSimpleCase"; - HTable t = UTIL.createTable(Bytes.toBytes(EXPORT_TABLE), FAMILYA); + HTable t = UTIL.createTable(Bytes.toBytes(EXPORT_TABLE), FAMILYA, 3); Put p = new Put(ROW1); p.add(FAMILYA, QUAL, now, QUAL); p.add(FAMILYA, QUAL, now+1, QUAL); @@ -153,7 +153,7 @@ public class TestImportExport { assertTrue(runExport(args)); String IMPORT_TABLE = "importTableSimpleCase"; - t = UTIL.createTable(Bytes.toBytes(IMPORT_TABLE), FAMILYB); + t = UTIL.createTable(Bytes.toBytes(IMPORT_TABLE), FAMILYB, 3); args = new String[] { "-D" + Import.CF_RENAME_PROP + "="+FAMILYA_STRING+":"+FAMILYB_STRING, IMPORT_TABLE, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java index 3be5a68aba1..d1d56eae713 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java @@ -56,6 +56,7 @@ public class TestColumnSeeking { HColumnDescriptor hcd = new HColumnDescriptor(familyBytes).setMaxVersions(1000); + hcd.setMaxVersions(3); HTableDescriptor htd = new HTableDescriptor(table); htd.addFamily(hcd); HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false); @@ -168,7 +169,9 @@ public class TestColumnSeeking { String table = "TestSingleVersions"; HTableDescriptor htd = new HTableDescriptor(table); - htd.addFamily(new HColumnDescriptor(family)); + HColumnDescriptor hcd = new HColumnDescriptor(family); + hcd.setMaxVersions(3); + htd.addFamily(hcd); HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false); HRegion region = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 07f02ade105..796c9c4c7b4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -4014,7 +4014,10 @@ public class TestHRegion extends HBaseTestCase { HTableDescriptor htd = new HTableDescriptor(tableName); htd.setReadOnly(isReadOnly); for(byte [] family : families) { + HColumnDescriptor hcd = new HColumnDescriptor(family); + // Tests here assume every written version is retained, so lift the cap well above the new default of one.
+ hcd.setMaxVersions(Integer.MAX_VALUE); + htd.addFamily(hcd); } HRegionInfo info = new HRegionInfo(htd.getName(), startKey, stopKey, false); Path path = new Path(DIR + callingMethod); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionBusyWait.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionBusyWait.java index ea1184c8f73..4292b6cbbdd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionBusyWait.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionBusyWait.java @@ -35,6 +35,8 @@ import org.junit.experimental.categories.Category; @Category(MediumTests.class) @SuppressWarnings("deprecation") public class TestHRegionBusyWait extends TestHRegion { + // TODO: This subclass runs all the tests in TestHRegion as well as the test below which means + // all TestHRegion tests are run twice. public TestHRegionBusyWait() { conf.set("hbase.busy.wait.duration", "1000"); } @@ -87,4 +89,4 @@ public class TestHRegionBusyWait extends TestHRegion { region = null; } } -} +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java index 93ceaab00ba..a59d08267ce 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java @@ -143,6 +143,7 @@ public class TestSeekOptimizations { new HColumnDescriptor(FAMILY) .setCompressionType(comprAlgo) .setBloomFilterType(bloomType) + .setMaxVersions(3) ); // Delete the given timestamp and everything before. 
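The test edits above keep repeating the same dance: construct the HColumnDescriptor, pin it back to three versions, add it to the table descriptor. A small helper (hypothetical, not part of this patch) would express the intent once:

    // Recreates the pre-0.95 default of three versions for tests that assume it.
    private static HColumnDescriptor threeVersions(final byte[] family) {
      return new HColumnDescriptor(family).setMaxVersions(3);
    }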
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java index acc2ac255b4..e2bfa3bd4eb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java @@ -120,6 +120,7 @@ public class TestReplicationBase { HTableDescriptor table = new HTableDescriptor(tableName); HColumnDescriptor fam = new HColumnDescriptor(famName); + fam.setMaxVersions(3); fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL); table.addFamily(fam); fam = new HColumnDescriptor(noRepfamName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java index a47a24ac802..633a0b6a615 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java @@ -78,9 +78,9 @@ public class TestRemoteTable { HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); if (!admin.tableExists(TABLE)) { HTableDescriptor htd = new HTableDescriptor(TABLE); - htd.addFamily(new HColumnDescriptor(COLUMN_1)); - htd.addFamily(new HColumnDescriptor(COLUMN_2)); - htd.addFamily(new HColumnDescriptor(COLUMN_3)); + htd.addFamily(new HColumnDescriptor(COLUMN_1).setMaxVersions(3)); + htd.addFamily(new HColumnDescriptor(COLUMN_2).setMaxVersions(3)); + htd.addFamily(new HColumnDescriptor(COLUMN_3).setMaxVersions(3)); admin.createTable(htd); HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE); Put put = new Put(ROW_1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java index d6dbff9d2b9..d62d9923ef9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java @@ -76,9 +76,8 @@ public class TestThriftHBaseServiceHandler { private static byte[] valueAname = Bytes.toBytes("valueA"); private static byte[] valueBname = Bytes.toBytes("valueB"); private static HColumnDescriptor[] families = new HColumnDescriptor[] { - new HColumnDescriptor(familyAname), - new HColumnDescriptor(familyBname) - .setMaxVersions(2) + new HColumnDescriptor(familyAname).setMaxVersions(3), + new HColumnDescriptor(familyBname).setMaxVersions(2) };
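For reviewers wondering why so many tests needed explicit setMaxVersions(3): with the new default of a single version per family, older cells become eligible for collapse and multi-version reads come back with one cell. A sketch against the 0.95-era client API (table, row, and qualifier names hypothetical; assumes the usual org.apache.hadoop.hbase.client and util imports):

    HTable table = new HTable(conf, "example_table");  // family "cf" created with setMaxVersions(3)
    byte[] row = Bytes.toBytes("r1");
    byte[] cf = Bytes.toBytes("cf");
    byte[] q = Bytes.toBytes("q");
    for (long ts = 1; ts <= 3; ts++) {
      Put p = new Put(row);
      p.add(cf, q, ts, Bytes.toBytes("v" + ts));  // three versions of the same cell
      table.put(p);
    }
    Get g = new Get(row);
    g.setMaxVersions(3);
    Result r = table.get(g);
    // All three timestamps come back only because the family retains three versions.
    assert r.getColumn(cf, q).size() == 3;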