From ee48ff48a19edf0ac446a7ed34423bdba75b4bce Mon Sep 17 00:00:00 2001
From: Enis Soztutar
Date: Wed, 26 Feb 2014 22:24:20 +0000
Subject: [PATCH] HBASE-10591 Sanity check table configuration in createTable

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1572301 13f79535-47bb-0310-9956-ffa450edef68
---
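Not part of the diff below: a minimal sketch of the client-visible behavior this
change introduces, assuming an HBaseAdmin instance named "admin" connected to a
cluster where the checks are enabled (the default). Table and family names here
are illustrative only.

    // Illustrative sketch, not from the patch itself.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
    htd.addFamily(new HColumnDescriptor("f1"));
    htd.setMemStoreFlushSize(1024); // well below the 1M lower limit
    try {
      admin.createTable(htd); // the master now rejects this up front
    } catch (IOException e) {
      // expected; surfaces as a DoNotRetryIOException from the master
    }
    // Opt out for a single table, as testIllegalTableDescriptor does:
    htd.setConfiguration("hbase.table.sanity.checks", "false");
    admin.createTable(htd); // accepted, checks bypassed

The checks can also be disabled cluster-wide by setting
hbase.table.sanity.checks to false in hbase-site.xml, as the test resource
change below does.
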
 .../apache/hadoop/hbase/HTableDescriptor.java |  14 ++-
 .../apache/hadoop/hbase/master/HMaster.java   | 107 ++++++++++++++++--
 .../hbase/regionserver/RegionSplitPolicy.java |   2 +-
 .../apache/hadoop/hbase/TestZooKeeper.java    |   4 +-
 .../apache/hadoop/hbase/client/TestAdmin.java |  14 ++-
 .../hbase/client/TestFromClientSide.java      |  90 ++++++++++++++-
 .../TestFromClientSideWithCoprocessor.java    |   1 +
 .../src/test/resources/hbase-site.xml         |   6 +
 8 files changed, 221 insertions(+), 17 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index d54f9b22aee..59b139417e0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -34,7 +34,6 @@ import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.regex.Matcher;
 
-import com.google.protobuf.HBaseZeroCopyByteString;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -54,6 +53,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.WritableComparable;
 
+import com.google.protobuf.HBaseZeroCopyByteString;
 import com.google.protobuf.InvalidProtocolBufferException;
 
 /**
@@ -655,7 +655,17 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
   }
 
   /**
-   * This get the class associated with the region split policy which
+   * This sets the class associated with the region split policy which
+   * determines when a region split should occur. The class used by
+   * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
+   * @param clazz the class name
+   */
+  public void setRegionSplitPolicyClassName(String clazz) {
+    setValue(SPLIT_POLICY, clazz);
+  }
+
+  /**
+   * This gets the class associated with the region split policy which
    * determines when a region split should occur. The class used by
    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
    *
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index eae4040e7c6..d8c89f20c82 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.ClusterId;
 import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -116,10 +117,10 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
@@ -147,6 +148,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJani
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
@@ -163,6 +166,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorE
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
@@ -197,10 +202,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
@@ -210,6 +211,7 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Regio
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
+import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
@@ -1745,7 +1747,7 @@ MasterServices, Server {
 
     HRegionInfo[] newRegions = getHRegionInfos(hTableDescriptor, splitKeys);
     checkInitialized();
-    checkCompression(hTableDescriptor);
+    sanityCheckTableDescriptor(hTableDescriptor);
     if (cpHost != null) {
       cpHost.preCreateTable(hTableDescriptor, newRegions);
     }
@@ -1759,6 +1761,97 @@ MasterServices, Server {
     }
   }
 
+  /**
+   * Checks whether the table conforms to some sane limits, and that the
+   * configured values (compression, split policy, etc.) can be loaded.
+   * Throws an exception if something is wrong.
+   * @throws IOException
+   */
+  private void sanityCheckTableDescriptor(final HTableDescriptor htd) throws IOException {
+    final String CONF_KEY = "hbase.table.sanity.checks";
+    if (!conf.getBoolean(CONF_KEY, true)) {
+      return;
+    }
+    String tableVal = htd.getConfigurationValue(CONF_KEY);
+    if (tableVal != null && !Boolean.valueOf(tableVal)) {
+      return;
+    }
+
+    // check max file size
+    long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit
+    long maxFileSize = htd.getMaxFileSize();
+    if (maxFileSize < 0) {
+      maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit);
+    }
+    if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
+      throw new DoNotRetryIOException("MAX_FILESIZE for table descriptor or "
+        + "\"hbase.hregion.max.filesize\" (" + maxFileSize
+        + ") is too small, which might cause over-splitting into an unmanageable "
+        + "number of regions. Set " + CONF_KEY + " to false at conf or table descriptor "
+        + "if you want to bypass sanity checks.");
+    }
+
+    // check flush size
+    long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit
+    long flushSize = htd.getMemStoreFlushSize();
+    if (flushSize < 0) {
+      flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit);
+    }
+    if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
+      throw new DoNotRetryIOException("MEMSTORE_FLUSHSIZE for table descriptor or "
+        + "\"hbase.hregion.memstore.flush.size\" (" + flushSize + ") is too small, which might"
+        + " cause very frequent flushing. Set " + CONF_KEY + " to false at conf or table "
+        + "descriptor if you want to bypass sanity checks.");
+    }
+
+    // check that the split policy class can be loaded
+    try {
+      RegionSplitPolicy.getSplitPolicyClass(htd, conf);
+    } catch (Exception ex) {
+      throw new DoNotRetryIOException(ex);
+    }
+
+    // check that the compression codecs can be loaded
+    checkCompression(htd);
+
+    // check that we have at least 1 CF
+    if (htd.getColumnFamilies().length == 0) {
+      throw new DoNotRetryIOException("Table should have at least one column family. "
+        + "Set " + CONF_KEY + " to false at conf or table descriptor "
+        + "if you want to bypass sanity checks.");
+    }
+
+    for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
+      // check TTL
+      if (hcd.getTimeToLive() <= 0) {
+        throw new DoNotRetryIOException("TTL for column family " + hcd.getNameAsString()
+          + " must be positive. Set " + CONF_KEY + " to false at conf or table descriptor "
+          + "if you want to bypass sanity checks.");
+      }
+
+      // check blockSize
+      if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
+        throw new DoNotRetryIOException("Block size for column family " + hcd.getNameAsString()
+          + " must be between 1K and 16MB. Set " + CONF_KEY + " to false at conf or table "
+          + "descriptor if you want to bypass sanity checks.");
+      }
+
+      // check versions
+      if (hcd.getMinVersions() < 0) {
+        throw new DoNotRetryIOException("Min versions for column family " + hcd.getNameAsString()
+          + " must be non-negative. Set " + CONF_KEY + " to false at conf or table descriptor "
+          + "if you want to bypass sanity checks.");
+      }
+      // max versions is already checked in HColumnDescriptor.setMaxVersions
+
+      // check replication scope
+      if (hcd.getScope() < 0) {
+        throw new DoNotRetryIOException("Replication scope for column family "
+          + hcd.getNameAsString() + " must be non-negative. Set " + CONF_KEY + " to false at "
+          + "conf or table descriptor if you want to bypass sanity checks.");
+      }
+
+      // TODO: should we check coprocessors and encryption?
+    }
+  }
+
   private void checkCompression(final HTableDescriptor htd)
   throws IOException {
     if (!this.masterCheckCompression) return;
@@ -2039,7 +2132,7 @@ MasterServices, Server {
   public void modifyTable(final TableName tableName, final HTableDescriptor descriptor)
       throws IOException {
     checkInitialized();
-    checkCompression(descriptor);
+    sanityCheckTableDescriptor(descriptor);
     if (cpHost != null) {
       cpHost.preModifyTable(tableName, descriptor);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
index ae9f950002b..f2d197aa78c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
@@ -105,7 +105,7 @@ public abstract class RegionSplitPolicy extends Configured {
     return policy;
   }
 
-  static Class<? extends RegionSplitPolicy> getSplitPolicyClass(
+  public static Class<? extends RegionSplitPolicy> getSplitPolicyClass(
       HTableDescriptor htd, Configuration conf) throws IOException {
     String className = htd.getRegionSplitPolicyClassName();
     if (className == null) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
index 3a31c157f98..340acf4961b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
@@ -495,7 +495,9 @@ public class TestZooKeeper {
         Bytes.toBytes("c"), Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
         Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"), Bytes.toBytes("j") };
     String tableName = "testRegionAssignmentAfterMasterRecoveryDueToZKExpiry";
-    admin.createTable(new HTableDescriptor(TableName.valueOf(tableName)), SPLIT_KEYS);
+    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
+    htd.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
+    admin.createTable(htd, SPLIT_KEYS);
     ZooKeeperWatcher zooKeeperWatcher = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
     ZKAssign.blockUntilNoRIT(zooKeeperWatcher);
     m.getZooKeeperWatcher().close();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
index 38194f970ed..66361966555 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
@@ -197,6 +197,7 @@ public class TestAdmin {
     exception = null;
     try {
       HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(nonexistent));
+      htd.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
       this.admin.modifyTable(htd.getTableName(), htd);
     } catch (IOException e) {
       exception = e;
@@ -1156,8 +1157,12 @@ public class TestAdmin {
   @Test (timeout=300000)
   public void testTableNameClash() throws Exception {
     String name = "testTableNameClash";
-    admin.createTable(new HTableDescriptor(TableName.valueOf(name + "SOMEUPPERCASE")));
-    admin.createTable(new HTableDescriptor(TableName.valueOf(name)));
+    HTableDescriptor htd1 = new HTableDescriptor(TableName.valueOf(name + "SOMEUPPERCASE"));
+    HTableDescriptor htd2 = new HTableDescriptor(TableName.valueOf(name));
+    htd1.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
+    htd2.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
+    admin.createTable(htd1);
+    admin.createTable(htd2);
     // Before fix, below would fail throwing a NoServerForRegionException.
     new HTable(TEST_UTIL.getConfiguration(), name).close();
   }
@@ -1181,8 +1186,9 @@ public class TestAdmin {
       byte [] startKey = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
       byte [] endKey = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };
       HBaseAdmin hbaseadmin = new HBaseAdmin(TEST_UTIL.getConfiguration());
-      hbaseadmin.createTable(new HTableDescriptor(TableName.valueOf(name)), startKey, endKey,
-        expectedRegions);
+      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+      htd.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
+      hbaseadmin.createTable(htd, startKey, endKey, expectedRegions);
       hbaseadmin.close();
     } finally {
       TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, oldTimeout);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index bb386727260..33a18fb2c40 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -85,8 +85,8 @@ import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
-import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
+import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
@@ -130,6 +130,7 @@ public class TestFromClientSide {
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
         MultiRowMutationEndpoint.class.getName());
+    conf.setBoolean("hbase.table.sanity.checks", true); // enable for below tests
     // We need more than one region server in this test
     TEST_UTIL.startMiniCluster(SLAVES);
   }
@@ -4252,7 +4253,7 @@ public class TestFromClientSide {
 
     HTable table =
       TEST_UTIL.createTable(tableAname,
-      new byte[][] { HConstants.CATALOG_FAMILY, Bytes.toBytes("info2") }, 1, 64);
+      new byte[][] { HConstants.CATALOG_FAMILY, Bytes.toBytes("info2") }, 1, 1024);
     // set block size to 64 to making 2 kvs into one block, bypassing the walkForwardInSingleRow
     // in Store.rowAtOrBeforeFromStoreFile
     table.setAutoFlush(true);
@@ -5395,6 +5396,91 @@ public class TestFromClientSide {
     table.close();
   }
 
+  @Test
+  public void testIllegalTableDescriptor() throws Exception {
+    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testIllegalTableDescriptor"));
+    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
+
+    // create table with 0 families
+    checkTableIsIllegal(htd);
+    htd.addFamily(hcd);
+    checkTableIsLegal(htd);
+
+    htd.setMaxFileSize(1024); // 1K
+    checkTableIsIllegal(htd);
+    htd.setMaxFileSize(0);
+    checkTableIsIllegal(htd);
+    htd.setMaxFileSize(1024 * 1024 * 1024); // 1G
+    checkTableIsLegal(htd);
+
+    htd.setMemStoreFlushSize(1024);
+    checkTableIsIllegal(htd);
+    htd.setMemStoreFlushSize(0);
+    checkTableIsIllegal(htd);
+    htd.setMemStoreFlushSize(128 * 1024 * 1024); // 128M
+    checkTableIsLegal(htd);
+
+    htd.setRegionSplitPolicyClassName("nonexisting.foo.class");
+    checkTableIsIllegal(htd);
+    htd.setRegionSplitPolicyClassName(null);
+    checkTableIsLegal(htd);
+
+    hcd.setBlocksize(0);
+    checkTableIsIllegal(htd);
+    hcd.setBlocksize(1024 * 1024 * 128); // 128M
+    checkTableIsIllegal(htd);
+    hcd.setBlocksize(1024);
+    checkTableIsLegal(htd);
+
+    hcd.setTimeToLive(0);
+    checkTableIsIllegal(htd);
+    hcd.setTimeToLive(-1);
+    checkTableIsIllegal(htd);
+    hcd.setTimeToLive(1);
+    checkTableIsLegal(htd);
+
+    hcd.setMinVersions(-1);
+    checkTableIsIllegal(htd);
+    hcd.setMinVersions(3);
+    try {
+      hcd.setMaxVersions(2);
+      fail();
+    } catch (IllegalArgumentException ex) {
+      // expected
+      hcd.setMaxVersions(10);
+    }
+    checkTableIsLegal(htd);
+
+    hcd.setScope(-1);
+    checkTableIsIllegal(htd);
+    hcd.setScope(0);
+    checkTableIsLegal(htd);
+
+    // check the conf settings to disable sanity checks
+    htd.setMemStoreFlushSize(0);
+    htd.setConfiguration("hbase.table.sanity.checks", Boolean.FALSE.toString());
+    checkTableIsLegal(htd);
+  }
+
+  private void checkTableIsLegal(HTableDescriptor htd) throws IOException {
+    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    admin.createTable(htd);
+    assertTrue(admin.tableExists(htd.getTableName()));
+    admin.disableTable(htd.getTableName());
+    admin.deleteTable(htd.getTableName());
+  }
+
+  private void checkTableIsIllegal(HTableDescriptor htd) throws IOException {
+    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    try {
+      admin.createTable(htd);
+      fail();
+    } catch (Exception ex) {
+      // expected: the master rejects the descriptor
+    }
+    assertFalse(admin.tableExists(htd.getTableName()));
+  }
+
   @Test
   public void testRawScanRespectsVersions() throws Exception {
     byte[] TABLE = Bytes.toBytes("testRawScan");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java
index a49e94c41f7..27099a5a899 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java
@@ -36,6 +36,7 @@ public class TestFromClientSideWithCoprocessor extends TestFromClientSide {
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
         MultiRowMutationEndpoint.class.getName(), NoOpScanPolicyObserver.class.getName());
+    conf.setBoolean("hbase.table.sanity.checks", true); // enable for below tests
     // We need more than one region server in this test
     TEST_UTIL.startMiniCluster(SLAVES);
   }
diff --git a/hbase-server/src/test/resources/hbase-site.xml b/hbase-server/src/test/resources/hbase-site.xml
index 69c52060eef..8c8312cb108 100644
--- a/hbase-server/src/test/resources/hbase-site.xml
+++ b/hbase-server/src/test/resources/hbase-site.xml
@@ -141,4 +141,10 @@
       version is X.X.X-SNAPSHOT"
     </description>
   </property>
+  <property>
+    <name>hbase.table.sanity.checks</name>
+    <value>false</value>
+    <description>Skip sanity checks in tests
+    </description>
+  </property>
 </configuration>