HBASE-6334 TestImprovement for TestHRegion.testWritesWhileGetting; REVERT ACCIDENTAL HMASTER COMMIT

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1360936 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2012-07-12 20:53:09 +00:00
parent 4c72558f62
commit 6d64175d1e
1 changed file with 0 additions and 25 deletions

@@ -101,7 +101,6 @@ import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.CompressionTest;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.InfoServer;
@@ -286,9 +285,6 @@ Server {
    */
   private ObjectName mxBean = null;
 
-  //should we check the compression codec type at master side, default true, HBASE-6370
-  private final boolean masterCheckCompression;
-
   /**
    * Initializes the HMaster. The steps are as follows:
    * <p>
@@ -356,9 +352,6 @@ Server {
     this.metrics = new MasterMetrics(getServerName().toString());
     // metrics interval: using the same property as region server.
     this.msgInterval = conf.getInt("hbase.regionserver.msginterval", 3 * 1000);
-
-    //should we check the compression codec type at master side, default true, HBASE-6370
-    this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);
   }
 
   /**
@@ -1383,7 +1376,6 @@ Server {
     HRegionInfo [] newRegions = getHRegionInfos(hTableDescriptor, splitKeys);
 
     checkInitialized();
-    checkCompression(hTableDescriptor);
     if (cpHost != null) {
       cpHost.preCreateTable(hTableDescriptor, newRegions);
     }
@@ -1397,21 +1389,6 @@ Server {
   }
 
-  private void checkCompression(final HTableDescriptor htd)
-  throws IOException {
-    if (!this.masterCheckCompression) return;
-    for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
-      checkCompression(hcd);
-    }
-  }
-
-  private void checkCompression(final HColumnDescriptor hcd)
-  throws IOException {
-    if (!this.masterCheckCompression) return;
-    CompressionTest.testCompression(hcd.getCompression());
-    CompressionTest.testCompression(hcd.getCompactionCompression());
-  }
-
   @Override
   public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
   throws ServiceException {
@@ -1528,7 +1505,6 @@ Server {
     try {
       checkInitialized();
-      checkCompression(descriptor);
       if (cpHost != null) {
         if (cpHost.preModifyColumn(tableName, descriptor)) {
           return ModifyColumnResponse.newBuilder().build();
@@ -1650,7 +1626,6 @@ Server {
     HTableDescriptor htd = HTableDescriptor.convert(req.getTableSchema());
     try {
       checkInitialized();
-      checkCompression(htd);
       if (cpHost != null) {
         cpHost.preModifyTable(tableName, htd);
       }