HBASE-2514 RegionServer should refuse to be assigned a region that use LZO when LZO isn't available

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1026538 13f79535-47bb-0310-9956-ffa450edef68
Ryan Rawson 2010-10-23 00:39:14 +00:00
parent 89a5eb519a
commit 5deb69715c
7 changed files with 159 additions and 14 deletions

CHANGES.txt

@@ -1032,6 +1032,8 @@ Release 0.21.0 - Unreleased
HBASE-3133 Only log compaction requests when a request is actually added
to the queue
HBASE-3132 Print TimestampRange and BloomFilters in HFile pretty print
HBASE-2514 RegionServer should refuse to be assigned a region that use
LZO when LZO isn't available
NEW FEATURES
HBASE-1961 HBase EC2 scripts

org/apache/hadoop/hbase/io/hfile/Compression.java

@@ -71,7 +71,9 @@ public final class Compression {
}
/**
* Compression algorithms. The ordinal of these cannot change or else you
* risk breaking all existing HFiles out there. Even the ones that are
* not compressed! (They use the NONE algorithm)
*/
public static enum Algorithm {
LZO("lzo") {

org/apache/hadoop/hbase/io/hfile/HFile.java

@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.util.BloomFilter;
import org.apache.hadoop.hbase.util.ByteBloomFilter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.CompressionTest;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.IOUtils;
@@ -827,20 +828,20 @@ public class HFile {
String clazzName = Bytes.toString(fi.get(FileInfo.COMPARATOR));
this.comparator = getComparator(clazzName);
int allIndexSize = (int)(this.fileSize - this.trailer.dataIndexOffset - FixedFileTrailer.trailerSize());
byte[] dataAndMetaIndex = readAllIndex(this.istream, this.trailer.dataIndexOffset, allIndexSize);
ByteArrayInputStream bis = new ByteArrayInputStream(dataAndMetaIndex);
DataInputStream dis = new DataInputStream(bis);
// Read in the data index.
this.blockIndex =
BlockIndex.readIndex(this.comparator, dis, this.trailer.dataIndexCount);
// Read in the metadata index.
if (trailer.metaIndexCount > 0) {
this.metaIndex = BlockIndex.readIndex(Bytes.BYTES_RAWCOMPARATOR, dis,
this.trailer.metaIndexCount);
}
this.fileInfoLoaded = true;
@@ -885,6 +886,9 @@ public class HFile {
// Set up the codec.
this.compressAlgo =
Compression.Algorithm.values()[fft.compressionCodec];
CompressionTest.testCompression(this.compressAlgo);
return fft;
}

org/apache/hadoop/hbase/regionserver/HRegion.java

@@ -76,6 +76,7 @@ import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.CompressionTest;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
@@ -432,7 +433,7 @@ public class HRegion implements HeapSize { // , Writable{
public boolean isClosing() {
return this.closing.get();
}
boolean areWritesEnabled() {
synchronized(this.writestate) {
return this.writestate.writesEnabled;
@@ -741,7 +742,7 @@ public class HRegion implements HeapSize { // , Writable{
} finally {
long now = EnvironmentEdgeManager.currentTimeMillis();
LOG.info(((completed) ? "completed" : "aborted")
+ " compaction on region " + this
+ " compaction on region " + this
+ " after " + StringUtils.formatTimeDiff(now, startTime));
}
} finally {
@@ -1878,7 +1879,7 @@ public class HRegion implements HeapSize { // , Writable{
LOG.warn("File corruption encountered! " +
"Continuing, but renaming " + edits + " as " + p, ioe);
} else {
// other IO errors may be transient (bad network connection,
// checksum exception on one datanode, etc). throw & retry
throw ioe;
}
@@ -2463,6 +2464,8 @@ public class HRegion implements HeapSize { // , Writable{
*/
protected HRegion openHRegion(final Progressable reporter)
throws IOException {
checkCompressionCodecs();
long seqid = initialize(reporter);
if (this.log != null) {
this.log.setSequenceNumber(seqid);
@@ -2470,6 +2473,13 @@ public class HRegion implements HeapSize { // , Writable{
return this;
}
private void checkCompressionCodecs() throws IOException {
for (HColumnDescriptor fam: regionInfo.getTableDesc().getColumnFamilies()) {
CompressionTest.testCompression(fam.getCompression());
CompressionTest.testCompression(fam.getCompactionCompression());
}
}
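For context, the settings validated here come straight from the table schema. A hedged sketch of how a family ends up declaring LZO, assuming the HColumnDescriptor setters of this era (setCompressionType / setCompactionCompressionType); the table and family names are made up:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.io.hfile.Compression;

public class LzoFamilyExample {
  public static HTableDescriptor lzoTable() {
    HTableDescriptor htd = new HTableDescriptor("t");              // hypothetical table
    HColumnDescriptor fam = new HColumnDescriptor("f");            // hypothetical family
    fam.setCompressionType(Compression.Algorithm.LZO);             // codec for flushes
    fam.setCompactionCompressionType(Compression.Algorithm.LZO);   // codec for compactions
    htd.addFamily(fam);
    return htd;  // a region of this table now fails fast in openHRegion() on servers without LZO
  }
}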
/**
* Inserts a new region's meta information into the passed
* <code>meta</code> region. Used by the HMaster bootstrap code adding

org/apache/hadoop/hbase/regionserver/HRegionServer.java

@@ -108,6 +108,7 @@ import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.WALObserver;
import org.apache.hadoop.hbase.replication.regionserver.Replication;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CompressionTest;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.InfoServer;
import org.apache.hadoop.hbase.util.Pair;
@@ -263,6 +264,17 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
this.connection = HConnectionManager.getConnection(conf);
this.isOnline = false;
// check to see if the codec list is available:
String [] codecs = conf.getStrings("hbase.regionserver.codecs", null);
if (codecs != null) {
for (String codec : codecs) {
if (!CompressionTest.testCompression(codec)) {
throw new IOException("Compression codec " + codec +
" not supported, aborting RS construction");
}
}
}
// Config'ed params
this.numRetries = conf.getInt("hbase.client.retries.number", 2);
this.threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY,
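The new guard is opt-in: it only runs when hbase.regionserver.codecs is set (typically in hbase-site.xml, for example to lzo,gz), and it makes a regionserver abort at construction instead of accepting regions it cannot serve. A hedged, standalone sketch of the same check driven through the Configuration API; the CodecStartupCheck class and its main() are illustrative, only the property name and the testCompression(String) call come from this patch:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.CompressionTest;

public class CodecStartupCheck {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Normally set in hbase-site.xml; listing "lzo" makes startup fail wherever native LZO is missing.
    conf.setStrings("hbase.regionserver.codecs", "lzo", "gz");
    String[] codecs = conf.getStrings("hbase.regionserver.codecs", (String[]) null);
    if (codecs != null) {
      for (String codec : codecs) {
        if (!CompressionTest.testCompression(codec)) {
          throw new IOException("Compression codec " + codec + " not supported, aborting");
        }
      }
    }
    System.out.println("All configured codecs are usable on this host");
  }
}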
@@ -2493,7 +2505,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
.getConstructor(Configuration.class);
return c.newInstance(conf2);
} catch (Exception e) {
throw new RuntimeException("Failed construction of " + "Master: "
throw new RuntimeException("Failed construction of " + "Regionserver: "
+ regionServerClass.toString(), e);
}
}

org/apache/hadoop/hbase/util/CompressionTest.java

@@ -19,11 +19,16 @@
*/
package org.apache.hadoop.hbase.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.io.compress.Compressor;
import java.io.IOException;
import java.net.URI;
/**
@@ -31,6 +36,59 @@ import java.net.URI;
* on every node in your cluster.
*/
public class CompressionTest {
static final Log LOG = LogFactory.getLog(CompressionTest.class);
public static boolean testCompression(String codec) {
codec = codec.toLowerCase();
Compression.Algorithm a;
try {
a = Compression.getCompressionAlgorithmByName(codec);
} catch (IllegalArgumentException e) {
LOG.warn("Codec type: " + codec + " is not known");
return false;
}
try {
testCompression(a);
return true;
} catch (IOException ignored) {
LOG.warn("Can't instantiate codec: " + codec, ignored);
return false;
}
}
private final static Boolean[] compressionTestResults
= new Boolean[Compression.Algorithm.values().length];
static {
for (int i = 0 ; i < compressionTestResults.length ; ++i) {
compressionTestResults[i] = null;
}
}
public static void testCompression(Compression.Algorithm algo)
throws IOException {
if (compressionTestResults[algo.ordinal()] != null) {
if (compressionTestResults[algo.ordinal()]) {
return ; // already passed test, dont do it again.
} else {
// failed.
throw new IOException("Compression algorithm '" + algo.getName() + "'" +
" previously failed test.");
}
}
try {
Compressor c = algo.getCompressor();
algo.returnCompressor(c);
compressionTestResults[algo.ordinal()] = true; // passes
} catch (Throwable t) {
compressionTestResults[algo.ordinal()] = false; // failure
throw new IOException(t);
}
}
protected static Path path = new Path(".hfile-comp-test");
public static void usage() {
@@ -51,7 +109,6 @@ public class CompressionTest {
if (dfs != null) {
try {
dfs.close();
dfs = null;
} catch (Exception e) {
e.printStackTrace();
}

org/apache/hadoop/hbase/util/TestCompressionTest.java

@@ -0,0 +1,58 @@
/*
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.util;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.junit.Test;
import java.io.IOException;
import static org.junit.Assert.*;
public class TestCompressionTest {
@Test
public void testTestCompression() {
// This test will fail if you run the tests with LZO compression available.
try {
CompressionTest.testCompression(Compression.Algorithm.LZO);
fail(); // always throws
} catch (IOException e) {
// there should be a 'cause'.
assertNotNull(e.getCause());
}
// this is testing the caching of the test results.
try {
CompressionTest.testCompression(Compression.Algorithm.LZO);
fail(); // always throws
} catch (IOException e) {
// there should be NO cause because it's a direct exception not wrapped
assertNull(e.getCause());
}
assertFalse(CompressionTest.testCompression("LZO"));
assertTrue(CompressionTest.testCompression("NONE"));
assertTrue(CompressionTest.testCompression("GZ"));
}
}