diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
index 2d9e91c4284..86d561d30f0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
@@ -94,10 +94,22 @@ public interface ColumnFamilyDescriptor {
    * @return Compression type setting.
    */
   Compression.Algorithm getCompactionCompressionType();
+
+  /**
+   * @return Compression type setting for major compactions.
+   */
+  Compression.Algorithm getMajorCompactionCompressionType();
+
+  /**
+   * @return Compression type setting for minor compactions.
+   */
+  Compression.Algorithm getMinorCompactionCompressionType();
+
   /**
    * @return Compression type setting.
    */
   Compression.Algorithm getCompressionType();
+
   /**
    * @return an unmodifiable map.
    */
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
index 17479927691..06a2aec8cbf 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
@@ -75,6 +75,10 @@ public class ColumnFamilyDescriptorBuilder {
   @InterfaceAudience.Private
   public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
   private static final Bytes COMPRESSION_COMPACT_BYTES = new Bytes(Bytes.toBytes(COMPRESSION_COMPACT));
+  public static final String COMPRESSION_COMPACT_MAJOR = "COMPRESSION_COMPACT_MAJOR";
+  private static final Bytes COMPRESSION_COMPACT_MAJOR_BYTES = new Bytes(Bytes.toBytes(COMPRESSION_COMPACT_MAJOR));
+  public static final String COMPRESSION_COMPACT_MINOR = "COMPRESSION_COMPACT_MINOR";
+  private static final Bytes COMPRESSION_COMPACT_MINOR_BYTES = new Bytes(Bytes.toBytes(COMPRESSION_COMPACT_MINOR));
   @InterfaceAudience.Private
   public static final String DATA_BLOCK_ENCODING = "DATA_BLOCK_ENCODING";
   private static final Bytes DATA_BLOCK_ENCODING_BYTES = new Bytes(Bytes.toBytes(DATA_BLOCK_ENCODING));
@@ -449,6 +453,16 @@ public class ColumnFamilyDescriptorBuilder {
     return this;
   }
 
+  public ColumnFamilyDescriptorBuilder setMajorCompactionCompressionType(Compression.Algorithm value) {
+    desc.setMajorCompactionCompressionType(value);
+    return this;
+  }
+
+  public ColumnFamilyDescriptorBuilder setMinorCompactionCompressionType(Compression.Algorithm value) {
+    desc.setMinorCompactionCompressionType(value);
+    return this;
+  }
+
   public ColumnFamilyDescriptorBuilder setCompressTags(boolean value) {
     desc.setCompressTags(value);
     return this;
@@ -839,6 +853,18 @@ public class ColumnFamilyDescriptorBuilder {
         n -> Compression.Algorithm.valueOf(n.toUpperCase()), getCompressionType());
     }
 
+    @Override
+    public Compression.Algorithm getMajorCompactionCompressionType() {
+      return getStringOrDefault(COMPRESSION_COMPACT_MAJOR_BYTES,
+        n -> Compression.Algorithm.valueOf(n.toUpperCase()), getCompactionCompressionType());
+    }
+
+    @Override
+    public Compression.Algorithm getMinorCompactionCompressionType() {
+      return getStringOrDefault(COMPRESSION_COMPACT_MINOR_BYTES,
+        n -> Compression.Algorithm.valueOf(n.toUpperCase()), getCompactionCompressionType());
+    }
+
     /**
      * Compression types supported in hbase. LZO is not bundled as part of the
      * hbase distribution. See
@@ -853,6 +879,16 @@ public class ColumnFamilyDescriptorBuilder {
       return setValue(COMPRESSION_COMPACT_BYTES, type.name());
     }
 
+    public ModifyableColumnFamilyDescriptor setMajorCompactionCompressionType(
+        Compression.Algorithm type) {
+      return setValue(COMPRESSION_COMPACT_MAJOR_BYTES, type.name());
+    }
+
+    public ModifyableColumnFamilyDescriptor setMinorCompactionCompressionType(
+        Compression.Algorithm type) {
+      return setValue(COMPRESSION_COMPACT_MINOR_BYTES, type.name());
+    }
+
     @Override
     public boolean isInMemory() {
       return getStringOrDefault(IN_MEMORY_BYTES, Boolean::valueOf, DEFAULT_IN_MEMORY);
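For reviewers, a quick sketch of how the new builder methods are meant to be used from client code; the table name, family name, codec choices and the admin handle are illustrative, not part of this patch:

    // Flushes and minor compactions stay on a cheap codec; major compactions,
    // which rewrite every store file anyway, spend the extra CPU on GZ.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
      .newBuilder(Bytes.toBytes("cf"))
      .setCompressionType(Compression.Algorithm.SNAPPY)
      .setMinorCompactionCompressionType(Compression.Algorithm.SNAPPY)
      .setMajorCompactionCompressionType(Compression.Algorithm.GZ)
      .build();
    admin.createTable(TableDescriptorBuilder
      .newBuilder(TableName.valueOf("usertable"))
      .setColumnFamily(cf)
      .build());

Under the hood the two setters simply store the COMPRESSION_COMPACT_MAJOR and COMPRESSION_COMPACT_MINOR attributes introduced above, so the same settings can also be applied to an existing family via Admin.modifyColumnFamily.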
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
index 14f88786b34..0531b138d74 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
@@ -147,10 +147,11 @@ public class DefaultMobStoreCompactor extends DefaultCompactor {
         @Override
         public StoreFileWriter createWriter(InternalScanner scanner,
             org.apache.hadoop.hbase.regionserver.compactions.Compactor.FileDetails fd,
-            boolean shouldDropBehind) throws IOException {
+            boolean shouldDropBehind, boolean major) throws IOException {
           // make this writer with tags always because of possible new cells with tags.
-          return store.createWriterInTmp(fd.maxKeyCount, compactionCompression, true, true, true,
-            shouldDropBehind);
+          return store.createWriterInTmp(fd.maxKeyCount,
+            major ? majorCompactionCompression : minorCompactionCompression,
+            true, true, true, shouldDropBehind);
         }
       };
@@ -350,7 +351,7 @@ public class DefaultMobStoreCompactor extends DefaultCompactor {
     Cell mobCell = null;
 
     try {
-      mobFileWriter = newMobWriter(fd);
+      mobFileWriter = newMobWriter(fd, major);
       fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
 
       do {
@@ -428,7 +429,7 @@ public class DefaultMobStoreCompactor extends DefaultCompactor {
               LOG.debug("Closing output MOB File, length={} file={}, store={}", len,
                 mobFileWriter.getPath().getName(), getStoreInfo());
               commitOrAbortMobWriter(mobFileWriter, fd.maxSeqId, mobCells, major);
-              mobFileWriter = newMobWriter(fd);
+              mobFileWriter = newMobWriter(fd, major);
               fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
               mobCells = 0;
             }
@@ -472,7 +473,7 @@ public class DefaultMobStoreCompactor extends DefaultCompactor {
               long len = mobFileWriter.getPos();
               if (len > maxMobFileSize) {
                 commitOrAbortMobWriter(mobFileWriter, fd.maxSeqId, mobCells, major);
-                mobFileWriter = newMobWriter(fd);
+                mobFileWriter = newMobWriter(fd, major);
                 fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
                 mobCells = 0;
               }
@@ -524,7 +525,7 @@ public class DefaultMobStoreCompactor extends DefaultCompactor {
             long len = mobFileWriter.getPos();
             if (len > maxMobFileSize) {
               commitOrAbortMobWriter(mobFileWriter, fd.maxSeqId, mobCells, major);
-              mobFileWriter = newMobWriter(fd);
+              mobFileWriter = newMobWriter(fd, major);
               fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
               mobCells = 0;
             }
@@ -611,11 +612,12 @@ public class DefaultMobStoreCompactor extends DefaultCompactor {
     }
   }
 
-  private StoreFileWriter newMobWriter(FileDetails fd)
+  private StoreFileWriter newMobWriter(FileDetails fd, boolean major)
       throws IOException {
     try {
       StoreFileWriter mobFileWriter = mobStore.createWriterInTmp(new Date(fd.latestPutTs),
-        fd.maxKeyCount, compactionCompression, store.getRegionInfo().getStartKey(),
-        true);
+        fd.maxKeyCount, major ? majorCompactionCompression : minorCompactionCompression,
+        store.getRegionInfo().getStartKey(), true);
       LOG.debug("New MOB writer created={} store={}", mobFileWriter.getPath().getName(),
         getStoreInfo());
       // Add reference we get for compact MOB
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java
index f2816d89686..42841bfee53 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java
@@ -47,17 +47,17 @@ public abstract class AbstractMultiOutputCompactor
   protected final HStore store;
   protected final int compactionKVMax;
-  protected final Compression.Algorithm compactionCompression;
+  protected final Compression.Algorithm majorCompactionCompression;
+  protected final Compression.Algorithm minorCompactionCompression;
 
   /** specify how many days to keep MVCC values during major compaction **/
   protected int keepSeqIdPeriod;
@@ -96,8 +97,10 @@ public abstract class Compactor {
     this.store = store;
     this.compactionKVMax =
       this.conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
-    this.compactionCompression = (this.store.getColumnFamilyDescriptor() == null) ?
-      Compression.Algorithm.NONE : this.store.getColumnFamilyDescriptor().getCompactionCompressionType();
+    this.majorCompactionCompression = (store.getColumnFamilyDescriptor() == null) ?
+      Compression.Algorithm.NONE : store.getColumnFamilyDescriptor().getMajorCompactionCompressionType();
+    this.minorCompactionCompression = (store.getColumnFamilyDescriptor() == null) ?
+      Compression.Algorithm.NONE : store.getColumnFamilyDescriptor().getMinorCompactionCompressionType();
     this.keepSeqIdPeriod = Math.max(this.conf.getInt(HConstants.KEEP_SEQID_PERIOD,
       HConstants.MIN_KEEP_SEQID_PERIOD), HConstants.MIN_KEEP_SEQID_PERIOD);
     this.dropCacheMajor = conf.getBoolean(MAJOR_COMPACTION_DROP_CACHE, true);
@@ -107,7 +110,7 @@ public abstract class Compactor {
 
   protected interface CellSinkFactory<S> {
-    S createWriter(InternalScanner scanner, FileDetails fd, boolean shouldDropBehind)
+    S createWriter(InternalScanner scanner, FileDetails fd, boolean shouldDropBehind, boolean major)
       throws IOException;
   }
 
@@ -139,10 +142,11 @@ public abstract class Compactor {
    * Extracts some details about the files to compact that are commonly needed by compactors.
    * @param filesToCompact Files.
    * @param allFiles Whether all files are included for compaction
+   * @param major If major compaction
    * @return The result.
    */
   private FileDetails getFileDetails(
-      Collection<HStoreFile> filesToCompact, boolean allFiles) throws IOException {
+      Collection<HStoreFile> filesToCompact, boolean allFiles, boolean major) throws IOException {
     FileDetails fd = new FileDetails();
     long oldestHFileTimestampToKeepMVCC =
       System.currentTimeMillis() - (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);
@@ -212,7 +216,7 @@ public abstract class Compactor {
           r.getBloomFilterType().toString(),
           TraditionalBinaryPrefix.long2String(r.length(), "", 1),
           r.getHFileReader().getDataBlockEncoding(),
-          compactionCompression,
+          major ? majorCompactionCompression : minorCompactionCompression,
           seqNum,
           (allFiles? ", earliestPutTs=" + earliestPutTs: ""));
     }
@@ -263,21 +267,23 @@ public abstract class Compactor {
    * @return Writer for a new StoreFile in the tmp dir.
    * @throws IOException if creation failed
    */
-  protected final StoreFileWriter createTmpWriter(FileDetails fd, boolean shouldDropBehind)
+  protected final StoreFileWriter createTmpWriter(FileDetails fd, boolean shouldDropBehind, boolean major)
     throws IOException {
     // When all MVCC readpoints are 0, don't write them.
     // See HBASE-8166, HBASE-12600, and HBASE-13389.
-    return store
-      .createWriterInTmp(fd.maxKeyCount, this.compactionCompression, true, fd.maxMVCCReadpoint > 0,
+    return store.createWriterInTmp(fd.maxKeyCount,
+      major ? majorCompactionCompression : minorCompactionCompression,
+      true, fd.maxMVCCReadpoint > 0,
       fd.maxTagsLength > 0, shouldDropBehind, fd.totalCompactedFilesSize,
       HConstants.EMPTY_STRING);
   }
 
   protected final StoreFileWriter createTmpWriter(FileDetails fd, boolean shouldDropBehind,
-      String fileStoragePolicy) throws IOException {
-    return store
-      .createWriterInTmp(fd.maxKeyCount, this.compactionCompression, true, fd.maxMVCCReadpoint > 0,
-      fd.maxTagsLength > 0, shouldDropBehind, fd.totalCompactedFilesSize, fileStoragePolicy);
+      String fileStoragePolicy, boolean major) throws IOException {
+    return store.createWriterInTmp(fd.maxKeyCount,
+      major ? majorCompactionCompression : minorCompactionCompression,
+      true, fd.maxMVCCReadpoint > 0,
+      fd.maxTagsLength > 0, shouldDropBehind, fd.totalCompactedFilesSize, fileStoragePolicy);
   }
 
   private ScanInfo preCompactScannerOpen(CompactionRequestImpl request, ScanType scanType,
@@ -308,7 +314,7 @@ public abstract class Compactor {
   protected final List<Path> compact(final CompactionRequestImpl request,
       InternalScannerFactory scannerFactory, CellSinkFactory<T> sinkFactory,
       ThroughputController throughputController, User user) throws IOException {
-    FileDetails fd = getFileDetails(request.getFiles(), request.isAllFiles());
+    FileDetails fd = getFileDetails(request.getFiles(), request.isAllFiles(), request.isMajor());
    this.progress = new CompactionProgress(fd.maxKeyCount);
 
     // Find the smallest read point across all the Scanners.
@@ -338,7 +344,7 @@ public abstract class Compactor {
         smallestReadPoint = Math.min(fd.minSeqIdToKeep, smallestReadPoint);
         cleanSeqId = true;
       }
-      writer = sinkFactory.createWriter(scanner, fd, dropCache);
+      writer = sinkFactory.createWriter(scanner, fd, dropCache, request.isMajor());
       finished = performCompaction(fd, scanner, writer, smallestReadPoint, cleanSeqId,
         throughputController, request.isAllFiles(), request.getFiles().size());
       if (!finished) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java
index ef64df1efe2..fd543308290 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java
@@ -68,11 +68,11 @@ public class DateTieredCompactor extends AbstractMultiOutputCompactor {
       @Override
       public StoreFileWriter createWriter(InternalScanner scanner,
           org.apache.hadoop.hbase.regionserver.compactions.Compactor.FileDetails fd,
-          boolean shouldDropBehind) throws IOException {
-        return createTmpWriter(fd, shouldDropBehind);
+          boolean shouldDropBehind, boolean major) throws IOException {
+        return createTmpWriter(fd, shouldDropBehind, major);
       }
     };
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java
index fe07d9e888f..547555e3812 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java
@@ -93,10 +93,10 @@ public class StripeCompactor extends AbstractMultiOutputCompactor
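One behavioural note that applies to all of the compactor changes above: when the new attributes are not set, getMajorCompactionCompressionType() and getMinorCompactionCompressionType() fall back to getCompactionCompressionType() (and ultimately to getCompressionType()), so the major ? majorCompactionCompression : minorCompactionCompression selections are no-ops for existing tables. A small sketch of that fallback, not a test shipped with this patch; the family name and codecs are made up:

    ColumnFamilyDescriptor legacy = ColumnFamilyDescriptorBuilder
      .newBuilder(Bytes.toBytes("cf"))
      .setCompressionType(Compression.Algorithm.SNAPPY)
      .setCompactionCompressionType(Compression.Algorithm.GZ) // the old, single compaction knob
      .build();
    // Both resolve to GZ because COMPRESSION_COMPACT_MAJOR / _MINOR are unset.
    Compression.Algorithm forMajor = legacy.getMajorCompactionCompressionType();
    Compression.Algorithm forMinor = legacy.getMinorCompactionCompressionType();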