From e8b21aad62bdea694446933973c19cecec961003 Mon Sep 17 00:00:00 2001
From: sershe
Date: Sat, 16 Mar 2013 02:16:14 +0000
Subject: [PATCH] REVERT HBASE-8034 record on-disk data size for store file
 and make it available during writing

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1457193 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hbase/io/hfile/AbstractHFileWriter.java  |  6 ----
 .../apache/hadoop/hbase/io/hfile/HFile.java  |  5 ---
 .../hadoop/hbase/regionserver/StoreFile.java | 34 -------------------
 3 files changed, 45 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
index ee49573f037..8632328eb51 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
@@ -266,10 +266,4 @@ public abstract class AbstractHFileWriter implements HFile.Writer {
         HConstants.DATA_FILE_UMASK_KEY);
     return FSUtils.create(fs, path, perms);
   }
-
-  @Override
-  public long getCurrentSize() throws IOException {
-    if (this.outputStream == null) return -1;
-    return this.outputStream.getPos();
-  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 392420c24a1..cf3572ffd0d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -319,11 +319,6 @@ public class HFile {
      * HFile V2.
      */
     void addDeleteFamilyBloomFilter(BloomFilterWriter bfw) throws IOException;
-
-    /**
-     * @return Currently written raw data size on disk.
-     */
-    long getCurrentSize() throws IOException;
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 309c03affbe..e6f682f1b32 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -105,9 +105,6 @@ public class StoreFile {
   public static final byte[] DELETE_FAMILY_COUNT =
       Bytes.toBytes("DELETE_FAMILY_COUNT");
 
-  /** See {@link #getEstimatedDiskDataSize()}. */
-  public static final byte[] DISK_DATA_SIZE_KEY = Bytes.toBytes("DISK_DATA_SIZE");
-
   /** Last Bloom filter key in FileInfo */
   private static final byte[] LAST_BLOOM_KEY = Bytes.toBytes("LAST_BLOOM_KEY");
 
@@ -150,12 +147,6 @@ public class StoreFile {
   // whenever you get a Reader.
   private AtomicBoolean majorCompaction = null;
 
-  /** See {@link #getEstimatedDiskDataSize()}. */
-  private long diskDataSize;
-
-  /** See {@link #getEstimatedDiskDataSize()}. */
-  private static double DATA_SIZE_FRACTION_ESTIMATE = 0.98;
-
   // If true, this file should not be included in minor compactions.
   // It's set whenever you get a Reader.
   private boolean excludeFromMinorCompaction = false;
@@ -287,15 +278,6 @@ public class StoreFile {
     return modificationTimeStamp;
   }
 
-  /**
-   * @return Estimated number of bytes taken by the data blocks of this file. Either the exact
-   *         number written into the file metadata ({@link #DISK_DATA_SIZE_KEY}); or estimated as
-   *         {@link #DATA_SIZE_FRACTION_ESTIMATE} of the file, if there's no such field (old files).
-   */
-  public long getEstimatedDiskDataSize() {
-    return diskDataSize;
-  }
-
   /**
    * Return the largest memstoreTS found across all storefiles in
    * the given list. Store files that were created by a mapreduce
@@ -455,12 +437,6 @@ public class StoreFile {
           "proceeding without", e);
       this.reader.timeRangeTracker = null;
     }
-
-    b = metadataMap.get(DISK_DATA_SIZE_KEY);
-    // Estimate which fraction of the file is data if the file doesn't have this field.
-    this.diskDataSize = (b != null)
-        ? Bytes.toLong(b) : (long)(this.reader.length() * DATA_SIZE_FRACTION_ESTIMATE);
-
     return this.reader;
   }
 
@@ -1034,12 +1010,6 @@ public class StoreFile {
     }
 
     public void close() throws IOException {
-      // Estimate data size in this file before blooms and the HFile tail blocks.
-      long currentSize = writer.getCurrentSize();
-      if (currentSize >= 0) {
-        writer.appendFileInfo(DISK_DATA_SIZE_KEY, Bytes.toBytes(currentSize));
-      }
-
       boolean hasGeneralBloom = this.closeGeneralBloomFilter();
       boolean hasDeleteFamilyBloom = this.closeDeleteFamilyBloomFilter();
 
@@ -1062,10 +1032,6 @@ public class StoreFile {
     HFile.Writer getHFileWriter() {
       return writer;
     }
-
-    public long getCurrentSize() throws IOException {
-      return writer.getCurrentSize();
-    }
   }
 
   /**