REVERT HBASE-8034 record on-disk data size for store file and make it available during writing
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1457193 13f79535-47bb-0310-9956-ffa450edef68
commit e8b21aad62
parent eb039567de
@@ -266,10 +266,4 @@ public abstract class AbstractHFileWriter implements HFile.Writer {
         HConstants.DATA_FILE_UMASK_KEY);
     return FSUtils.create(fs, path, perms);
   }
-
-  @Override
-  public long getCurrentSize() throws IOException {
-    if (this.outputStream == null) return -1;
-    return this.outputStream.getPos();
-  }
 }
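The reverted writer method above is just a view onto the output stream's position: every compressed/encoded byte flushed so far counts, so sampling the position before the bloom and trailer blocks go out gives the on-disk size of the data portion. Below is a minimal, self-contained sketch of that pattern in plain java.io; the patch itself delegates to Hadoop's FSDataOutputStream.getPos(), which this stand-in only imitates.

import java.io.ByteArrayOutputStream;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;

final class PositionTrackingOutputStream extends FilterOutputStream {
  private long pos;  // bytes written so far, i.e. the current file position

  PositionTrackingOutputStream(OutputStream out) { super(out); }

  @Override public void write(int b) throws IOException {
    out.write(b);
    pos++;
  }

  @Override public void write(byte[] b, int off, int len) throws IOException {
    out.write(b, off, len);
    pos += len;
  }

  /** Stand-in for FSDataOutputStream.getPos(): bytes emitted so far. */
  long getPos() { return pos; }

  public static void main(String[] args) throws IOException {
    PositionTrackingOutputStream s =
        new PositionTrackingOutputStream(new ByteArrayOutputStream());
    s.write(new byte[] {1, 2, 3}, 0, 3);
    System.out.println(s.getPos());  // prints 3
  }
}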
@@ -319,11 +319,6 @@ public class HFile {
    * HFile V2.
    */
   void addDeleteFamilyBloomFilter(BloomFilterWriter bfw) throws IOException;
-
-  /**
-   * @return Currently written raw data size on disk.
-   */
-  long getCurrentSize() throws IOException;
 }

 /**
@@ -105,9 +105,6 @@ public class StoreFile {
   public static final byte[] DELETE_FAMILY_COUNT =
       Bytes.toBytes("DELETE_FAMILY_COUNT");

-  /** See {@link #getEstimatedDiskDataSize()}. */
-  public static final byte[] DISK_DATA_SIZE_KEY = Bytes.toBytes("DISK_DATA_SIZE");
-
   /** Last Bloom filter key in FileInfo */
   private static final byte[] LAST_BLOOM_KEY = Bytes.toBytes("LAST_BLOOM_KEY");

@@ -150,12 +147,6 @@ public class StoreFile {
   // whenever you get a Reader.
   private AtomicBoolean majorCompaction = null;

-  /** See {@link #getEstimatedDiskDataSize()}. */
-  private long diskDataSize;
-
-  /** See {@link #getEstimatedDiskDataSize()}. */
-  private static double DATA_SIZE_FRACTION_ESTIMATE = 0.98;
-
   // If true, this file should not be included in minor compactions.
   // It's set whenever you get a Reader.
   private boolean excludeFromMinorCompaction = false;
@@ -287,15 +278,6 @@ public class StoreFile {
     return modificationTimeStamp;
   }

-  /**
-   * @return Estimated number of bytes taken by the data blocks of this file. Either the exact
-   *     number written into the file metadata ({@link #DISK_DATA_SIZE_KEY}); or estimated as
-   *     {@link #DATA_SIZE_FRACTION_ESTIMATE} of the file, if there's no such field (old files).
-   */
-  public long getEstimatedDiskDataSize() {
-    return diskDataSize;
-  }
-
   /**
    * Return the largest memstoreTS found across all storefiles in
    * the given list. Store files that were created by a mapreduce
@@ -455,12 +437,6 @@ public class StoreFile {
           "proceeding without", e);
       this.reader.timeRangeTracker = null;
     }
-
-    b = metadataMap.get(DISK_DATA_SIZE_KEY);
-    // Estimate which fraction of the file is data if the file doesn't have this field.
-    this.diskDataSize = (b != null)
-        ? Bytes.toLong(b) : (long)(this.reader.length() * DATA_SIZE_FRACTION_ESTIMATE);
-
     return this.reader;
   }

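The reader-side fallback removed in the hunk above is worth spelling out: when a file predates the feature and carries no DISK_DATA_SIZE entry, the data size is guessed as a fixed 98% of the total file length. A small self-contained sketch of that logic follows; the names mirror the removed lines rather than any HBase API.

final class DiskDataSizeFallback {
  private static final double DATA_SIZE_FRACTION_ESTIMATE = 0.98;

  /** @param recorded value decoded from file metadata, or null if the key is absent */
  static long diskDataSize(Long recorded, long fileLength) {
    return (recorded != null)
        ? recorded
        : (long) (fileLength * DATA_SIZE_FRACTION_ESTIMATE);
  }

  public static void main(String[] args) {
    long len = 100L * 1024 * 1024;                       // a 100 MiB store file
    System.out.println(diskDataSize(null, len));         // ~102760448: 98% of len
    System.out.println(diskDataSize(95_000_000L, len));  // the exact recorded value
  }
}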
@@ -1034,12 +1010,6 @@ public class StoreFile {
     }

     public void close() throws IOException {
-      // Estimate data size in this file before blooms and the HFile tail blocks.
-      long currentSize = writer.getCurrentSize();
-      if (currentSize >= 0) {
-        writer.appendFileInfo(DISK_DATA_SIZE_KEY, Bytes.toBytes(currentSize));
-      }
-
       boolean hasGeneralBloom = this.closeGeneralBloomFilter();
       boolean hasDeleteFamilyBloom = this.closeDeleteFamilyBloomFilter();

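The close() hunk above shows the ordering that makes the recorded number meaningful: the stream position is sampled before the bloom filters and the HFile trailer are appended, so the value covers only the data blocks. A hedged sketch of that ordering with hypothetical stand-in types; only getCurrentSize and the DISK_DATA_SIZE file-info entry come from the patch.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

final class RecordingWriter {
  private final Map<String, Long> fileInfo = new HashMap<>();
  private long position = 0;  // bytes of data blocks written so far

  void writeDataBlock(byte[] block) {
    position += block.length;  // stand-in for a real block write
  }

  /** Mirrors the reverted getCurrentSize(); in the patch, -1 means no open stream. */
  long getCurrentSize() {
    return position;
  }

  void close() throws IOException {
    // Sample the size *before* blooms and the trailer inflate the file,
    // exactly as the removed close() did.
    long currentSize = getCurrentSize();
    if (currentSize >= 0) {
      fileInfo.put("DISK_DATA_SIZE", currentSize);
    }
    // ... bloom filter blocks and the file trailer would be appended here ...
  }
}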
@@ -1062,10 +1032,6 @@ public class StoreFile {
     HFile.Writer getHFileWriter() {
       return writer;
     }
-
-    public long getCurrentSize() throws IOException {
-      return writer.getCurrentSize();
-    }
   }

   /**