HBASE-3927 display total uncompressed byte size of a region in web UI
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1136686 13f79535-47bb-0310-9956-ffa450edef68
parent bd7c8e327f
commit ebb63ef44d
@@ -266,6 +266,7 @@ Release 0.91.0 - Unreleased
    HBASE-3940  HBase daemons should log version info at startup and possibly
                periodically (Li Pi)
    HBASE-3789  Cleanup the locking contention in the master
+   HBASE-3927  Display total uncompressed byte size of a region in web UI
 
   TASKS
    HBASE-3559  Move report of split to master OFF the heartbeat channel
@@ -36,7 +36,7 @@ import org.apache.hadoop.io.WritableComparable;
  */
 public class HServerLoad extends VersionedWritable
 implements WritableComparable<HServerLoad> {
-  private static final byte VERSION = 1;
+  private static final byte VERSION = 2;
   // Empty load instance.
   public static final HServerLoad EMPTY_HSERVERLOAD = new HServerLoad();
 
@@ -64,7 +64,7 @@ implements WritableComparable<HServerLoad> {
    * Encapsulates per-region loading metrics.
    */
   public static class RegionLoad extends VersionedWritable {
-    private static final byte VERSION = 0;
+    private static final byte VERSION = 1;
 
     /** @return the object version number */
     public byte getVersion() {
@@ -77,6 +77,8 @@ implements WritableComparable<HServerLoad> {
     private int stores;
     /** the number of storefiles for the region */
     private int storefiles;
+    /** the total size of the store files for the region, uncompressed, in MB */
+    private int storeUncompressedSizeMB;
     /** the current total size of the store files for the region, in MB */
     private int storefileSizeMB;
     /** the current size of the memstore for the region, in MB */
@@ -106,12 +108,14 @@ implements WritableComparable<HServerLoad> {
      * @param writeRequestsCount
      */
     public RegionLoad(final byte[] name, final int stores,
-        final int storefiles, final int storefileSizeMB,
+        final int storefiles, final int storeUncompressedSizeMB,
+        final int storefileSizeMB,
         final int memstoreSizeMB, final int storefileIndexSizeMB,
         final int readRequestsCount, final int writeRequestsCount) {
       this.name = name;
       this.stores = stores;
       this.storefiles = storefiles;
+      this.storeUncompressedSizeMB = storeUncompressedSizeMB;
       this.storefileSizeMB = storefileSizeMB;
       this.memstoreSizeMB = memstoreSizeMB;
       this.storefileIndexSizeMB = storefileIndexSizeMB;
@@ -246,13 +250,14 @@ implements WritableComparable<HServerLoad> {
     // Writable
     public void readFields(DataInput in) throws IOException {
       super.readFields(in);
-      int version = getVersion();
-      if (version != VERSION) throw new IOException("Version mismatch; " + version);
+      int version = in.readByte();
+      if (version > VERSION) throw new IOException("Version mismatch; " + version);
       int namelen = in.readInt();
       this.name = new byte[namelen];
       in.readFully(this.name);
       this.stores = in.readInt();
       this.storefiles = in.readInt();
+      this.storeUncompressedSizeMB = in.readInt();
       this.storefileSizeMB = in.readInt();
       this.memstoreSizeMB = in.readInt();
       this.storefileIndexSizeMB = in.readInt();
@@ -262,10 +267,12 @@ implements WritableComparable<HServerLoad> {
 
     public void write(DataOutput out) throws IOException {
       super.write(out);
+      out.writeByte(VERSION);
       out.writeInt(name.length);
       out.write(name);
       out.writeInt(stores);
       out.writeInt(storefiles);
+      out.writeInt(storeUncompressedSizeMB);
       out.writeInt(storefileSizeMB);
       out.writeInt(memstoreSizeMB);
       out.writeInt(storefileIndexSizeMB);
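The two RegionLoad hunks above change the serialized form, which is why the class version is bumped from 0 to 1, the writer now stamps a version byte, and the reader only rejects data written by a newer version (version > VERSION) instead of demanding an exact match. A small self-contained sketch of that pattern, with names borrowed from the patch purely for illustration (this is not the HBase class itself):

import java.io.*;

// Illustrative sketch of the versioned serialization pattern used by RegionLoad above.
class RegionLoadSketch {
  private static final byte VERSION = 1;    // bumped from 0 when the new field was appended
  int storefiles;
  int storeUncompressedSizeMB;              // the field added by this patch
  int storefileSizeMB;

  public void write(DataOutput out) throws IOException {
    out.writeByte(VERSION);                 // writer stamps the version it produced
    out.writeInt(storefiles);
    out.writeInt(storeUncompressedSizeMB);  // new field slotted between the existing ones
    out.writeInt(storefileSizeMB);
  }

  public void readFields(DataInput in) throws IOException {
    int version = in.readByte();
    // Same check as the patch: refuse only data written by a newer version.
    if (version > VERSION) throw new IOException("Version mismatch; " + version);
    storefiles = in.readInt();
    storeUncompressedSizeMB = in.readInt();
    storefileSizeMB = in.readInt();
  }

  // Quick round-trip check through a byte array.
  static RegionLoadSketch roundTrip(RegionLoadSketch in) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    in.write(new DataOutputStream(bos));
    RegionLoadSketch out = new RegionLoadSketch();
    out.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
    return out;
  }
}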
@@ -282,8 +289,15 @@ implements WritableComparable<HServerLoad> {
         Integer.valueOf(this.stores));
       sb = Strings.appendKeyValue(sb, "storefiles",
         Integer.valueOf(this.storefiles));
+      sb = Strings.appendKeyValue(sb, "storefileUncompressedSizeMB",
+        Integer.valueOf(this.storeUncompressedSizeMB));
       sb = Strings.appendKeyValue(sb, "storefileSizeMB",
         Integer.valueOf(this.storefileSizeMB));
+      if (this.storeUncompressedSizeMB != 0) {
+        sb = Strings.appendKeyValue(sb, "compressionRatio",
+          String.format("%.4f", (float)this.storefileSizeMB/
+            (float)this.storeUncompressedSizeMB));
+      }
       sb = Strings.appendKeyValue(sb, "memstoreSizeMB",
         Integer.valueOf(this.memstoreSizeMB));
       sb = Strings.appendKeyValue(sb, "storefileIndexSizeMB",
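The new compressionRatio entry in toString() is simply on-disk size divided by uncompressed size, formatted to four decimal places, and it is skipped when the uncompressed total is zero so a brand-new or empty region does not divide by zero. A tiny worked example with made-up sizes:

public class CompressionRatioExample {
  public static void main(String[] args) {
    // Hypothetical sizes; same formula as RegionLoad.toString() above.
    int storefileSizeMB = 25;            // on-disk (compressed) total
    int storeUncompressedSizeMB = 100;   // uncompressed total
    String ratio = String.format("%.4f",
        (float) storefileSizeMB / (float) storeUncompressedSizeMB);
    System.out.println(ratio);           // prints 0.2500, i.e. roughly 4:1 compression
  }
}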
@@ -480,8 +494,8 @@ implements WritableComparable<HServerLoad> {
 
   public void readFields(DataInput in) throws IOException {
     super.readFields(in);
-    int version = getVersion();
-    if (version != VERSION) throw new IOException("Version mismatch; " + version);
+    int version = in.readByte();
+    if (version > VERSION) throw new IOException("Version mismatch; " + version);
     numberOfRequests = in.readInt();
     usedHeapMB = in.readInt();
     maxHeapMB = in.readInt();
@@ -495,6 +509,7 @@ implements WritableComparable<HServerLoad> {
 
   public void write(DataOutput out) throws IOException {
     super.write(out);
+    out.writeByte(VERSION);
     out.writeInt(numberOfRequests);
     out.writeInt(usedHeapMB);
     out.writeInt(maxHeapMB);
@@ -21,10 +21,8 @@ package org.apache.hadoop.hbase.client;
 
 import java.io.Closeable;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.List;
 import java.util.ArrayList;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -841,6 +841,10 @@ public class HFile {
       return this.fileSize;
     }
 
+    public long getTotalUncompressedBytes() {
+      return this.trailer.totalUncompressedBytes;
+    }
+
     public boolean inMemory() {
       return this.inMemory;
     }
@@ -908,6 +908,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
     byte[] name = r.getRegionName();
     int stores = 0;
     int storefiles = 0;
+    int storeUncompressedSizeMB = 0;
     int storefileSizeMB = 0;
     int memstoreSizeMB = (int) (r.memstoreSize.get() / 1024 / 1024);
     int storefileIndexSizeMB = 0;
@@ -915,11 +916,14 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
       stores += r.stores.size();
       for (Store store : r.stores.values()) {
         storefiles += store.getStorefilesCount();
+        storeUncompressedSizeMB += (int) (store.getStoreSizeUncompressed()
+            / 1024 / 1024);
         storefileSizeMB += (int) (store.getStorefilesSize() / 1024 / 1024);
         storefileIndexSizeMB += (int) (store.getStorefilesIndexSize() / 1024 / 1024);
       }
     }
     return new HServerLoad.RegionLoad(name,stores, storefiles,
+        storeUncompressedSizeMB,
         storefileSizeMB, memstoreSizeMB, storefileIndexSizeMB,
         (int) r.readRequestsCount.get(), (int) r.writeRequestsCount.get());
   }
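Note how the HRegionServer hunk above folds each store's byte count into whole megabytes with truncating integer division before summing, so a store holding less than 1 MB of data contributes nothing to the reported region totals. A standalone illustration with made-up byte counts:

public class UncompressedSizeRollupExample {
  public static void main(String[] args) {
    // Hypothetical per-store uncompressed byte counts for one region.
    long[] storeUncompressedBytes = { 805306368L, 536870912L, 700000L };

    int storeUncompressedSizeMB = 0;
    for (long bytes : storeUncompressedBytes) {
      // Same conversion as the patch: truncating division down to whole MB.
      storeUncompressedSizeMB += (int) (bytes / 1024 / 1024);
    }
    // 768 + 512 + 0 = 1280; the 700 KB store rounds down to 0 MB.
    System.out.println(storeUncompressedSizeMB + " MB");
  }
}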
@@ -110,6 +110,7 @@ public class Store implements HeapSize {
   private final long desiredMaxFileSize;
   private final int blockingStoreFileCount;
   private volatile long storeSize = 0L;
+  private volatile long totalUncompressedBytes = 0L;
   private final Object flushLock = new Object();
   final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
   private final String storeNameStr;
@@ -288,6 +289,7 @@ public class Store implements HeapSize {
       }
       long length = curfile.getReader().length();
       this.storeSize += length;
+      this.totalUncompressedBytes += curfile.getReader().getTotalUncompressedBytes();
       if (LOG.isDebugEnabled()) {
         LOG.debug("loaded " + curfile.toStringDetailed());
       }
@@ -525,6 +527,7 @@ public class Store implements HeapSize {
         this.conf, this.family.getBloomFilterType(), this.inMemory);
     StoreFile.Reader r = sf.createReader();
     this.storeSize += r.length();
+    this.totalUncompressedBytes += r.getTotalUncompressedBytes();
     if(LOG.isInfoEnabled()) {
       LOG.info("Added " + sf + ", entries=" + r.getEntries() +
         ", sequenceid=" + logCacheFlushId +
@@ -1208,6 +1211,7 @@ public class Store implements HeapSize {
       }
       // 4. Compute new store size
       this.storeSize = 0L;
+      this.totalUncompressedBytes = 0L;
       for (StoreFile hsf : this.storefiles) {
         StoreFile.Reader r = hsf.getReader();
         if (r == null) {
@@ -1215,6 +1219,7 @@ public class Store implements HeapSize {
           continue;
         }
         this.storeSize += r.length();
+        this.totalUncompressedBytes += r.getTotalUncompressedBytes();
       }
     } finally {
       this.lock.writeLock().unlock();
@@ -1530,6 +1535,13 @@ public class Store implements HeapSize {
     return this.storefiles.size();
   }
 
+  /**
+   * @return The size of the store files, in bytes, uncompressed.
+   */
+  long getStoreSizeUncompressed() {
+    return this.totalUncompressedBytes;
+  }
+
   /**
    * @return The size of the store files, in bytes.
    */
@@ -1684,7 +1696,7 @@ public class Store implements HeapSize {
 
   public static final long FIXED_OVERHEAD = ClassSize.align(
       ClassSize.OBJECT + (15 * ClassSize.REFERENCE) +
-      (7 * Bytes.SIZEOF_LONG) + (1 * Bytes.SIZEOF_DOUBLE) +
+      (8 * Bytes.SIZEOF_LONG) + (1 * Bytes.SIZEOF_DOUBLE) +
       (4 * Bytes.SIZEOF_INT) + (3 * Bytes.SIZEOF_BOOLEAN));
 
   public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
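The FIXED_OVERHEAD bump above, from 7 to 8 Bytes.SIZEOF_LONG, presumably accounts for the new long field totalUncompressedBytes added to Store earlier in this patch, keeping the class's HeapSize self-estimate in line with its actual field layout.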
@@ -1092,6 +1092,10 @@ public class StoreFile {
       return reader.length();
     }
 
+    public long getTotalUncompressedBytes() {
+      return reader.getTotalUncompressedBytes();
+    }
+
     public int getEntries() {
       return reader.getEntries();
     }
@@ -353,7 +353,7 @@ public class HLogSplitter {
   }
 
   public boolean splitLogFileToTemp(FileStatus logfile, String tmpname,
-    CancelableProgressable reporter) throws IOException {
+      CancelableProgressable reporter) throws IOException {
     final Map<byte[], Object> logWriters = Collections.
       synchronizedMap(new TreeMap<byte[], Object>(Bytes.BYTES_COMPARATOR));
     boolean isCorrupted = false;
@@ -409,7 +409,10 @@ public class HLogSplitter {
         if (wap == null) {
           wap = createWAP(region, entry, rootDir, tmpname, fs, conf);
           if (wap == null) {
             // ignore edits from this region. It doesn't exist anymore.
             // It was probably already split.
+            logWriters.put(region, BAD_WRITER);
+            continue;
+          } else {
             logWriters.put(region, wap);
           }
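In this last hunk, a region for which no writer can be created (presumably because it was already split and its directory is gone) is now recorded under the BAD_WRITER sentinel, so later edits for the same region are skipped rather than retrying createWAP for each one.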