HBASE-3927 display total uncompressed byte size of a region in web UI
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1136686 13f79535-47bb-0310-9956-ffa450edef68
parent bd7c8e327f
commit ebb63ef44d
@@ -266,6 +266,7 @@ Release 0.91.0 - Unreleased
    HBASE-3940  HBase daemons should log version info at startup and possibly
                periodically (Li Pi)
    HBASE-3789  Cleanup the locking contention in the master
+   HBASE-3927  Display total uncompressed byte size of a region in web UI

   TASKS
    HBASE-3559  Move report of split to master OFF the heartbeat channel
@@ -36,7 +36,7 @@ import org.apache.hadoop.io.WritableComparable;
  */
 public class HServerLoad extends VersionedWritable
 implements WritableComparable<HServerLoad> {
-  private static final byte VERSION = 1;
+  private static final byte VERSION = 2;
   // Empty load instance.
   public static final HServerLoad EMPTY_HSERVERLOAD = new HServerLoad();

@@ -64,7 +64,7 @@ implements WritableComparable<HServerLoad> {
    * Encapsulates per-region loading metrics.
    */
   public static class RegionLoad extends VersionedWritable {
-    private static final byte VERSION = 0;
+    private static final byte VERSION = 1;

     /** @return the object version number */
     public byte getVersion() {
@@ -77,6 +77,8 @@ implements WritableComparable<HServerLoad> {
     private int stores;
     /** the number of storefiles for the region */
     private int storefiles;
+    /** the total size of the store files for the region, uncompressed, in MB */
+    private int storeUncompressedSizeMB;
     /** the current total size of the store files for the region, in MB */
     private int storefileSizeMB;
     /** the current size of the memstore for the region, in MB */
@@ -106,12 +108,14 @@ implements WritableComparable<HServerLoad> {
      * @param writeRequestsCount
      */
     public RegionLoad(final byte[] name, final int stores,
-        final int storefiles, final int storefileSizeMB,
+        final int storefiles, final int storeUncompressedSizeMB,
+        final int storefileSizeMB,
         final int memstoreSizeMB, final int storefileIndexSizeMB,
         final int readRequestsCount, final int writeRequestsCount) {
       this.name = name;
       this.stores = stores;
       this.storefiles = storefiles;
+      this.storeUncompressedSizeMB = storeUncompressedSizeMB;
       this.storefileSizeMB = storefileSizeMB;
       this.memstoreSizeMB = memstoreSizeMB;
       this.storefileIndexSizeMB = storefileIndexSizeMB;
@@ -246,13 +250,14 @@ implements WritableComparable<HServerLoad> {
     // Writable
     public void readFields(DataInput in) throws IOException {
       super.readFields(in);
-      int version = getVersion();
-      if (version != VERSION) throw new IOException("Version mismatch; " + version);
+      int version = in.readByte();
+      if (version > VERSION) throw new IOException("Version mismatch; " + version);
       int namelen = in.readInt();
       this.name = new byte[namelen];
       in.readFully(this.name);
       this.stores = in.readInt();
       this.storefiles = in.readInt();
+      this.storeUncompressedSizeMB = in.readInt();
       this.storefileSizeMB = in.readInt();
       this.memstoreSizeMB = in.readInt();
       this.storefileIndexSizeMB = in.readInt();
@@ -262,10 +267,12 @@ implements WritableComparable<HServerLoad> {

     public void write(DataOutput out) throws IOException {
       super.write(out);
+      out.writeByte(VERSION);
       out.writeInt(name.length);
       out.write(name);
       out.writeInt(stores);
       out.writeInt(storefiles);
+      out.writeInt(storeUncompressedSizeMB);
       out.writeInt(storefileSizeMB);
       out.writeInt(memstoreSizeMB);
       out.writeInt(storefileIndexSizeMB);
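The two RegionLoad hunks above change the wire format: write() now emits the class's own version byte ahead of the payload and serializes the new storeUncompressedSizeMB field between storefiles and storefileSizeMB, while readFields() reads that byte back and only rejects versions newer than its own. The standalone sketch below is not part of the commit; the class name, the reduced field set, and the main() harness are invented purely to illustrate how such a versioned round-trip behaves.

import java.io.*;

// Illustrative sketch only: mimics the versioned layout shown above with a trimmed-down field set.
public class RegionLoadSketch {
  static final byte VERSION = 1;

  int stores, storefiles, storeUncompressedSizeMB, storefileSizeMB;

  void write(DataOutput out) throws IOException {
    out.writeByte(VERSION);                 // version byte precedes the payload
    out.writeInt(stores);
    out.writeInt(storefiles);
    out.writeInt(storeUncompressedSizeMB);  // the field added in this commit
    out.writeInt(storefileSizeMB);
  }

  void readFields(DataInput in) throws IOException {
    int version = in.readByte();
    if (version > VERSION) {                // tolerate older writers, reject newer ones
      throw new IOException("Version mismatch; " + version);
    }
    stores = in.readInt();
    storefiles = in.readInt();
    storeUncompressedSizeMB = in.readInt();
    storefileSizeMB = in.readInt();
  }

  public static void main(String[] args) throws IOException {
    RegionLoadSketch a = new RegionLoadSketch();
    a.stores = 2; a.storefiles = 5; a.storeUncompressedSizeMB = 100; a.storefileSizeMB = 25;

    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    a.write(new DataOutputStream(bytes));

    RegionLoadSketch b = new RegionLoadSketch();
    b.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
    System.out.println(b.storeUncompressedSizeMB + " MB uncompressed");  // prints "100 MB uncompressed"
  }
}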
@@ -282,8 +289,15 @@ implements WritableComparable<HServerLoad> {
           Integer.valueOf(this.stores));
       sb = Strings.appendKeyValue(sb, "storefiles",
           Integer.valueOf(this.storefiles));
+      sb = Strings.appendKeyValue(sb, "storefileUncompressedSizeMB",
+          Integer.valueOf(this.storeUncompressedSizeMB));
       sb = Strings.appendKeyValue(sb, "storefileSizeMB",
           Integer.valueOf(this.storefileSizeMB));
+      if (this.storeUncompressedSizeMB != 0) {
+        sb = Strings.appendKeyValue(sb, "compressionRatio",
+            String.format("%.4f", (float)this.storefileSizeMB/
+                (float)this.storeUncompressedSizeMB));
+      }
       sb = Strings.appendKeyValue(sb, "memstoreSizeMB",
           Integer.valueOf(this.memstoreSizeMB));
       sb = Strings.appendKeyValue(sb, "storefileIndexSizeMB",
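The toString() hunk above derives a compressionRatio from the two sizes it now reports; the != 0 guard simply avoids dividing by zero when the uncompressed total is unknown or the region is empty. A minimal worked example with hypothetical sizes (25 MB on disk, 100 MB uncompressed) prints 0.2500:

// Illustrative only: the sizes are hypothetical, not taken from a real region.
public class CompressionRatioExample {
  public static void main(String[] args) {
    int storefileSizeMB = 25;            // on-disk (compressed) size
    int storeUncompressedSizeMB = 100;   // size of the same data uncompressed
    String ratio = String.format("%.4f",
        (float) storefileSizeMB / (float) storeUncompressedSizeMB);
    System.out.println("compressionRatio=" + ratio);  // prints "compressionRatio=0.2500"
  }
}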
@@ -480,8 +494,8 @@ implements WritableComparable<HServerLoad> {

   public void readFields(DataInput in) throws IOException {
     super.readFields(in);
-    int version = getVersion();
-    if (version != VERSION) throw new IOException("Version mismatch; " + version);
+    int version = in.readByte();
+    if (version > VERSION) throw new IOException("Version mismatch; " + version);
     numberOfRequests = in.readInt();
     usedHeapMB = in.readInt();
     maxHeapMB = in.readInt();
@@ -495,6 +509,7 @@ implements WritableComparable<HServerLoad> {

   public void write(DataOutput out) throws IOException {
     super.write(out);
+    out.writeByte(VERSION);
     out.writeInt(numberOfRequests);
     out.writeInt(usedHeapMB);
     out.writeInt(maxHeapMB);
@@ -21,10 +21,8 @@ package org.apache.hadoop.hbase.client;

 import java.io.Closeable;
 import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.List;
-import java.util.ArrayList;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -841,6 +841,10 @@ public class HFile {
       return this.fileSize;
     }

+    public long getTotalUncompressedBytes() {
+      return this.trailer.totalUncompressedBytes;
+    }
+
     public boolean inMemory() {
       return this.inMemory;
     }
@@ -908,6 +908,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
     byte[] name = r.getRegionName();
     int stores = 0;
     int storefiles = 0;
+    int storeUncompressedSizeMB = 0;
     int storefileSizeMB = 0;
     int memstoreSizeMB = (int) (r.memstoreSize.get() / 1024 / 1024);
     int storefileIndexSizeMB = 0;
@@ -915,11 +916,14 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
       stores += r.stores.size();
       for (Store store : r.stores.values()) {
         storefiles += store.getStorefilesCount();
+        storeUncompressedSizeMB += (int) (store.getStoreSizeUncompressed()
+            / 1024 / 1024);
         storefileSizeMB += (int) (store.getStorefilesSize() / 1024 / 1024);
         storefileIndexSizeMB += (int) (store.getStorefilesIndexSize() / 1024 / 1024);
       }
     }
     return new HServerLoad.RegionLoad(name,stores, storefiles,
+        storeUncompressedSizeMB,
         storefileSizeMB, memstoreSizeMB, storefileIndexSizeMB,
         (int) r.readRequestsCount.get(), (int) r.writeRequestsCount.get());
   }
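In the hunk above, each store's uncompressed byte count is converted to megabytes before it is added to the per-region total, so a store smaller than 1 MB contributes nothing to the reported figure. A small sketch of that aggregation, using invented store sizes rather than real Store objects:

// Illustrative only: the per-store byte counts below are hypothetical.
public class RegionSizeSketch {
  public static void main(String[] args) {
    long[] storeUncompressedBytes = { 512L * 1024, 300L * 1024 * 1024, 75L * 1024 * 1024 };
    int storeUncompressedSizeMB = 0;
    for (long bytes : storeUncompressedBytes) {
      // Same integer truncation as the region server code: each store rounds down to whole MB.
      storeUncompressedSizeMB += (int) (bytes / 1024 / 1024);
    }
    System.out.println(storeUncompressedSizeMB + " MB");  // prints "375 MB" (the 512 KB store rounds to 0)
  }
}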
@@ -110,6 +110,7 @@ public class Store implements HeapSize {
   private final long desiredMaxFileSize;
   private final int blockingStoreFileCount;
   private volatile long storeSize = 0L;
+  private volatile long totalUncompressedBytes = 0L;
   private final Object flushLock = new Object();
   final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
   private final String storeNameStr;
@@ -288,6 +289,7 @@ public class Store implements HeapSize {
       }
       long length = curfile.getReader().length();
       this.storeSize += length;
+      this.totalUncompressedBytes += curfile.getReader().getTotalUncompressedBytes();
       if (LOG.isDebugEnabled()) {
         LOG.debug("loaded " + curfile.toStringDetailed());
       }
@@ -525,6 +527,7 @@ public class Store implements HeapSize {
         this.conf, this.family.getBloomFilterType(), this.inMemory);
     StoreFile.Reader r = sf.createReader();
     this.storeSize += r.length();
+    this.totalUncompressedBytes += r.getTotalUncompressedBytes();
     if(LOG.isInfoEnabled()) {
       LOG.info("Added " + sf + ", entries=" + r.getEntries() +
         ", sequenceid=" + logCacheFlushId +
@@ -1208,6 +1211,7 @@ public class Store implements HeapSize {
       }
       // 4. Compute new store size
       this.storeSize = 0L;
+      this.totalUncompressedBytes = 0L;
       for (StoreFile hsf : this.storefiles) {
         StoreFile.Reader r = hsf.getReader();
         if (r == null) {
@@ -1215,6 +1219,7 @@ public class Store implements HeapSize {
           continue;
         }
         this.storeSize += r.length();
+        this.totalUncompressedBytes += r.getTotalUncompressedBytes();
       }
     } finally {
       this.lock.writeLock().unlock();
@@ -1530,6 +1535,13 @@ public class Store implements HeapSize {
     return this.storefiles.size();
   }

+  /**
+   * @return The size of the store files, in bytes, uncompressed.
+   */
+  long getStoreSizeUncompressed() {
+    return this.totalUncompressedBytes;
+  }
+
   /**
    * @return The size of the store files, in bytes.
    */
@@ -1684,7 +1696,7 @@ public class Store implements HeapSize {

   public static final long FIXED_OVERHEAD = ClassSize.align(
       ClassSize.OBJECT + (15 * ClassSize.REFERENCE) +
-      (7 * Bytes.SIZEOF_LONG) + (1 * Bytes.SIZEOF_DOUBLE) +
+      (8 * Bytes.SIZEOF_LONG) + (1 * Bytes.SIZEOF_DOUBLE) +
       (4 * Bytes.SIZEOF_INT) + (3 * Bytes.SIZEOF_BOOLEAN));

   public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
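The FIXED_OVERHEAD bump from 7 to 8 longs in the last hunk accounts for the new volatile long totalUncompressedBytes field added to Store. A hypothetical check of the delta, where SIZEOF_LONG simply mirrors Bytes.SIZEOF_LONG (8 bytes):

// Illustrative arithmetic only; not part of the commit.
public class OverheadDelta {
  static final int SIZEOF_LONG = 8;  // mirrors org.apache.hadoop.hbase.util.Bytes.SIZEOF_LONG
  public static void main(String[] args) {
    long before = 7 * SIZEOF_LONG;
    long after  = 8 * SIZEOF_LONG;
    System.out.println("FIXED_OVERHEAD grows by " + (after - before) + " bytes");  // prints 8 bytes
  }
}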
@@ -1092,6 +1092,10 @@ public class StoreFile {
     return reader.length();
   }

+  public long getTotalUncompressedBytes() {
+    return reader.getTotalUncompressedBytes();
+  }
+
   public int getEntries() {
     return reader.getEntries();
   }
@@ -353,7 +353,7 @@ public class HLogSplitter {
   }

   public boolean splitLogFileToTemp(FileStatus logfile, String tmpname,
       CancelableProgressable reporter) throws IOException {
     final Map<byte[], Object> logWriters = Collections.
       synchronizedMap(new TreeMap<byte[], Object>(Bytes.BYTES_COMPARATOR));
     boolean isCorrupted = false;
@@ -409,7 +409,10 @@ public class HLogSplitter {
         if (wap == null) {
           wap = createWAP(region, entry, rootDir, tmpname, fs, conf);
           if (wap == null) {
+            // ignore edits from this region. It doesn't exist anymore.
+            // It was probably already split.
             logWriters.put(region, BAD_WRITER);
+            continue;
           } else {
             logWriters.put(region, wap);
           }