+ *
+ * optional uint32 version = 1 [default = 0];
+ */
+ public Builder clearVersion() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ version_ = 0;
+ onChanged();
+ return this;
+ }
+ public final Builder setUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ public final Builder mergeUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.PersistedCacheMetadata)
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.PersistedCacheMetadata)
+ private static final org.apache.hadoop.hbase.shaded.protobuf.generated.BucketCacheProtos.PersistedCacheMetadata DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.BucketCacheProtos.PersistedCacheMetadata();
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.BucketCacheProtos.PersistedCacheMetadata getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<PersistedCacheMetadata>
+ PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<PersistedCacheMetadata>() {
+ public PersistedCacheMetadata parsePartialFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return new PersistedCacheMetadata(input, extensionRegistry);
+ }
+ };
+
+ public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<PersistedCacheMetadata> parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<PersistedCacheMetadata> getParserForType() {
+ return PARSER;
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.BucketCacheProtos.PersistedCacheMetadata getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+
+ }
+
+ private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_PersistedCacheMetadata_descriptor;
+ private static final
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_hbase_pb_PersistedCacheMetadata_fieldAccessorTable;
+
+ public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\021BucketCache.proto\022\010hbase.pb\",\n\026Persist" +
+ "edCacheMetadata\022\022\n\007version\030\001 \001(\r:\0010BK\n1o" +
+ "rg.apache.hadoop.hbase.shaded.protobuf.g" +
+ "eneratedB\021BucketCacheProtosH\001\240\001\001"
+ };
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() {
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry assignDescriptors(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ return null;
+ }
+ };
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ internal_static_hbase_pb_PersistedCacheMetadata_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_hbase_pb_PersistedCacheMetadata_fieldAccessorTable = new
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_hbase_pb_PersistedCacheMetadata_descriptor,
+ new java.lang.String[] { "Version", });
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/hbase-protocol-shaded/src/main/protobuf/BucketCache.proto b/hbase-protocol-shaded/src/main/protobuf/BucketCache.proto
new file mode 100644
index 00000000000..ebde19afb4e
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/protobuf/BucketCache.proto
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file contains protocol buffers that are shared throughout HBase
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated";
+option java_outer_classname = "BucketCacheProtos";
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+/**
+ * Metadata written out as preamble when we persist cache content.
+ */
+message PersistedCacheMetadata {
+ // Set version to be zero
+ optional uint32 version = 1 [default = 0];
+}
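
Aside (not part of the patch): a minimal sketch of how this metadata message is meant to frame the persisted cache file, mirroring the persistToFile/retrieveFromFile changes further down. The ProtobufMagic helpers and the generated BucketCacheProtos class are taken from this patch; the class and method names of the sketch itself are hypothetical.

// Sketch only: write the PB magic preamble followed by a length-delimited metadata record,
// then read both back and return the persisted version.
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
import org.apache.hadoop.hbase.shaded.protobuf.generated.BucketCacheProtos;

public class PersistedCachePreambleSketch {
  static void writePreamble(FileOutputStream fos, int version) throws IOException {
    // Magic bytes first so a reader can tell a new-format file from an old one.
    fos.write(ProtobufMagic.PB_MAGIC, 0, ProtobufMagic.lengthOfPBMagic());
    BucketCacheProtos.PersistedCacheMetadata.newBuilder()
        .setVersion(version).build().writeDelimitedTo(fos);
  }

  static int readPreamble(FileInputStream fis) throws IOException {
    byte[] magic = new byte[ProtobufMagic.lengthOfPBMagic()];
    if (fis.read(magic) != magic.length || !ProtobufMagic.isPBMagicPrefix(magic)) {
      throw new IOException("Missing protobuf magic; old-format persistence file?");
    }
    // parseDelimitedFrom consumes the varint length written by writeDelimitedTo above.
    return BucketCacheProtos.PersistedCacheMetadata.parseDelimitedFrom(fis).getVersion();
  }
}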
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 0bdee40692e..2140a43f469 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -352,6 +352,10 @@
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-common</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-protocol-shaded</artifactId>
+ </dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol</artifactId>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index a36423e8628..7033b963ed4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -53,6 +53,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
@@ -68,6 +69,9 @@ import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
import org.apache.hadoop.hbase.io.hfile.CachedBlock;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;
import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.BucketCacheProtos;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.hbase.util.IdReadWriteLock;
@@ -248,9 +252,11 @@ public class BucketCache implements BlockCache, HeapSize {
try {
retrieveFromFile(bucketSizes);
} catch (IOException ioex) {
- LOG.error("Can't restore from file because of", ioex);
+ LOG.error("Can't restore cache from persisted file " + persistencePath +
+ "; file removed; cache is cold!", ioex);
} catch (ClassNotFoundException cnfe) {
- LOG.error("Can't restore from file in rebuild because can't deserialise",cnfe);
+ LOG.error("Can't restore cache from persisted file in rebuild "
+ + "because can't deserialise; file removed; cache is cold!", cnfe);
throw new RuntimeException(cnfe);
}
}
@@ -945,6 +951,11 @@ public class BucketCache implements BlockCache, HeapSize {
return receptacle;
}
+ /**
+ * The current version of the persisted cache file.
+ */
+ private static final int PERSISTED_CACHE_VERSION = 0;
+
private void persistToFile() throws IOException {
assert !cacheEnabled;
FileOutputStream fos = null;
@@ -954,6 +965,13 @@ public class BucketCache implements BlockCache, HeapSize {
throw new IOException("Attempt to persist non-persistent cache mappings!");
}
fos = new FileOutputStream(persistencePath, false);
+ // Write out a metadata protobuf block in case we change format at a later date, etc.
+ // Add our magic as preamble.
+ fos.write(ProtobufMagic.PB_MAGIC, 0, ProtobufMagic.lengthOfPBMagic());
+ BucketCacheProtos.PersistedCacheMetadata metadata =
+ BucketCacheProtos.PersistedCacheMetadata.newBuilder().
+ setVersion(PERSISTED_CACHE_VERSION).build();
+ metadata.writeDelimitedTo(fos);
oos = new ObjectOutputStream(fos);
oos.writeLong(cacheCapacity);
oos.writeUTF(ioEngine.getClass().getName());
@@ -966,9 +984,12 @@ public class BucketCache implements BlockCache, HeapSize {
}
}
+ /**
+ * @see #persistToFile()
+ */
@SuppressWarnings("unchecked")
- private void retrieveFromFile(int[] bucketSizes) throws IOException, BucketAllocatorException,
- ClassNotFoundException {
+ private void retrieveFromFile(int[] bucketSizes)
+ throws IOException, BucketAllocatorException, ClassNotFoundException {
File persistenceFile = new File(persistencePath);
if (!persistenceFile.exists()) {
return;
@@ -977,10 +998,35 @@ public class BucketCache implements BlockCache, HeapSize {
FileInputStream fis = null;
ObjectInputStream ois = null;
try {
- if (!ioEngine.isPersistent())
- throw new IOException(
- "Attempt to restore non-persistent cache mappings!");
+ if (!ioEngine.isPersistent()) {
+ throw new IOException("Attempt to restore non-persistent cache mappings!");
+ }
fis = new FileInputStream(persistencePath);
+ // Read the protobuf magic and then the metadata. See persistToFile for where and in what
+ // format the metadata was written.
+ byte [] pbmagic = new byte [ProtobufMagic.lengthOfPBMagic()];
+ int len = fis.read(pbmagic, 0, pbmagic.length);
+ if (len != pbmagic.length || !ProtobufMagic.isPBMagicPrefix(pbmagic)) {
+ // Throw exception. In finally we remove the file ALWAYS.
+ throw new HBaseIOException("Failed read of protobuf magic ("
+ + Bytes.toString(pbmagic)+ "); old format (HBASE-16993)? "
+ + "Failed read of persisted cache file=" + persistencePath);
+ }
+ BucketCacheProtos.PersistedCacheMetadata metadata = null;
+ try {
+ metadata =
+ BucketCacheProtos.PersistedCacheMetadata.parseDelimitedFrom(fis);
+ } catch (IOException e) {
+ // Throw exception if failed parse. In finally we remove the file ALWAYS.
+ throw new HBaseIOException("Failed read of persisted cache metadata file=" +
+ persistencePath, e);
+ }
+ if (metadata.getVersion() != PERSISTED_CACHE_VERSION) {
+ throw new HBaseIOException("Unexpected version of persisted cache metadata file=" +
+ persistencePath + "; expected=" + PERSISTED_CACHE_VERSION + " but read=" +
+ metadata.getVersion());
+ }
+ // Ok. Read metadata. All seems good. Go ahead and pull in the persisted cache.
ois = new ObjectInputStream(fis);
long capacitySize = ois.readLong();
if (capacitySize != cacheCapacity)
@@ -1010,6 +1056,8 @@ public class BucketCache implements BlockCache, HeapSize {
if (!persistenceFile.delete()) {
throw new IOException("Failed deleting persistence file "
+ persistenceFile.getAbsolutePath());
+ } else {
+ LOG.info("Deleted persisted cache file " + persistencePath);
}
}
}
@@ -1130,10 +1178,7 @@ public class BucketCache implements BlockCache, HeapSize {
/**
* Item in cache. We expect this to be where most memory goes. Java uses 8
* bytes just for object headers; after this, we want to use as little as
- * possible - so we only use 8 bytes, but in order to do so we end up messing
- * around with all this Java casting stuff. Offset stored as 5 bytes that make
- * up the long. Doubt we'll see devices this big for ages. Offsets are divided
- * by 256. So 5 bytes gives us 256TB or so.
+ * possible.
*/
static class BucketEntry implements Serializable {
private static final long serialVersionUID = -6741504807982257534L;
@@ -1147,15 +1192,14 @@ public class BucketCache implements BlockCache, HeapSize {
}
};
- private int offsetBase;
private int length;
- private byte offset1;
byte deserialiserIndex;
private volatile long accessCounter;
private BlockPriority priority;
// Set this when we were not able to forcefully evict the block
private volatile boolean markedForEvict;
private AtomicInteger refCount = new AtomicInteger(0);
+ private long offset;
/**
* Time this block was cached. Presumes we are created just before we are added to the cache.
@@ -1173,17 +1217,12 @@ public class BucketCache implements BlockCache, HeapSize {
}
}
- long offset() { // Java has no unsigned numbers
- long o = ((long) offsetBase) & 0xFFFFFFFF;
- o += (((long) (offset1)) & 0xFF) << 32;
- return o << 8;
+ long offset() {
+ return this.offset;
}
private void setOffset(long value) {
- assert (value & 0xFF) == 0;
- value >>= 8;
- offsetBase = (int) value;
- offset1 = (byte) (value >> 32);
+ this.offset = value;
}
public int getLength() {
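
Aside (not part of the patch): the offset()/setOffset() code removed above packed each entry's offset into five bytes, an int plus a byte, at 256-byte granularity, which is why the old javadoc mentioned dividing by 256 and a ~256TB ceiling. A rough sketch of that encoding, using the removed field names for illustration only:

// Old scheme (removed by this patch): offsets are multiples of 256, so the low 8 bits are
// dropped and the remaining 40 bits are split across an int and a byte.
static long packAndUnpack(long offset) {
  long stored = offset >> 8;                  // drop the low 8 zero bits
  int offsetBase = (int) stored;              // low 32 bits of the stored value
  byte offset1 = (byte) (stored >> 32);       // next 8 bits
  long o = ((long) offsetBase) & 0xFFFFFFFFL; // reassemble
  o += (((long) offset1) & 0xFF) << 32;
  return o << 8;                              // equals the original offset
}

The patch trades those few saved bytes per BucketEntry for a plain long offset and simpler code.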
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
index 6fe352da504..976066d48d7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
@@ -18,8 +18,7 @@
*/
package org.apache.hadoop.hbase.io.hfile.bucket;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
import java.io.FileNotFoundException;
import java.io.IOException;
@@ -60,12 +59,11 @@ public class TestBucketCache {
@Parameterized.Parameters(name = "{index}: blockSize={0}, bucketSizes={1}")
public static Iterable