HDDS-1449. JVM Exit in datanode while committing a key. Contributed by Mukul Kumar Singh. (#825)

Mukul Kumar Singh 2019-05-22 17:18:40 +05:30 committed by bshashikant
parent 67f9a7b165
commit 2fc6f8599a
19 changed files with 688 additions and 575 deletions

View File

@@ -48,7 +48,7 @@ import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.utils.BatchOperation;
-import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -198,52 +198,54 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
       }
       int newDeletionBlocks = 0;
-      MetadataStore containerDB = BlockUtils.getDB(containerData, conf);
+      try(ReferenceCountedDB containerDB =
+              BlockUtils.getDB(containerData, conf)) {
         for (Long blk : delTX.getLocalIDList()) {
           BatchOperation batch = new BatchOperation();
           byte[] blkBytes = Longs.toByteArray(blk);
-          byte[] blkInfo = containerDB.get(blkBytes);
+          byte[] blkInfo = containerDB.getStore().get(blkBytes);
           if (blkInfo != null) {
             byte[] deletingKeyBytes =
                 DFSUtil.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX + blk);
             byte[] deletedKeyBytes =
                 DFSUtil.string2Bytes(OzoneConsts.DELETED_KEY_PREFIX + blk);
-            if (containerDB.get(deletingKeyBytes) != null
-                || containerDB.get(deletedKeyBytes) != null) {
+            if (containerDB.getStore().get(deletingKeyBytes) != null
+                || containerDB.getStore().get(deletedKeyBytes) != null) {
               LOG.debug(String.format(
                   "Ignoring delete for block %d in container %d."
                       + " Entry already added.", blk, containerId));
               continue;
             }
             // Found the block in container db,
             // use an atomic update to change its state to deleting.
             batch.put(deletingKeyBytes, blkInfo);
             batch.delete(blkBytes);
             try {
-              containerDB.writeBatch(batch);
+              containerDB.getStore().writeBatch(batch);
               newDeletionBlocks++;
               LOG.debug("Transited Block {} to DELETING state in container {}",
                   blk, containerId);
             } catch (IOException e) {
               // if some blocks failed to delete, we fail this TX,
               // without sending this ACK to SCM, SCM will resend the TX
               // with a certain number of retries.
               throw new IOException(
                   "Failed to delete blocks for TXID = " + delTX.getTxID(), e);
             }
           } else {
             LOG.debug("Block {} not found or already under deletion in"
                 + " container {}, skip deleting it.", blk, containerId);
           }
         }
-      containerDB
+        containerDB.getStore()
             .put(DFSUtil.string2Bytes(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX),
                 Longs.toByteArray(delTX.getTxID()));
         containerData
             .updateDeleteTransactionId(delTX.getTxID());
         // update pending deletion blocks count in in-memory container status
         containerData.incrPendingDeletionBlocks(newDeletionBlocks);
+      }
     }
 
   @Override

View File

@@ -28,8 +28,11 @@ import org.apache.hadoop.utils.MetadataStoreBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.Closeable;
 import java.io.File;
 import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
@@ -92,8 +95,8 @@ public final class ContainerCache extends LRUMap {
     MapIterator iterator = cache.mapIterator();
     while (iterator.hasNext()) {
       iterator.next();
-      MetadataStore db = (MetadataStore) iterator.getValue();
-      closeDB((String)iterator.getKey(), db);
+      ReferenceCountedDB db = (ReferenceCountedDB) iterator.getValue();
+      db.setEvicted(true);
     }
     // reset the cache
     cache.clear();
@@ -107,11 +110,11 @@ public final class ContainerCache extends LRUMap {
    */
   @Override
   protected boolean removeLRU(LinkEntry entry) {
-    MetadataStore db = (MetadataStore) entry.getValue();
+    ReferenceCountedDB db = (ReferenceCountedDB) entry.getValue();
     String dbFile = (String)entry.getKey();
     lock.lock();
     try {
-      closeDB(dbFile, db);
+      db.setEvicted(false);
       return true;
     } catch (Exception e) {
       LOG.error("Eviction for db:{} failed", dbFile, e);
@@ -128,26 +131,30 @@ public final class ContainerCache extends LRUMap {
    * @param containerDBType - DB type of the container.
    * @param containerDBPath - DB path of the container.
    * @param conf - Hadoop Configuration.
-   * @return MetadataStore.
+   * @return ReferenceCountedDB.
    */
-  public MetadataStore getDB(long containerID, String containerDBType,
+  public ReferenceCountedDB getDB(long containerID, String containerDBType,
       String containerDBPath, Configuration conf)
       throws IOException {
     Preconditions.checkState(containerID >= 0,
         "Container ID cannot be negative.");
     lock.lock();
     try {
-      MetadataStore db = (MetadataStore) this.get(containerDBPath);
+      ReferenceCountedDB db = (ReferenceCountedDB) this.get(containerDBPath);
       if (db == null) {
-        db = MetadataStoreBuilder.newBuilder()
+        MetadataStore metadataStore =
+            MetadataStoreBuilder.newBuilder()
             .setDbFile(new File(containerDBPath))
             .setCreateIfMissing(false)
             .setConf(conf)
             .setDBType(containerDBType)
             .build();
+        db = new ReferenceCountedDB(metadataStore, containerDBPath);
         this.put(containerDBPath, db);
       }
+      // increment the reference before returning the object
+      db.incrementReference();
       return db;
     } catch (Exception e) {
       LOG.error("Error opening DB. Container:{} ContainerPath:{}",
@@ -161,16 +168,70 @@ public final class ContainerCache extends LRUMap {
   /**
    * Remove a DB handler from cache.
    *
-   * @param containerPath - path of the container db file.
+   * @param containerDBPath - path of the container db file.
    */
-  public void removeDB(String containerPath) {
+  public void removeDB(String containerDBPath) {
     lock.lock();
     try {
-      MetadataStore db = (MetadataStore)this.get(containerPath);
-      closeDB(containerPath, db);
-      this.remove(containerPath);
+      ReferenceCountedDB db = (ReferenceCountedDB)this.get(containerDBPath);
+      if (db != null) {
+        // marking it as evicted will close the db as well.
+        db.setEvicted(true);
+      }
+      this.remove(containerDBPath);
     } finally {
       lock.unlock();
     }
   }
+
+  /**
+   * Class to implement reference counting over instances handed by Container
+   * Cache.
+   */
+  public class ReferenceCountedDB implements Closeable {
+    private final AtomicInteger referenceCount;
+    private final AtomicBoolean isEvicted;
+    private final MetadataStore store;
+    private final String containerDBPath;
+
+    public ReferenceCountedDB(MetadataStore store, String containerDBPath) {
+      this.referenceCount = new AtomicInteger(0);
+      this.isEvicted = new AtomicBoolean(false);
+      this.store = store;
+      this.containerDBPath = containerDBPath;
+    }
+
+    private void incrementReference() {
+      this.referenceCount.incrementAndGet();
+    }
+
+    private void decrementReference() {
+      this.referenceCount.decrementAndGet();
+      cleanup();
+    }
+
+    private void setEvicted(boolean checkNoReferences) {
+      Preconditions.checkState(!checkNoReferences ||
+              (referenceCount.get() == 0),
+          "checkNoReferences:%b, referencount:%d",
+          checkNoReferences, referenceCount.get());
+      isEvicted.set(true);
+      cleanup();
+    }
+
+    private void cleanup() {
+      if (referenceCount.get() == 0 && isEvicted.get() && store != null) {
+        closeDB(containerDBPath, store);
+      }
+    }
+
+    public MetadataStore getStore() {
+      return store;
+    }
+
+    public void close() {
+      decrementReference();
+    }
+  }
 }
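The ReferenceCountedDB wrapper above is the core of this change: ContainerCache now hands out a Closeable, reference-counted handle instead of a raw MetadataStore, so an LRU eviction can no longer close a DB that another thread is still using, which appears to be the scenario behind the JVM exit reported for HDDS-1449. A minimal sketch of the caller-side pattern follows; it assumes a KeyValueContainerData instance (containerData), a Configuration (conf) and a block-local ID (localBlockId) are already in scope, as they are at the real call sites changed in the hunks below.

    // Sketch only; containerData, conf and localBlockId are assumed for illustration.
    try (ReferenceCountedDB db = BlockUtils.getDB(containerData, conf)) {
      // getDB() increments the reference count before handing out the wrapper.
      byte[] blockKey = Longs.toByteArray(localBlockId);
      byte[] blockInfo = db.getStore().get(blockKey);   // reads and writes go through getStore()
      if (blockInfo != null) {
        // ... use blockInfo ...
      }
    }
    // close() decrements the count; closeDB() runs only once the handle is both
    // evicted from the ContainerCache and no longer referenced (see cleanup() above).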

View File

@@ -31,11 +31,12 @@ import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocat
 import org.apache.hadoop.utils.MetaStoreIterator;
 import org.apache.hadoop.utils.MetadataKeyFilters;
 import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
 import org.apache.hadoop.utils.MetadataStore.KeyValue;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.Closeable;
 import java.io.File;
 import java.io.IOException;
 import java.util.NoSuchElementException;
@@ -48,12 +49,14 @@ import java.util.NoSuchElementException;
  * {@link MetadataKeyFilters#getNormalKeyFilter()}
  */
 @InterfaceAudience.Public
-public class KeyValueBlockIterator implements BlockIterator<BlockData> {
+public class KeyValueBlockIterator implements BlockIterator<BlockData>,
+    Closeable {
 
   private static final Logger LOG = LoggerFactory.getLogger(
       KeyValueBlockIterator.class);
 
   private MetaStoreIterator<KeyValue> blockIterator;
+  private final ReferenceCountedDB db;
   private static KeyPrefixFilter defaultBlockFilter = MetadataKeyFilters
       .getNormalKeyFilter();
   private KeyPrefixFilter blockFilter;
@@ -91,9 +94,9 @@ public class KeyValueBlockIterator implements BlockIterator<BlockData> {
         containerData;
     keyValueContainerData.setDbFile(KeyValueContainerLocationUtil
         .getContainerDBFile(metdataPath, containerId));
-    MetadataStore metadataStore = BlockUtils.getDB(keyValueContainerData, new
-        OzoneConfiguration());
-    blockIterator = metadataStore.iterator();
+    db = BlockUtils.getDB(keyValueContainerData, new
+        OzoneConfiguration());
+    blockIterator = db.getStore().iterator();
     blockFilter = filter;
   }
@@ -145,4 +148,8 @@ public class KeyValueBlockIterator implements BlockIterator<BlockData> {
     nextBlock = null;
     blockIterator.seekToLast();
   }
+
+  public void close() {
+    db.close();
+  }
 }
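Because the iterator now acquires a ReferenceCountedDB in its constructor, it implements Closeable and callers are expected to release it. A small sketch of that pattern, mirroring the updated TestKeyValueBlockIterator cases later in this patch; containerId and containerPath are assumed to be in scope.

    // Sketch only; containerId and containerPath are assumed for illustration.
    try (KeyValueBlockIterator blockIter = new KeyValueBlockIterator(
        containerId, new File(containerPath))) {
      while (blockIter.hasNext()) {
        BlockData block = blockIter.nextBlock();
        // ... inspect block ...
      }
    }
    // close() releases the ReferenceCountedDB acquired in the constructor.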

View File

@@ -54,7 +54,6 @@ import org.apache.hadoop.ozone.container.keyvalue.helpers
     .KeyValueContainerLocationUtil;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.apache.hadoop.utils.MetadataStore;
 
 import com.google.common.base.Preconditions;
 import org.apache.commons.io.FileUtils;
@@ -74,6 +73,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .Result.UNSUPPORTED_REQUEST;
 
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -349,11 +349,12 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
   void compactDB() throws StorageContainerException {
     try {
-      MetadataStore db = BlockUtils.getDB(containerData, config);
-      db.compactDB();
-      LOG.info("Container {} is closed with bcsId {}.",
-          containerData.getContainerID(),
-          containerData.getBlockCommitSequenceId());
+      try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
+        db.getStore().compactDB();
+        LOG.info("Container {} is closed with bcsId {}.",
+            containerData.getContainerID(),
+            containerData.getBlockCommitSequenceId());
+      }
     } catch (StorageContainerException ex) {
       throw ex;
     } catch (IOException ex) {

View File

@@ -30,12 +30,12 @@ import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
-import org.apache.hadoop.utils.MetadataStore;
 
 import java.io.File;
 import java.io.IOException;
 import java.util.List;
 
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -236,41 +236,42 @@ public class KeyValueContainerCheck {
     onDiskContainerData.setDbFile(dbFile);
 
-    MetadataStore db = BlockUtils
-        .getDB(onDiskContainerData, checkConfig);
-
-    iterateBlockDB(db);
+    try(ReferenceCountedDB db =
+        BlockUtils.getDB(onDiskContainerData, checkConfig)) {
+      iterateBlockDB(db);
+    }
   }
 
-  private void iterateBlockDB(MetadataStore db)
+  private void iterateBlockDB(ReferenceCountedDB db)
       throws IOException {
     Preconditions.checkState(db != null);
 
     // get "normal" keys from the Block DB
-    KeyValueBlockIterator kvIter = new KeyValueBlockIterator(containerID,
-        new File(onDiskContainerData.getContainerPath()));
+    try(KeyValueBlockIterator kvIter = new KeyValueBlockIterator(containerID,
+        new File(onDiskContainerData.getContainerPath()))) {
 
       // ensure there is a chunk file for each key in the DB
       while (kvIter.hasNext()) {
         BlockData block = kvIter.nextBlock();
         List<ContainerProtos.ChunkInfo> chunkInfoList = block.getChunks();
         for (ContainerProtos.ChunkInfo chunk : chunkInfoList) {
           File chunkFile;
           chunkFile = ChunkUtils.getChunkFile(onDiskContainerData,
               ChunkInfo.getFromProtoBuf(chunk));
 
           if (!chunkFile.exists()) {
             // concurrent mutation in Block DB? lookup the block again.
-            byte[] bdata = db.get(
-                Longs.toByteArray(block.getBlockID().getLocalID()));
+            byte[] bdata = db.getStore().get(
+                Longs.toByteArray(block.getBlockID().getLocalID()));
             if (bdata == null) {
               LOG.trace("concurrency with delete, ignoring deleted block");
               break; // skip to next block from kvIter
             } else {
               String errorStr = "Missing chunk file "
                   + chunkFile.getAbsolutePath();
               throw new IOException(errorStr);
             }
           }
         }
       }
+    }

View File

@@ -37,7 +37,7 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
-import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
 
 import java.io.IOException;
@@ -66,7 +66,7 @@ public final class BlockUtils {
    * @return MetadataStore handle.
    * @throws StorageContainerException
    */
-  public static MetadataStore getDB(KeyValueContainerData containerData,
+  public static ReferenceCountedDB getDB(KeyValueContainerData containerData,
       Configuration conf) throws
       StorageContainerException {
     Preconditions.checkNotNull(containerData);

View File

@@ -39,6 +39,7 @@ import org.apache.hadoop.utils.MetadataStoreBuilder;
 
 import com.google.common.base.Preconditions;
 import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -174,22 +175,25 @@ public final class KeyValueContainerUtil {
     }
     kvContainerData.setDbFile(dbFile);
 
-    MetadataStore metadata = BlockUtils.getDB(kvContainerData, config);
-    long bytesUsed = 0;
-    List<Map.Entry<byte[], byte[]>> liveKeys = metadata
-        .getRangeKVs(null, Integer.MAX_VALUE,
-            MetadataKeyFilters.getNormalKeyFilter());
-    bytesUsed = liveKeys.parallelStream().mapToLong(e-> {
-      BlockData blockData;
-      try {
-        blockData = BlockUtils.getBlockData(e.getValue());
-        return blockData.getSize();
-      } catch (IOException ex) {
-        return 0L;
-      }
-    }).sum();
-    kvContainerData.setBytesUsed(bytesUsed);
-    kvContainerData.setKeyCount(liveKeys.size());
+    try(ReferenceCountedDB metadata =
+            BlockUtils.getDB(kvContainerData, config)) {
+      long bytesUsed = 0;
+      List<Map.Entry<byte[], byte[]>> liveKeys = metadata.getStore()
+          .getRangeKVs(null, Integer.MAX_VALUE,
+              MetadataKeyFilters.getNormalKeyFilter());
+
+      bytesUsed = liveKeys.parallelStream().mapToLong(e-> {
+        BlockData blockData;
+        try {
+          blockData = BlockUtils.getBlockData(e.getValue());
+          return blockData.getSize();
+        } catch (IOException ex) {
+          return 0L;
+        }
+      }).sum();
+      kvContainerData.setBytesUsed(bytesUsed);
+      kvContainerData.setKeyCount(liveKeys.size());
+    }
   }
 
   /**

View File

@@ -35,7 +35,7 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
 import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
 import org.apache.hadoop.utils.BatchOperation;
 import org.apache.hadoop.utils.MetadataKeyFilters;
-import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -84,47 +84,47 @@ public class BlockManagerImpl implements BlockManager {
         "cannot be negative");
     // We are not locking the key manager since LevelDb serializes all actions
     // against a single DB. We rely on DB level locking to avoid conflicts.
-    MetadataStore db = BlockUtils.getDB((KeyValueContainerData) container
-        .getContainerData(), config);
+    try(ReferenceCountedDB db = BlockUtils.
+        getDB((KeyValueContainerData) container.getContainerData(), config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
       Preconditions.checkNotNull(db, "DB cannot be null here");
 
       long bcsId = data.getBlockCommitSequenceId();
-    long containerBCSId = ((KeyValueContainerData) container.getContainerData())
-        .getBlockCommitSequenceId();
+      long containerBCSId = ((KeyValueContainerData) container.
+          getContainerData()).getBlockCommitSequenceId();
 
       // default blockCommitSequenceId for any block is 0. It the putBlock
       // request is not coming via Ratis(for test scenarios), it will be 0.
       // In such cases, we should overwrite the block as well
       if (bcsId != 0) {
         if (bcsId <= containerBCSId) {
           // Since the blockCommitSequenceId stored in the db is greater than
           // equal to blockCommitSequenceId to be updated, it means the putBlock
           // transaction is reapplied in the ContainerStateMachine on restart.
           // It also implies that the given block must already exist in the db.
           // just log and return
           LOG.warn("blockCommitSequenceId " + containerBCSId
               + " in the Container Db is greater than" + " the supplied value "
               + bcsId + " .Ignoring it");
           return data.getSize();
         }
       }
       // update the blockData as well as BlockCommitSequenceId here
       BatchOperation batch = new BatchOperation();
       batch.put(Longs.toByteArray(data.getLocalID()),
           data.getProtoBufMessage().toByteArray());
       batch.put(blockCommitSequenceIdKey,
           Longs.toByteArray(bcsId));
-    db.writeBatch(batch);
+      db.getStore().writeBatch(batch);
       container.updateBlockCommitSequenceId(bcsId);
       // Increment keycount here
       container.getContainerData().incrKeyCount();
       LOG.debug(
           "Block " + data.getBlockID() + " successfully committed with bcsId "
               + bcsId + " chunk size " + data.getChunks().size());
       return data.getSize();
+    }
   }
 
   /**
@@ -146,32 +146,33 @@ public class BlockManagerImpl implements BlockManager {
     KeyValueContainerData containerData = (KeyValueContainerData) container
         .getContainerData();
-    MetadataStore db = BlockUtils.getDB(containerData, config);
+    try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
       Preconditions.checkNotNull(db, "DB cannot be null here");
 
       long containerBCSId = containerData.getBlockCommitSequenceId();
       if (containerBCSId < bcsId) {
         throw new StorageContainerException(
             "Unable to find the block with bcsID " + bcsId + " .Container "
                 + container.getContainerData().getContainerID() + " bcsId is "
                 + containerBCSId + ".", UNKNOWN_BCSID);
       }
-    byte[] kData = db.get(Longs.toByteArray(blockID.getLocalID()));
-    if (kData == null) {
-      throw new StorageContainerException("Unable to find the block." + blockID,
-          NO_SUCH_BLOCK);
-    }
+      byte[] kData = db.getStore().get(Longs.toByteArray(blockID.getLocalID()));
+      if (kData == null) {
+        throw new StorageContainerException("Unable to find the block." +
+            blockID, NO_SUCH_BLOCK);
+      }
       ContainerProtos.BlockData blockData =
           ContainerProtos.BlockData.parseFrom(kData);
       long id = blockData.getBlockID().getBlockCommitSequenceId();
       if (id < bcsId) {
         throw new StorageContainerException(
             "bcsId " + bcsId + " mismatches with existing block Id "
                 + id + " for block " + blockID + ".", BCSID_MISMATCH);
       }
       return BlockData.getFromProtoBuf(blockData);
+    }
   }
 
   /**
@@ -187,18 +188,19 @@ public class BlockManagerImpl implements BlockManager {
       throws IOException {
     KeyValueContainerData containerData = (KeyValueContainerData) container
         .getContainerData();
-    MetadataStore db = BlockUtils.getDB(containerData, config);
+    try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
       // This is a post condition that acts as a hint to the user.
      // Should never fail.
       Preconditions.checkNotNull(db, "DB cannot be null here");
-    byte[] kData = db.get(Longs.toByteArray(blockID.getLocalID()));
+      byte[] kData = db.getStore().get(Longs.toByteArray(blockID.getLocalID()));
       if (kData == null) {
         throw new StorageContainerException("Unable to find the block.",
             NO_SUCH_BLOCK);
       }
       ContainerProtos.BlockData blockData =
           ContainerProtos.BlockData.parseFrom(kData);
       return blockData.getSize();
+    }
   }
 
   /**
@@ -218,24 +220,24 @@ public class BlockManagerImpl implements BlockManager {
     KeyValueContainerData cData = (KeyValueContainerData) container
         .getContainerData();
-    MetadataStore db = BlockUtils.getDB(cData, config);
+    try(ReferenceCountedDB db = BlockUtils.getDB(cData, config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
       Preconditions.checkNotNull(db, "DB cannot be null here");
       // Note : There is a race condition here, since get and delete
       // are not atomic. Leaving it here since the impact is refusing
       // to delete a Block which might have just gotten inserted after
       // the get check.
       byte[] kKey = Longs.toByteArray(blockID.getLocalID());
-    byte[] kData = db.get(kKey);
-    if (kData == null) {
-      throw new StorageContainerException("Unable to find the block.",
-          NO_SUCH_BLOCK);
-    }
-    db.delete(kKey);
-    // Decrement blockcount here
-    container.getContainerData().decrKeyCount();
+      try {
+        db.getStore().delete(kKey);
+      } catch (IOException e) {
+        throw new StorageContainerException("Unable to find the block.",
+            NO_SUCH_BLOCK);
+      }
+      // Decrement blockcount here
+      container.getContainerData().decrKeyCount();
+    }
   }
 
   /**
@@ -258,18 +260,19 @@ public class BlockManagerImpl implements BlockManager {
     List<BlockData> result = null;
     KeyValueContainerData cData = (KeyValueContainerData) container
         .getContainerData();
-    MetadataStore db = BlockUtils.getDB(cData, config);
+    try(ReferenceCountedDB db = BlockUtils.getDB(cData, config)) {
       result = new ArrayList<>();
       byte[] startKeyInBytes = Longs.toByteArray(startLocalID);
       List<Map.Entry<byte[], byte[]>> range =
-        db.getSequentialRangeKVs(startKeyInBytes, count,
-            MetadataKeyFilters.getNormalKeyFilter());
+          db.getStore().getSequentialRangeKVs(startKeyInBytes, count,
+              MetadataKeyFilters.getNormalKeyFilter());
       for (Map.Entry<byte[], byte[]> entry : range) {
         BlockData value = BlockUtils.getBlockData(entry.getValue());
         BlockData data = new BlockData(value.getBlockID());
         result.add(data);
       }
       return result;
+    }
   }
 
   /**

View File

@@ -43,7 +43,7 @@ import org.apache.hadoop.utils.BackgroundTaskQueue;
 import org.apache.hadoop.utils.BackgroundTaskResult;
 import org.apache.hadoop.utils.BatchOperation;
 import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -185,69 +185,71 @@ public class BlockDeletingService extends BackgroundService{
       ContainerBackgroundTaskResult crr = new ContainerBackgroundTaskResult();
       long startTime = Time.monotonicNow();
       // Scan container's db and get list of under deletion blocks
-      MetadataStore meta = BlockUtils.getDB(
-          (KeyValueContainerData) containerData, conf);
-      // # of blocks to delete is throttled
-      KeyPrefixFilter filter =
-          new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX);
-      List<Map.Entry<byte[], byte[]>> toDeleteBlocks =
-          meta.getSequentialRangeKVs(null, blockLimitPerTask, filter);
+      try (ReferenceCountedDB meta = BlockUtils.getDB(containerData, conf)) {
+        // # of blocks to delete is throttled
+        KeyPrefixFilter filter =
+            new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX);
+        List<Map.Entry<byte[], byte[]>> toDeleteBlocks =
+            meta.getStore().getSequentialRangeKVs(null, blockLimitPerTask,
+                filter);
         if (toDeleteBlocks.isEmpty()) {
           LOG.debug("No under deletion block found in container : {}",
               containerData.getContainerID());
         }
 
         List<String> succeedBlocks = new LinkedList<>();
         LOG.debug("Container : {}, To-Delete blocks : {}",
             containerData.getContainerID(), toDeleteBlocks.size());
         File dataDir = new File(containerData.getChunksPath());
         if (!dataDir.exists() || !dataDir.isDirectory()) {
           LOG.error("Invalid container data dir {} : "
               + "does not exist or not a directory", dataDir.getAbsolutePath());
           return crr;
         }
 
         toDeleteBlocks.forEach(entry -> {
           String blockName = DFSUtil.bytes2String(entry.getKey());
           LOG.debug("Deleting block {}", blockName);
           try {
             ContainerProtos.BlockData data =
                 ContainerProtos.BlockData.parseFrom(entry.getValue());
             for (ContainerProtos.ChunkInfo chunkInfo : data.getChunksList()) {
               File chunkFile = dataDir.toPath()
                   .resolve(chunkInfo.getChunkName()).toFile();
               if (FileUtils.deleteQuietly(chunkFile)) {
                 LOG.debug("block {} chunk {} deleted", blockName,
                     chunkFile.getAbsolutePath());
               }
             }
             succeedBlocks.add(blockName);
           } catch (InvalidProtocolBufferException e) {
             LOG.error("Failed to parse block info for block {}", blockName, e);
           }
         });
 
-      // Once files are deleted... replace deleting entries with deleted entries
-      BatchOperation batch = new BatchOperation();
+        // Once files are deleted... replace deleting entries with deleted
+        // entries
+        BatchOperation batch = new BatchOperation();
         succeedBlocks.forEach(entry -> {
           String blockId =
               entry.substring(OzoneConsts.DELETING_KEY_PREFIX.length());
           String deletedEntry = OzoneConsts.DELETED_KEY_PREFIX + blockId;
           batch.put(DFSUtil.string2Bytes(deletedEntry),
               DFSUtil.string2Bytes(blockId));
           batch.delete(DFSUtil.string2Bytes(entry));
         });
-      meta.writeBatch(batch);
+        meta.getStore().writeBatch(batch);
         // update count of pending deletion blocks in in-memory container status
         containerData.decrPendingDeletionBlocks(succeedBlocks.size());
 
         if (!succeedBlocks.isEmpty()) {
           LOG.info("Container: {}, deleted blocks: {}, task elapsed time: {}ms",
               containerData.getContainerID(), succeedBlocks.size(),
               Time.monotonicNow() - startTime);
         }
         crr.addAll(succeedBlocks);
         return crr;
+      }
     }
 
     @Override

View File

@@ -38,7 +38,7 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
 import org.apache.hadoop.utils.MetadataKeyFilters;
-import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -180,28 +180,31 @@ public class ContainerReader implements Runnable {
         KeyValueContainerUtil.parseKVContainerData(kvContainerData, config);
         KeyValueContainer kvContainer = new KeyValueContainer(
             kvContainerData, config);
-        MetadataStore containerDB = BlockUtils.getDB(kvContainerData, config);
+        try(ReferenceCountedDB containerDB = BlockUtils.getDB(kvContainerData,
+            config)) {
           MetadataKeyFilters.KeyPrefixFilter filter =
               new MetadataKeyFilters.KeyPrefixFilter()
                   .addFilter(OzoneConsts.DELETING_KEY_PREFIX);
           int numPendingDeletionBlocks =
-            containerDB.getSequentialRangeKVs(null, Integer.MAX_VALUE, filter)
-                .size();
+              containerDB.getStore().getSequentialRangeKVs(null,
+                  Integer.MAX_VALUE, filter)
+                  .size();
           kvContainerData.incrPendingDeletionBlocks(numPendingDeletionBlocks);
-        byte[] delTxnId = containerDB.get(
-            DFSUtil.string2Bytes(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX));
+          byte[] delTxnId = containerDB.getStore().get(
+              DFSUtil.string2Bytes(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX));
           if (delTxnId != null) {
             kvContainerData
                 .updateDeleteTransactionId(Longs.fromByteArray(delTxnId));
           }
           // sets the BlockCommitSequenceId.
-        byte[] bcsId = containerDB.get(
-            DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX));
+          byte[] bcsId = containerDB.getStore().get(DFSUtil.string2Bytes(
+              OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX));
           if (bcsId != null) {
             kvContainerData
                 .updateBlockCommitSequenceId(Longs.fromByteArray(bcsId));
           }
           containerSet.addContainer(kvContainer);
+        }
       } else {
         throw new StorageContainerException("Container File is corrupted. " +
             "ContainerType is KeyValueContainer but cast to " +

View File

@@ -34,7 +34,7 @@ import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.utils.MetadataKeyFilters;
-import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -109,30 +109,31 @@ public class TestKeyValueBlockIterator {
     createContainerWithBlocks(containerID, normalBlocks, deletedBlocks);
     String containerPath = new File(containerData.getMetadataPath())
         .getParent();
-    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-        containerID, new File(containerPath));
+    try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
+        containerID, new File(containerPath))) {
       int counter = 0;
       while (keyValueBlockIterator.hasNext()) {
         BlockData blockData = keyValueBlockIterator.nextBlock();
         assertEquals(blockData.getLocalID(), counter++);
       }
 
       assertFalse(keyValueBlockIterator.hasNext());
 
       keyValueBlockIterator.seekToFirst();
       counter = 0;
       while (keyValueBlockIterator.hasNext()) {
         BlockData blockData = keyValueBlockIterator.nextBlock();
         assertEquals(blockData.getLocalID(), counter++);
       }
       assertFalse(keyValueBlockIterator.hasNext());
 
       try {
         keyValueBlockIterator.nextBlock();
       } catch (NoSuchElementException ex) {
         GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
             "for ContainerID " + containerID, ex);
       }
+    }
   }
@@ -142,17 +143,18 @@ public class TestKeyValueBlockIterator {
     createContainerWithBlocks(containerID, 2, 0);
     String containerPath = new File(containerData.getMetadataPath())
         .getParent();
-    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-        containerID, new File(containerPath));
+    try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
+        containerID, new File(containerPath))) {
       long blockID = 0L;
       assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID());
       assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());
 
       try {
         keyValueBlockIterator.nextBlock();
       } catch (NoSuchElementException ex) {
         GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
             "for ContainerID " + containerID, ex);
       }
+    }
   }
@@ -162,42 +164,41 @@ public class TestKeyValueBlockIterator {
     createContainerWithBlocks(containerID, 2, 0);
     String containerPath = new File(containerData.getMetadataPath())
         .getParent();
-    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-        containerID, new File(containerPath));
+    try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
+        containerID, new File(containerPath))) {
       long blockID = 0L;
 
       // Even calling multiple times hasNext() should not move entry forward.
       assertTrue(keyValueBlockIterator.hasNext());
       assertTrue(keyValueBlockIterator.hasNext());
       assertTrue(keyValueBlockIterator.hasNext());
       assertTrue(keyValueBlockIterator.hasNext());
       assertTrue(keyValueBlockIterator.hasNext());
       assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID());
 
       assertTrue(keyValueBlockIterator.hasNext());
       assertTrue(keyValueBlockIterator.hasNext());
       assertTrue(keyValueBlockIterator.hasNext());
       assertTrue(keyValueBlockIterator.hasNext());
       assertTrue(keyValueBlockIterator.hasNext());
       assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());
 
       keyValueBlockIterator.seekToLast();
       assertTrue(keyValueBlockIterator.hasNext());
       assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());
 
       keyValueBlockIterator.seekToFirst();
       blockID = 0L;
       assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID());
       assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());
 
       try {
         keyValueBlockIterator.nextBlock();
       } catch (NoSuchElementException ex) {
         GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
             "for ContainerID " + containerID, ex);
       }
+    }
   }
 
   @Test
@@ -208,14 +209,15 @@ public class TestKeyValueBlockIterator {
     createContainerWithBlocks(containerId, normalBlocks, deletedBlocks);
     String containerPath = new File(containerData.getMetadataPath())
         .getParent();
-    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-        containerId, new File(containerPath), MetadataKeyFilters
-        .getDeletingKeyFilter());
+    try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
+        containerId, new File(containerPath), MetadataKeyFilters
+        .getDeletingKeyFilter())) {
       int counter = 5;
       while (keyValueBlockIterator.hasNext()) {
         BlockData blockData = keyValueBlockIterator.nextBlock();
         assertEquals(blockData.getLocalID(), counter++);
       }
+    }
   }
@@ -226,11 +228,12 @@ public class TestKeyValueBlockIterator {
     createContainerWithBlocks(containerId, 0, 5);
     String containerPath = new File(containerData.getMetadataPath())
         .getParent();
-    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-        containerId, new File(containerPath));
+    try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
+        containerId, new File(containerPath))) {
       //As all blocks are deleted blocks, blocks does not match with normal key
       // filter.
       assertFalse(keyValueBlockIterator.hasNext());
+    }
   }
 
   /**
@@ -251,27 +254,30 @@ public class TestKeyValueBlockIterator {
     container = new KeyValueContainer(containerData, conf);
     container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID
        .randomUUID().toString());
-    MetadataStore metadataStore = BlockUtils.getDB(containerData, conf);
+    try(ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData,
+        conf)) {
 
       List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
       ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024);
       chunkList.add(info.getProtoBufMessage());
 
-    for (int i=0; i<normalBlocks; i++) {
-      BlockID blockID = new BlockID(containerId, i);
-      BlockData blockData = new BlockData(blockID);
-      blockData.setChunks(chunkList);
-      metadataStore.put(Longs.toByteArray(blockID.getLocalID()), blockData
-          .getProtoBufMessage().toByteArray());
-    }
+      for (int i = 0; i < normalBlocks; i++) {
+        BlockID blockID = new BlockID(containerId, i);
+        BlockData blockData = new BlockData(blockID);
+        blockData.setChunks(chunkList);
+        metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()),
+            blockData
+            .getProtoBufMessage().toByteArray());
+      }
 
-    for (int i=normalBlocks; i<deletedBlocks; i++) {
-      BlockID blockID = new BlockID(containerId, i);
-      BlockData blockData = new BlockData(blockID);
-      blockData.setChunks(chunkList);
-      metadataStore.put(DFSUtil.string2Bytes(OzoneConsts
-          .DELETING_KEY_PREFIX + blockID.getLocalID()), blockData
-          .getProtoBufMessage().toByteArray());
-    }
+      for (int i = normalBlocks; i < deletedBlocks; i++) {
+        BlockID blockID = new BlockID(containerId, i);
+        BlockData blockData = new BlockData(blockID);
+        blockData.setChunks(chunkList);
+        metadataStore.getStore().put(DFSUtil.string2Bytes(OzoneConsts
+            .DELETING_KEY_PREFIX + blockID.getLocalID()), blockData
+            .getProtoBufMessage().toByteArray());
+      }
+    }
   }

View File

@@ -36,7 +36,7 @@ import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DiskChecker;
-import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
 
 import org.junit.Assert;
 import org.junit.Before;
@@ -132,23 +132,24 @@ public class TestKeyValueContainer {
   private void addBlocks(int count) throws Exception {
     long containerId = keyValueContainerData.getContainerID();
 
-    MetadataStore metadataStore = BlockUtils.getDB(keyValueContainer
-        .getContainerData(), conf);
-    for (int i=0; i < count; i++) {
+    try(ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer
+        .getContainerData(), conf)) {
+      for (int i = 0; i < count; i++) {
         // Creating BlockData
         BlockID blockID = new BlockID(containerId, i);
         BlockData blockData = new BlockData(blockID);
         blockData.addMetadata("VOLUME", "ozone");
         blockData.addMetadata("OWNER", "hdfs");
         List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
         ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
             .getLocalID(), 0), 0, 1024);
         chunkList.add(info.getProtoBufMessage());
         blockData.setChunks(chunkList);
-      metadataStore.put(Longs.toByteArray(blockID.getLocalID()), blockData
-          .getProtoBufMessage().toByteArray());
+        metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()),
+            blockData
+            .getProtoBufMessage().toByteArray());
       }
+    }
   }
 
   @SuppressWarnings("RedundantCast")
@@ -191,9 +192,12 @@ public class TestKeyValueContainer {
     int numberOfKeysToWrite = 12;
     //write one few keys to check the key count after import
-    MetadataStore metadataStore = BlockUtils.getDB(keyValueContainerData, conf);
-    for (int i = 0; i < numberOfKeysToWrite; i++) {
-      metadataStore.put(("test" + i).getBytes(UTF_8), "test".getBytes(UTF_8));
+    try(ReferenceCountedDB metadataStore =
+        BlockUtils.getDB(keyValueContainerData, conf)) {
+      for (int i = 0; i < numberOfKeysToWrite; i++) {
+        metadataStore.getStore().put(("test" + i).getBytes(UTF_8),
+            "test".getBytes(UTF_8));
+      }
     }
     BlockUtils.removeDB(keyValueContainerData, conf);

View File

@@ -35,7 +35,7 @@ import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingP
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -149,48 +149,50 @@ import static org.junit.Assert.assertTrue;
     container = new KeyValueContainer(containerData, conf);
     container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
         UUID.randomUUID().toString());
-    MetadataStore metadataStore = BlockUtils.getDB(containerData, conf);
-    chunkManager = new ChunkManagerImpl(true);
+    try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData,
+        conf)) {
+      chunkManager = new ChunkManagerImpl(true);
 
       assertTrue(containerData.getChunksPath() != null);
       File chunksPath = new File(containerData.getChunksPath());
       assertTrue(chunksPath.exists());
       // Initially chunks folder should be empty.
       assertTrue(chunksPath.listFiles().length == 0);
 
       List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
       for (int i = 0; i < (totalBlks); i++) {
         BlockID blockID = new BlockID(containerId, i);
         BlockData blockData = new BlockData(blockID);
 
         chunkList.clear();
         for (chunkCount = 0; chunkCount < chunksPerBlock; chunkCount++) {
           String chunkName = strBlock + i + strChunk + chunkCount;
           long offset = chunkCount * chunkLen;
           ChunkInfo info = new ChunkInfo(chunkName, offset, chunkLen);
           chunkList.add(info.getProtoBufMessage());
           chunkManager
               .writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData),
                   new DispatcherContext.Builder()
                       .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA)
                       .build());
           chunkManager
               .writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData),
                   new DispatcherContext.Builder()
                       .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA)
                       .build());
         }
         blockData.setChunks(chunkList);
 
         if (i >= normalBlocks) {
           // deleted key
-          metadataStore.put(DFSUtil.string2Bytes(
-              OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID()),
-              blockData.getProtoBufMessage().toByteArray());
+          metadataStore.getStore().put(DFSUtil.string2Bytes(
+              OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID()),
+              blockData.getProtoBufMessage().toByteArray());
         } else {
           // normal key
-          metadataStore.put(Longs.toByteArray(blockID.getLocalID()),
-              blockData.getProtoBufMessage().toByteArray());
+          metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()),
+              blockData.getProtoBufMessage().toByteArray());
         }
       }
+    }
   }


@@ -40,7 +40,7 @@ import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.utils.MetadataKeyFilters; import org.apache.hadoop.utils.MetadataKeyFilters;
import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter; import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.utils.MetadataStore; import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
import java.io.IOException; import java.io.IOException;
import java.io.OutputStream; import java.io.OutputStream;
@@ -119,16 +119,17 @@ public class TestStorageContainerManagerHelper {
public List<String> getPendingDeletionBlocks(Long containerID) public List<String> getPendingDeletionBlocks(Long containerID)
throws IOException { throws IOException {
List<String> pendingDeletionBlocks = Lists.newArrayList(); List<String> pendingDeletionBlocks = Lists.newArrayList();
MetadataStore meta = getContainerMetadata(containerID); ReferenceCountedDB meta = getContainerMetadata(containerID);
KeyPrefixFilter filter = KeyPrefixFilter filter =
new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX); new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX);
List<Map.Entry<byte[], byte[]>> kvs = meta List<Map.Entry<byte[], byte[]>> kvs = meta.getStore()
.getRangeKVs(null, Integer.MAX_VALUE, filter); .getRangeKVs(null, Integer.MAX_VALUE, filter);
kvs.forEach(entry -> { kvs.forEach(entry -> {
String key = DFSUtil.bytes2String(entry.getKey()); String key = DFSUtil.bytes2String(entry.getKey());
pendingDeletionBlocks pendingDeletionBlocks
.add(key.replace(OzoneConsts.DELETING_KEY_PREFIX, "")); .add(key.replace(OzoneConsts.DELETING_KEY_PREFIX, ""));
}); });
meta.close();
return pendingDeletionBlocks; return pendingDeletionBlocks;
} }
@@ -143,17 +144,18 @@ public class TestStorageContainerManagerHelper {
public List<Long> getAllBlocks(Long containeID) throws IOException { public List<Long> getAllBlocks(Long containeID) throws IOException {
List<Long> allBlocks = Lists.newArrayList(); List<Long> allBlocks = Lists.newArrayList();
MetadataStore meta = getContainerMetadata(containeID); ReferenceCountedDB meta = getContainerMetadata(containeID);
List<Map.Entry<byte[], byte[]>> kvs = List<Map.Entry<byte[], byte[]>> kvs =
meta.getRangeKVs(null, Integer.MAX_VALUE, meta.getStore().getRangeKVs(null, Integer.MAX_VALUE,
MetadataKeyFilters.getNormalKeyFilter()); MetadataKeyFilters.getNormalKeyFilter());
kvs.forEach(entry -> { kvs.forEach(entry -> {
allBlocks.add(Longs.fromByteArray(entry.getKey())); allBlocks.add(Longs.fromByteArray(entry.getKey()));
}); });
meta.close();
return allBlocks; return allBlocks;
} }
private MetadataStore getContainerMetadata(Long containerID) private ReferenceCountedDB getContainerMetadata(Long containerID)
throws IOException { throws IOException {
ContainerWithPipeline containerWithPipeline = cluster ContainerWithPipeline containerWithPipeline = cluster
.getStorageContainerManager().getClientProtocolServer() .getStorageContainerManager().getClientProtocolServer()
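The helpers in this file acquire the per-container DB, read via getStore(), and release it with close(); the reference count is what tells the datanode's container cache when it is actually safe to close the underlying store. Below is a purely illustrative sketch of that interaction, assuming a simplified cache that is not the real ContainerCache.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative only: eviction must respect outstanding references, otherwise
// an in-flight reader can touch a store that was closed underneath it.
public class HandleCacheSketch {

  static final class Handle implements AutoCloseable {
    final AtomicInteger refs = new AtomicInteger(0);
    boolean underlyingClosed = false;        // stands in for closing the real DB

    @Override
    public void close() {
      refs.decrementAndGet();                // caller is done with this handle
    }
  }

  private final Map<Long, Handle> cache = new HashMap<>();

  public synchronized Handle getDB(long containerId) {
    Handle h = cache.computeIfAbsent(containerId, id -> new Handle());
    h.refs.incrementAndGet();                // hand out one reference
    return h;
  }

  public synchronized void evict(long containerId) {
    Handle h = cache.get(containerId);
    if (h != null && h.refs.get() == 0) {    // only close when nobody holds it
      h.underlyingClosed = true;
      cache.remove(containerId);
    }
  }
}

With this shape, evicting a container whose DB is still being read becomes a no-op; the real close happens on a later eviction, once every reader has called close().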


@@ -953,18 +953,19 @@ public abstract class TestOzoneRpcClientAbstract {
.getContainerData()); .getContainerData());
String containerPath = new File(containerData.getMetadataPath()) String containerPath = new File(containerData.getMetadataPath())
.getParent(); .getParent();
KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
containerID, new File(containerPath)); containerID, new File(containerPath))) {
while (keyValueBlockIterator.hasNext()) { while (keyValueBlockIterator.hasNext()) {
BlockData blockData = keyValueBlockIterator.nextBlock(); BlockData blockData = keyValueBlockIterator.nextBlock();
if (blockData.getBlockID().getLocalID() == localID) { if (blockData.getBlockID().getLocalID() == localID) {
long length = 0; long length = 0;
List<ContainerProtos.ChunkInfo> chunks = blockData.getChunks(); List<ContainerProtos.ChunkInfo> chunks = blockData.getChunks();
for (ContainerProtos.ChunkInfo chunk : chunks) { for (ContainerProtos.ChunkInfo chunk : chunks) {
length += chunk.getLen(); length += chunk.getLen();
}
Assert.assertEquals(length, keyValue.getBytes().length);
break;
} }
Assert.assertEquals(length, keyValue.getBytes().length);
break;
} }
} }
} }
@@ -1115,31 +1116,32 @@ public abstract class TestOzoneRpcClientAbstract {
(KeyValueContainerData) container.getContainerData(); (KeyValueContainerData) container.getContainerData();
String containerPath = String containerPath =
new File(containerData.getMetadataPath()).getParent(); new File(containerData.getMetadataPath()).getParent();
KeyValueBlockIterator keyValueBlockIterator = try (KeyValueBlockIterator keyValueBlockIterator =
new KeyValueBlockIterator(containerID, new File(containerPath)); new KeyValueBlockIterator(containerID, new File(containerPath))) {
// Find the block corresponding to the key we put. We use the localID of // Find the block corresponding to the key we put. We use the localID of
      // the BlockData to identify our key. // the BlockData to identify our key.
BlockData blockData = null; BlockData blockData = null;
while (keyValueBlockIterator.hasNext()) { while (keyValueBlockIterator.hasNext()) {
blockData = keyValueBlockIterator.nextBlock(); blockData = keyValueBlockIterator.nextBlock();
if (blockData.getBlockID().getLocalID() == localID) { if (blockData.getBlockID().getLocalID() == localID) {
break; break;
}
} }
Assert.assertNotNull("Block not found", blockData);
// Get the location of the chunk file
String chunkName = blockData.getChunks().get(0).getChunkName();
String containreBaseDir =
container.getContainerData().getVolume().getHddsRootDir().getPath();
File chunksLocationPath = KeyValueContainerLocationUtil
.getChunksLocationPath(containreBaseDir, scmId, containerID);
File chunkFile = new File(chunksLocationPath, chunkName);
// Corrupt the contents of the chunk file
String newData = new String("corrupted data");
FileUtils.writeByteArrayToFile(chunkFile, newData.getBytes());
} }
Assert.assertNotNull("Block not found", blockData);
// Get the location of the chunk file
String chunkName = blockData.getChunks().get(0).getChunkName();
String containreBaseDir =
container.getContainerData().getVolume().getHddsRootDir().getPath();
File chunksLocationPath = KeyValueContainerLocationUtil
.getChunksLocationPath(containreBaseDir, scmId, containerID);
File chunkFile = new File(chunksLocationPath, chunkName);
// Corrupt the contents of the chunk file
String newData = new String("corrupted data");
FileUtils.writeByteArrayToFile(chunkFile, newData.getBytes());
} }
@Test @Test


@@ -45,7 +45,7 @@ import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.utils.BackgroundService; import org.apache.hadoop.utils.BackgroundService;
import org.apache.hadoop.utils.MetadataKeyFilters; import org.apache.hadoop.utils.MetadataKeyFilters;
import org.apache.hadoop.utils.MetadataStore; import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Test; import org.junit.Test;
@@ -119,35 +119,36 @@ public class TestBlockDeletingService {
containerSet.addContainer(container); containerSet.addContainer(container);
data = (KeyValueContainerData) containerSet.getContainer( data = (KeyValueContainerData) containerSet.getContainer(
containerID).getContainerData(); containerID).getContainerData();
MetadataStore metadata = BlockUtils.getDB(data, conf); try(ReferenceCountedDB metadata = BlockUtils.getDB(data, conf)) {
for (int j = 0; j<numOfBlocksPerContainer; j++) { for (int j = 0; j < numOfBlocksPerContainer; j++) {
BlockID blockID = BlockID blockID =
ContainerTestHelper.getTestBlockID(containerID); ContainerTestHelper.getTestBlockID(containerID);
String deleteStateName = OzoneConsts.DELETING_KEY_PREFIX + String deleteStateName = OzoneConsts.DELETING_KEY_PREFIX +
blockID.getLocalID(); blockID.getLocalID();
BlockData kd = new BlockData(blockID); BlockData kd = new BlockData(blockID);
List<ContainerProtos.ChunkInfo> chunks = Lists.newArrayList(); List<ContainerProtos.ChunkInfo> chunks = Lists.newArrayList();
for (int k = 0; k<numOfChunksPerBlock; k++) { for (int k = 0; k < numOfChunksPerBlock; k++) {
// offset doesn't matter here // offset doesn't matter here
String chunkName = blockID.getLocalID() + "_chunk_" + k; String chunkName = blockID.getLocalID() + "_chunk_" + k;
File chunk = new File(data.getChunksPath(), chunkName); File chunk = new File(data.getChunksPath(), chunkName);
FileUtils.writeStringToFile(chunk, "a chunk", FileUtils.writeStringToFile(chunk, "a chunk",
Charset.defaultCharset()); Charset.defaultCharset());
LOG.info("Creating file {}", chunk.getAbsolutePath()); LOG.info("Creating file {}", chunk.getAbsolutePath());
// make sure file exists // make sure file exists
Assert.assertTrue(chunk.isFile() && chunk.exists()); Assert.assertTrue(chunk.isFile() && chunk.exists());
ContainerProtos.ChunkInfo info = ContainerProtos.ChunkInfo info =
ContainerProtos.ChunkInfo.newBuilder() ContainerProtos.ChunkInfo.newBuilder()
.setChunkName(chunk.getAbsolutePath()) .setChunkName(chunk.getAbsolutePath())
.setLen(0) .setLen(0)
.setOffset(0) .setOffset(0)
.setChecksumData(Checksum.getNoChecksumDataProto()) .setChecksumData(Checksum.getNoChecksumDataProto())
.build(); .build();
chunks.add(info); chunks.add(info);
}
kd.setChunks(chunks);
metadata.getStore().put(DFSUtil.string2Bytes(deleteStateName),
kd.getProtoBufMessage().toByteArray());
} }
kd.setChunks(chunks);
metadata.put(DFSUtil.string2Bytes(deleteStateName),
kd.getProtoBufMessage().toByteArray());
} }
} }
} }
@@ -166,17 +167,19 @@ public class TestBlockDeletingService {
* Get under deletion blocks count from DB, * Get under deletion blocks count from DB,
* note this info is parsed from container.db. * note this info is parsed from container.db.
*/ */
private int getUnderDeletionBlocksCount(MetadataStore meta) private int getUnderDeletionBlocksCount(ReferenceCountedDB meta)
throws IOException { throws IOException {
List<Map.Entry<byte[], byte[]>> underDeletionBlocks = List<Map.Entry<byte[], byte[]>> underDeletionBlocks =
meta.getRangeKVs(null, 100, new MetadataKeyFilters.KeyPrefixFilter() meta.getStore().getRangeKVs(null, 100,
new MetadataKeyFilters.KeyPrefixFilter()
.addFilter(OzoneConsts.DELETING_KEY_PREFIX)); .addFilter(OzoneConsts.DELETING_KEY_PREFIX));
return underDeletionBlocks.size(); return underDeletionBlocks.size();
} }
private int getDeletedBlocksCount(MetadataStore db) throws IOException { private int getDeletedBlocksCount(ReferenceCountedDB db) throws IOException {
List<Map.Entry<byte[], byte[]>> underDeletionBlocks = List<Map.Entry<byte[], byte[]>> underDeletionBlocks =
db.getRangeKVs(null, 100, new MetadataKeyFilters.KeyPrefixFilter() db.getStore().getRangeKVs(null, 100,
new MetadataKeyFilters.KeyPrefixFilter()
.addFilter(OzoneConsts.DELETED_KEY_PREFIX)); .addFilter(OzoneConsts.DELETED_KEY_PREFIX));
return underDeletionBlocks.size(); return underDeletionBlocks.size();
} }
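The two counters above rely on the key-prefix scheme used in the container DB: a block's entry is keyed by its raw localID while live, re-keyed with OzoneConsts.DELETING_KEY_PREFIX once queued for deletion, and with OzoneConsts.DELETED_KEY_PREFIX after its chunks are removed, so a prefix scan tells you how far deletion has progressed. Below is a self-contained sketch of that idea with a plain map standing in for the store; the prefix literals are assumptions, OzoneConsts defines the real ones.

import java.util.Map;
import java.util.TreeMap;

// Sketch of the prefix-state scheme that getUnderDeletionBlocksCount and
// getDeletedBlocksCount count against.
public class PrefixCountSketch {
  static final String DELETING_PREFIX = "#deleting#";   // assumed literal
  static final String DELETED_PREFIX = "#deleted#";     // assumed literal

  // Count container-DB keys that carry a given state prefix.
  static long countByPrefix(Map<String, byte[]> store, String prefix) {
    return store.keySet().stream().filter(k -> k.startsWith(prefix)).count();
  }

  public static void main(String[] args) {
    Map<String, byte[]> containerDb = new TreeMap<>();
    containerDb.put("101", new byte[0]);                    // live block, raw localID key
    containerDb.put(DELETING_PREFIX + "102", new byte[0]);  // queued for deletion
    containerDb.put(DELETED_PREFIX + "103", new byte[0]);   // chunks already removed
    System.out.println(countByPrefix(containerDb, DELETING_PREFIX));  // 1
    System.out.println(countByPrefix(containerDb, DELETED_PREFIX));   // 1
  }
}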
@@ -202,37 +205,38 @@ public class TestBlockDeletingService {
containerSet.listContainer(0L, 1, containerData); containerSet.listContainer(0L, 1, containerData);
Assert.assertEquals(1, containerData.size()); Assert.assertEquals(1, containerData.size());
MetadataStore meta = BlockUtils.getDB( try(ReferenceCountedDB meta = BlockUtils.getDB(
(KeyValueContainerData) containerData.get(0), conf); (KeyValueContainerData) containerData.get(0), conf)) {
Map<Long, Container> containerMap = containerSet.getContainerMapCopy(); Map<Long, Container> containerMap = containerSet.getContainerMapCopy();
      // NOTE: this test assumes that all containers are KeyValueContainers and // NOTE: this test assumes that all containers are KeyValueContainers and
      // have DeleteTransactionId in KeyValueContainerData. If other // have DeleteTransactionId in KeyValueContainerData. If other
      // types are going to be added, this test should be checked. // types are going to be added, this test should be checked.
long transactionId = ((KeyValueContainerData)containerMap long transactionId = ((KeyValueContainerData) containerMap
.get(containerData.get(0).getContainerID()).getContainerData()) .get(containerData.get(0).getContainerID()).getContainerData())
.getDeleteTransactionId(); .getDeleteTransactionId();
// Number of deleted blocks in container should be equal to 0 before // Number of deleted blocks in container should be equal to 0 before
// block delete // block delete
Assert.assertEquals(0, transactionId); Assert.assertEquals(0, transactionId);
// Ensure there are 3 blocks under deletion and 0 deleted blocks // Ensure there are 3 blocks under deletion and 0 deleted blocks
Assert.assertEquals(3, getUnderDeletionBlocksCount(meta)); Assert.assertEquals(3, getUnderDeletionBlocksCount(meta));
Assert.assertEquals(0, getDeletedBlocksCount(meta)); Assert.assertEquals(0, getDeletedBlocksCount(meta));
// An interval will delete 1 * 2 blocks // An interval will delete 1 * 2 blocks
deleteAndWait(svc, 1); deleteAndWait(svc, 1);
Assert.assertEquals(1, getUnderDeletionBlocksCount(meta)); Assert.assertEquals(1, getUnderDeletionBlocksCount(meta));
Assert.assertEquals(2, getDeletedBlocksCount(meta)); Assert.assertEquals(2, getDeletedBlocksCount(meta));
deleteAndWait(svc, 2); deleteAndWait(svc, 2);
Assert.assertEquals(0, getUnderDeletionBlocksCount(meta)); Assert.assertEquals(0, getUnderDeletionBlocksCount(meta));
Assert.assertEquals(3, getDeletedBlocksCount(meta)); Assert.assertEquals(3, getDeletedBlocksCount(meta));
deleteAndWait(svc, 3); deleteAndWait(svc, 3);
Assert.assertEquals(0, getUnderDeletionBlocksCount(meta)); Assert.assertEquals(0, getUnderDeletionBlocksCount(meta));
Assert.assertEquals(3, getDeletedBlocksCount(meta)); Assert.assertEquals(3, getDeletedBlocksCount(meta));
}
svc.shutdown(); svc.shutdown();
} }
@@ -311,25 +315,26 @@ public class TestBlockDeletingService {
// get container meta data // get container meta data
List<ContainerData> containerData = Lists.newArrayList(); List<ContainerData> containerData = Lists.newArrayList();
containerSet.listContainer(0L, 1, containerData); containerSet.listContainer(0L, 1, containerData);
MetadataStore meta = BlockUtils.getDB( try(ReferenceCountedDB meta = BlockUtils.getDB(
(KeyValueContainerData) containerData.get(0), conf); (KeyValueContainerData) containerData.get(0), conf)) {
LogCapturer newLog = LogCapturer.captureLogs(BackgroundService.LOG); LogCapturer newLog = LogCapturer.captureLogs(BackgroundService.LOG);
GenericTestUtils.waitFor(() -> { GenericTestUtils.waitFor(() -> {
try { try {
if (getUnderDeletionBlocksCount(meta) == 0) { if (getUnderDeletionBlocksCount(meta) == 0) {
return true; return true;
}
} catch (IOException ignored) {
} }
} catch (IOException ignored) { return false;
} }, 1000, 100000);
return false; newLog.stopCapturing();
}, 1000, 100000);
newLog.stopCapturing();
      // The blocks should be deleted successfully and we shouldn't see the timed // The blocks should be deleted successfully and we shouldn't see the timed
// out warning log. // out warning log.
Assert.assertTrue(!newLog.getOutput().contains( Assert.assertTrue(!newLog.getOutput().contains(
"Background task executes timed out, retrying in next interval")); "Background task executes timed out, retrying in next interval"));
}
svc.shutdown(); svc.shutdown();
} }


@@ -47,7 +47,7 @@ import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.utils.MetadataStore; import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
import org.junit.After; import org.junit.After;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.Assert; import org.junit.Assert;
@@ -202,7 +202,7 @@ public class TestContainerPersistence {
Path meta = kvData.getDbFile().toPath().getParent(); Path meta = kvData.getDbFile().toPath().getParent();
Assert.assertTrue(meta != null && Files.exists(meta)); Assert.assertTrue(meta != null && Files.exists(meta));
MetadataStore store = null; ReferenceCountedDB store = null;
try { try {
store = BlockUtils.getDB(kvData, conf); store = BlockUtils.getDB(kvData, conf);
Assert.assertNotNull(store); Assert.assertNotNull(store);


@@ -50,7 +50,7 @@ import org.apache.hadoop.ozone.ozShell.TestOzoneShell;
import org.apache.hadoop.ozone.protocol.commands.RetriableDatanodeEventWatcher; import org.apache.hadoop.ozone.protocol.commands.RetriableDatanodeEventWatcher;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.utils.MetadataStore; import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
import org.junit.Assert; import org.junit.Assert;
import org.junit.BeforeClass; import org.junit.BeforeClass;
import org.junit.Test; import org.junit.Test;
@@ -300,9 +300,12 @@ public class TestBlockDeletion {
cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
.getContainer().getContainerSet(); .getContainer().getContainerSet();
OzoneTestUtils.performOperationOnKeyContainers((blockID) -> { OzoneTestUtils.performOperationOnKeyContainers((blockID) -> {
MetadataStore db = BlockUtils.getDB((KeyValueContainerData) dnContainerSet try(ReferenceCountedDB db =
.getContainer(blockID.getContainerID()).getContainerData(), conf); BlockUtils.getDB((KeyValueContainerData) dnContainerSet
Assert.assertNotNull(db.get(Longs.toByteArray(blockID.getLocalID()))); .getContainer(blockID.getContainerID()).getContainerData(), conf)) {
Assert.assertNotNull(db.getStore().get(
Longs.toByteArray(blockID.getLocalID())));
}
}, omKeyLocationInfoGroups); }, omKeyLocationInfoGroups);
} }
@@ -312,13 +315,16 @@ public class TestBlockDeletion {
cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
.getContainer().getContainerSet(); .getContainer().getContainerSet();
OzoneTestUtils.performOperationOnKeyContainers((blockID) -> { OzoneTestUtils.performOperationOnKeyContainers((blockID) -> {
MetadataStore db = BlockUtils.getDB((KeyValueContainerData) dnContainerSet try(ReferenceCountedDB db =
.getContainer(blockID.getContainerID()).getContainerData(), conf); BlockUtils.getDB((KeyValueContainerData) dnContainerSet
Assert.assertNull(db.get(Longs.toByteArray(blockID.getLocalID()))); .getContainer(blockID.getContainerID()).getContainerData(), conf)) {
Assert.assertNull(db.get(DFSUtil.string2Bytes( Assert.assertNull(db.getStore().get(
OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID()))); Longs.toByteArray(blockID.getLocalID())));
Assert.assertNotNull(DFSUtil Assert.assertNull(db.getStore().get(DFSUtil.string2Bytes(
.string2Bytes(OzoneConsts.DELETED_KEY_PREFIX + blockID.getLocalID())); OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID())));
Assert.assertNotNull(DFSUtil.string2Bytes(
OzoneConsts.DELETED_KEY_PREFIX + blockID.getLocalID()));
}
containerIdsWithDeletedBlocks.add(blockID.getContainerID()); containerIdsWithDeletedBlocks.add(blockID.getContainerID());
}, omKeyLocationInfoGroups); }, omKeyLocationInfoGroups);
} }


@@ -41,7 +41,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.utils.MetadataStore; import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.Assert; import org.junit.Assert;
import org.junit.BeforeClass; import org.junit.BeforeClass;
@@ -226,7 +226,7 @@ public class TestCloseContainerByPipeline {
List<DatanodeDetails> datanodes = pipeline.getNodes(); List<DatanodeDetails> datanodes = pipeline.getNodes();
Assert.assertEquals(3, datanodes.size()); Assert.assertEquals(3, datanodes.size());
List<MetadataStore> metadataStores = new ArrayList<>(datanodes.size()); List<ReferenceCountedDB> metadataStores = new ArrayList<>(datanodes.size());
for (DatanodeDetails details : datanodes) { for (DatanodeDetails details : datanodes) {
Assert.assertFalse(isContainerClosed(cluster, containerID, details)); Assert.assertFalse(isContainerClosed(cluster, containerID, details));
//send the order to close the container //send the order to close the container
@@ -237,8 +237,10 @@ public class TestCloseContainerByPipeline {
Container dnContainer = cluster.getHddsDatanodes().get(index) Container dnContainer = cluster.getHddsDatanodes().get(index)
.getDatanodeStateMachine().getContainer().getContainerSet() .getDatanodeStateMachine().getContainer().getContainerSet()
.getContainer(containerID); .getContainer(containerID);
metadataStores.add(BlockUtils.getDB((KeyValueContainerData) dnContainer try(ReferenceCountedDB store = BlockUtils.getDB(
.getContainerData(), conf)); (KeyValueContainerData) dnContainer.getContainerData(), conf)) {
metadataStores.add(store);
}
} }
// There should be as many rocks db as the number of datanodes in pipeline. // There should be as many rocks db as the number of datanodes in pipeline.