HDFS-11728. Ozone: add the DB names to OzoneConsts. Contributed by Chen Liang.

Weiwei Yang authored on 2017-05-02 11:02:26 +08:00; committed by Owen O'Malley
parent 6b3da448eb
commit b581cde542
4 changed files with 17 additions and 5 deletions


@@ -64,7 +64,6 @@ public final class OzoneConsts {
   public static final String CONTAINER_DATA_PATH = "data";
   public static final String CONTAINER_ROOT_PREFIX = "repository";
-  public static final String CONTAINER_DB = "container.db";
   public static final String FILE_HASH = "SHA-256";
   public final static String CHUNK_OVERWRITE = "OverWriteRequested";
@@ -74,6 +73,14 @@ public final class OzoneConsts {
   public static final long GB = MB * 1024L;
   public static final long TB = GB * 1024L;
+
+  /**
+   * level DB names used by SCM and data nodes.
+   */
+  public static final String CONTAINER_DB = "container.db";
+  public static final String BLOCK_DB = "block.db";
+  public static final String NODEPOOL_DB = "nodepool.db";
+  public static final String OPEN_CONTAINERS_DB = "openContainers.db";
   /**
    * Supports Bucket Versioning.
    */
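For reference, a minimal sketch (not part of this commit; the class name DbPathSketch and the /tmp/scm-metadata directory are invented for illustration) of how callers compose these constants with an SCM metadata directory instead of hard-coding the file names:

import java.io.File;
import org.apache.hadoop.ozone.OzoneConsts;

public class DbPathSketch {
  public static void main(String[] args) {
    // Hypothetical SCM metadata directory, for illustration only.
    File scmMetaDataDir = new File("/tmp/scm-metadata");

    // Resolve each store's on-disk location from the shared constants
    // rather than from string literals such as "block.db".
    File blockDbPath = new File(scmMetaDataDir, OzoneConsts.BLOCK_DB);
    File containerDbPath = new File(scmMetaDataDir, OzoneConsts.CONTAINER_DB);

    System.out.println(blockDbPath);     // /tmp/scm-metadata/block.db
    System.out.println(containerDbPath); // /tmp/scm-metadata/container.db
  }
}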


@@ -47,6 +47,8 @@
 import java.util.stream.Collectors;
 import java.util.UUID;
+import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
+import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB;
 import static org.apache.hadoop.ozone.scm.exceptions.SCMException
     .ResultCodes.CHILL_MODE_EXCEPTION;
 import static org.apache.hadoop.ozone.scm.exceptions.SCMException
@@ -105,7 +107,7 @@ public BlockManagerImpl(final Configuration conf,
     options.createIfMissing();
     // Write the block key to container name mapping.
-    File blockContainerDbPath = new File(scmMetaDataDir, "block.db");
+    File blockContainerDbPath = new File(scmMetaDataDir, BLOCK_DB);
     blockStore = new LevelDBStore(blockContainerDbPath, options);
     this.containerSize = OzoneConsts.GB * conf.getInt(
@@ -113,7 +115,7 @@ public BlockManagerImpl(final Configuration conf,
         ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
     // Load store of all open contains for block allocation
-    File openContainsDbPath = new File(scmMetaDataDir, "openContainers.db");
+    File openContainsDbPath = new File(scmMetaDataDir, OPEN_CONTAINERS_DB);
     openContainerStore = new LevelDBStore(openContainsDbPath, options);
     openContainers = new HashMap<>();
     loadOpenContainers();


@@ -43,6 +43,8 @@
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB;
 /**
  * Mapping class contains the mapping from a name to a pipeline mapping. This is
  * used by SCM when allocating new locations and when looking up a key.
@@ -88,7 +90,7 @@ public ContainerMapping(final Configuration conf,
     options.createIfMissing();
     // Write the container name to pipeline mapping.
-    File containerDBPath = new File(scmMetaDataDir, "container.db");
+    File containerDBPath = new File(scmMetaDataDir, CONTAINER_DB);
     containerStore = new LevelDBStore(containerDBPath, options);
     this.lock = new ReentrantLock();


@@ -43,6 +43,7 @@
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.stream.Collectors;
+import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
 import static org.apache.hadoop.ozone.scm
     .exceptions.SCMException.ResultCodes.FAILED_TO_LOAD_NODEPOOL;
 import static org.apache.hadoop.ozone.scm
@@ -90,7 +91,7 @@ public SCMNodePoolManager(final OzoneConfiguration conf)
     options.cacheSize(cacheSize * OzoneConsts.MB);
     options.createIfMissing();
-    File nodePoolDBPath = new File(scmMetaDataDir, "nodepool.db");
+    File nodePoolDBPath = new File(scmMetaDataDir, NODEPOOL_DB);
     nodePoolStore = new LevelDBStore(nodePoolDBPath, options);
     nodePools = new HashMap<>();
     lock = new ReentrantReadWriteLock();
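Each constructor in the diff follows the same pattern: build a File from the shared constant, then open a store over it. Below is a rough, self-contained sketch of that pattern, with stated assumptions: it calls the org.iq80.leveldb API directly instead of the LevelDBStore wrapper used in the diff, and the directory, class name, and key/value contents are invented for illustration.

import static org.iq80.leveldb.impl.Iq80DBFactory.factory;

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.ozone.OzoneConsts;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;

public class NodePoolDbSketch {
  public static void main(String[] args) throws IOException {
    File scmMetaDataDir = new File("/tmp/scm-metadata"); // hypothetical directory
    scmMetaDataDir.mkdirs();

    Options options = new Options();
    options.createIfMissing(true);

    // Open the node pool store under the name defined in OzoneConsts.
    File nodePoolDBPath = new File(scmMetaDataDir, OzoneConsts.NODEPOOL_DB);
    try (DB db = factory.open(nodePoolDBPath, options)) {
      // Keys and values are raw byte[]; the real SCM code stores its own
      // serialized records on top of this.
      db.put("pool-name".getBytes(), "datanode-id".getBytes());
    }
  }
}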