HDFS-12908. Ozone: write chunk call fails because of Metrics registry exception. Contributed by Mukul Kumar Singh.

This commit is contained in:
Nanda kumar 2017-12-21 18:56:30 +05:30 committed by Owen O'Malley
parent 4b9f66a19e
commit ce19f09bd0
7 changed files with 22 additions and 13 deletions

View File

@@ -76,7 +76,9 @@ public final class OzoneConsts {
/**
* level DB names used by SCM and data nodes.
*/
public static final String CONTAINER_DB = "container.db";
public static final String CONTAINER_DB_SUFFIX = "container.db";
public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
public static final String BLOCK_DB = "block.db";
public static final String NODEPOOL_DB = "nodepool.db";
public static final String OPEN_CONTAINERS_DB = "openContainers.db";

View File

@@ -230,13 +230,18 @@ public final class ContainerUtils {
}
}
/**
 * Builds the datanode container DB file name for the given container by
 * appending the datanode container-DB suffix to the container name.
 *
 * @param containerName name of the container whose DB file name is needed
 * @return the container name with {@link OzoneConsts#DN_CONTAINER_DB} appended
 */
public static String getContainerDbFileName(String containerName) {
  String dnDbSuffix = OzoneConsts.DN_CONTAINER_DB;
  return containerName + dnDbSuffix;
}
/**
* creates a Metadata DB for the specified container.
*
* @param containerPath - Container Path.
* @throws IOException
*/
public static Path createMetadata(Path containerPath, Configuration conf)
public static Path createMetadata(Path containerPath, String containerName,
Configuration conf)
throws IOException {
Logger log = LoggerFactory.getLogger(ContainerManagerImpl.class);
Preconditions.checkNotNull(containerPath);
@@ -250,7 +255,8 @@ public final class ContainerUtils {
MetadataStore store = MetadataStoreBuilder.newBuilder()
.setConf(conf)
.setCreateIfMissing(true)
.setDbFile(metadataPath.resolve(OzoneConsts.CONTAINER_DB).toFile())
.setDbFile(metadataPath
.resolve(getContainerDbFileName(containerName)).toFile())
.build();
// we close since the SCM pre-creates containers.

View File

@@ -60,7 +60,7 @@ import java.util.HashSet;
import java.util.Set;
import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX;
import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME;
import static org.apache.hadoop.ozone.OzoneConsts.KSM_USER_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.KSM_BUCKET_PREFIX;
@@ -293,7 +293,7 @@ public class SQLCLI extends Configured implements Tool {
}
}
LOG.info("Parent path [{}] db name [{}]", parentPath, dbName);
if (dbName.toString().equals(CONTAINER_DB)) {
if (dbName.toString().endsWith(CONTAINER_DB_SUFFIX)) {
LOG.info("Converting container DB");
convertContainerDB(dbPath, outPath);
} else if (dbName.toString().equals(BLOCK_DB)) {

View File

@@ -55,7 +55,7 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB;
import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB;
import static org.apache.hadoop.ozone.scm.exceptions.SCMException.ResultCodes.FAILED_TO_CHANGE_CONTAINER_STATE;
/**
@@ -102,7 +102,7 @@ public class ContainerMapping implements Mapping {
File metaDir = OzoneUtils.getOzoneMetaDirPath(conf);
// Write the container name to pipeline mapping.
File containerDBPath = new File(metaDir, CONTAINER_DB);
File containerDBPath = new File(metaDir, SCM_CONTAINER_DB);
containerStore =
MetadataStoreBuilder.newBuilder()
.setConf(conf)

View File

@@ -23,11 +23,8 @@ import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.ratis.shaded.com.google.common.annotations.VisibleForTesting;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksIterator;
import org.rocksdb.Options;
import org.rocksdb.Statistics;
import org.rocksdb.StatsLevel;
import org.rocksdb.WriteOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
@@ -76,6 +73,10 @@ public class RocksDBStore implements MetadataStore {
jmxProperties.put("dbName", dbFile.getName());
statMBeanName = MBeans.register("Ozone", "RocksDbStore", jmxProperties,
new RocksDBStoreMBean(dbOptions.statistics()));
if (statMBeanName == null) {
LOG.warn("jmx registration failed during RocksDB init, db path :{}",
dbFile.getAbsolutePath());
}
}
} catch (RocksDBException e) {
throw new IOException(

View File

@@ -164,7 +164,7 @@ public class TestContainerPersistence {
}
// Clean up SCM metadata
log.info("Deletting {}", path);
log.info("Deleting {}", path);
FileUtils.deleteDirectory(new File(path));
// Clean up SCM datanode container metadata/data

View File

@@ -53,7 +53,7 @@ import java.util.Collection;
import java.util.HashMap;
import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB;
import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB;
import static org.apache.hadoop.ozone.OzoneConsts.KB;
import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
//import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB;
@@ -234,7 +234,7 @@ public class TestContainerSQLCli {
// TODO : the following will fail due to empty Datanode list, need to fix.
//String dnUUID = cluster.getDataNodes().get(0).getDatanodeUuid();
String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
String dbPath = dbRootPath + "/" + CONTAINER_DB;
String dbPath = dbRootPath + "/" + SCM_CONTAINER_DB;
String[] args = {"-p", dbPath, "-o", dbOutPath};
Connection conn;
String sql;