HDDS-1720: Add ability to configure RocksDB logs for Ozone Manager.

Signed-off-by: Anu Engineer <aengineer@apache.org>
Aravindan Vijayan 2019-09-27 00:10:08 -07:00 committed by Anu Engineer
parent 944668674b
commit 76605f17dd
4 changed files with 189 additions and 10 deletions
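For context, here is a minimal usage sketch (not part of the commit) showing how the new options are enabled; the key names combine the "hadoop.hdds.db" ConfigGroup prefix with the @Config keys defined in RocksDBConfiguration below, and the level values mirror org.rocksdb.InfoLogLevel:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;

OzoneConfiguration conf = new OzoneConfiguration();
// Forward RocksDB's internal log output to the SLF4J logger registered for RocksDB.
conf.set("hadoop.hdds.db.rocksdb.logging.enabled", "true");
// One of INFO/DEBUG/WARN/ERROR/FATAL; INFO is the default.
conf.set("hadoop.hdds.db.rocksdb.logging.level", "DEBUG");
// Any store built through DBStoreBuilder now attaches an org.rocksdb.Logger.
DBStoreBuilder builder = DBStoreBuilder.newBuilder(conf);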

org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java

@@ -22,11 +22,13 @@ package org.apache.hadoop.hdds.utils.db;
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.eclipse.jetty.util.StringUtil;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;
import org.rocksdb.InfoLogLevel;
import org.rocksdb.RocksDB;
import org.rocksdb.Statistics;
import org.rocksdb.StatsLevel;
@@ -54,6 +56,8 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKS
public final class DBStoreBuilder {
private static final Logger LOG =
LoggerFactory.getLogger(DBStoreBuilder.class);
public static final Logger ROCKS_DB_LOGGER =
LoggerFactory.getLogger(RocksDB.class);
private Set<TableConfig> tables;
private DBProfile dbProfile;
private DBOptions rocksDBOption;
@@ -63,8 +67,9 @@ public final class DBStoreBuilder {
private Configuration configuration;
private CodecRegistry registry;
private String rocksDbStat;
private RocksDBConfiguration rocksDBConfiguration;
private DBStoreBuilder(Configuration configuration) {
private DBStoreBuilder(OzoneConfiguration configuration) {
tables = new HashSet<>();
tableNames = new LinkedList<>();
this.configuration = configuration;
@@ -72,9 +77,11 @@ public final class DBStoreBuilder {
this.rocksDbStat = configuration.getTrimmed(
OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
this.rocksDBConfiguration =
configuration.getObject(RocksDBConfiguration.class);
}
public static DBStoreBuilder newBuilder(Configuration configuration) {
public static DBStoreBuilder newBuilder(OzoneConfiguration configuration) {
return new DBStoreBuilder(configuration);
}
@@ -199,6 +206,19 @@ public final class DBStoreBuilder {
option = dbProfile.getDBOptions();
}
if (rocksDBConfiguration.isRocksdbLoggingEnabled()) {
org.rocksdb.Logger logger = new org.rocksdb.Logger(option) {
@Override
protected void log(InfoLogLevel infoLogLevel, String s) {
ROCKS_DB_LOGGER.info(s);
}
};
InfoLogLevel level = InfoLogLevel.valueOf(rocksDBConfiguration
.getRocksdbLogLevel() + "_LEVEL");
logger.setInfoLogLevel(level);
option.setLogger(logger);
}
if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
Statistics statistics = new Statistics();
statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
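Note on the mapping above: the configured level string is concatenated with "_LEVEL" to select an org.rocksdb.InfoLogLevel constant, so only values that name such a constant (the INFO/DEBUG/WARN/ERROR/FATAL levels listed in the config description) are valid; anything else makes valueOf throw IllegalArgumentException. A small illustration (assumes rocksdbjni on the classpath):

import org.rocksdb.InfoLogLevel;

String configured = "DEBUG";                                      // value of rocksdb.logging.level
InfoLogLevel level = InfoLogLevel.valueOf(configured + "_LEVEL"); // resolves to DEBUG_LEVEL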

org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java (new file)

@@ -0,0 +1,62 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdds.utils.db;
import org.apache.hadoop.hdds.conf.Config;
import org.apache.hadoop.hdds.conf.ConfigGroup;
import org.apache.hadoop.hdds.conf.ConfigTag;
import org.apache.hadoop.hdds.conf.ConfigType;
/**
* Holds configuration items for OM RocksDB.
*/
@ConfigGroup(prefix = "hadoop.hdds.db")
public class RocksDBConfiguration {
private boolean rocksdbLogEnabled;
@Config(key = "rocksdb.logging.enabled",
type = ConfigType.BOOLEAN,
defaultValue = "false",
tags = {ConfigTag.OM},
description = "Enable/Disable RocksDB logging for OM.")
public void setRocksdbLoggingEnabled(boolean enabled) {
this.rocksdbLogEnabled = enabled;
}
public boolean isRocksdbLoggingEnabled() {
return rocksdbLogEnabled;
}
private String rocksdbLogLevel;
@Config(key = "rocksdb.logging.level",
type = ConfigType.STRING,
defaultValue = "INFO",
tags = {ConfigTag.OM},
description = "OM RocksDB logging level (INFO/DEBUG/WARN/ERROR/FATAL)")
public void setRocksdbLogLevel(String level) {
this.rocksdbLogLevel = level;
}
public String getRocksdbLogLevel() {
return rocksdbLogLevel;
}
}
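For reference, a sketch (not part of this change) of how this annotated class is populated: DBStoreBuilder asks OzoneConfiguration for an injected instance via getObject(), which calls the @Config setters with values resolved under the "hadoop.hdds.db" prefix, falling back to the declared defaults:

OzoneConfiguration conf = new OzoneConfiguration();
conf.setBoolean("hadoop.hdds.db.rocksdb.logging.enabled", true);

// getObject() instantiates RocksDBConfiguration and invokes the annotated setters.
RocksDBConfiguration dbConf = conf.getObject(RocksDBConfiguration.class);
assert dbConf.isRocksdbLoggingEnabled();                 // explicitly set above
assert "INFO".equals(dbConf.getRocksdbLogLevel());       // default value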

org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java

@@ -20,7 +20,7 @@
package org.apache.hadoop.hdds.utils.db;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
@@ -50,14 +50,14 @@ public class TestDBStoreBuilder {
@Test
public void builderWithoutAnyParams() throws IOException {
Configuration conf = new Configuration();
OzoneConfiguration conf = new OzoneConfiguration();
thrown.expect(IOException.class);
DBStoreBuilder.newBuilder(conf).build();
}
@Test
public void builderWithOneParamV1() throws IOException {
Configuration conf = new Configuration();
OzoneConfiguration conf = new OzoneConfiguration();
thrown.expect(IOException.class);
DBStoreBuilder.newBuilder(conf)
.setName("Test.db")
@@ -66,7 +66,7 @@ public class TestDBStoreBuilder {
@Test
public void builderWithOneParamV2() throws IOException {
Configuration conf = new Configuration();
OzoneConfiguration conf = new OzoneConfiguration();
File newFolder = folder.newFolder();
if(!newFolder.exists()) {
Assert.assertTrue(newFolder.mkdirs());
@@ -79,7 +79,7 @@ public class TestDBStoreBuilder {
@Test
public void builderWithOpenClose() throws Exception {
Configuration conf = new Configuration();
OzoneConfiguration conf = new OzoneConfiguration();
File newFolder = folder.newFolder();
if(!newFolder.exists()) {
Assert.assertTrue(newFolder.mkdirs());
@@ -94,7 +94,7 @@ public class TestDBStoreBuilder {
@Test
public void builderWithDoubleTableName() throws Exception {
Configuration conf = new Configuration();
OzoneConfiguration conf = new OzoneConfiguration();
File newFolder = folder.newFolder();
if(!newFolder.exists()) {
Assert.assertTrue(newFolder.mkdirs());
@@ -112,7 +112,7 @@ public class TestDBStoreBuilder {
@Test
public void builderWithDataWrites() throws Exception {
Configuration conf = new Configuration();
OzoneConfiguration conf = new OzoneConfiguration();
File newFolder = folder.newFolder();
if(!newFolder.exists()) {
Assert.assertTrue(newFolder.mkdirs());
@@ -141,7 +141,7 @@ public class TestDBStoreBuilder {
@Test
public void builderWithDiskProfileWrites() throws Exception {
Configuration conf = new Configuration();
OzoneConfiguration conf = new OzoneConfiguration();
File newFolder = folder.newFolder();
if(!newFolder.exists()) {
Assert.assertTrue(newFolder.mkdirs());

org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java (new file)

@@ -0,0 +1,97 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.hadoop.ozone.om;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
/**
* Test RocksDB logging for Ozone Manager.
*/
public class TestOzoneManagerRocksDBLogging {
private MiniOzoneCluster cluster = null;
private OzoneConfiguration conf;
private String clusterId;
private String scmId;
private String omId;
@Rule
public Timeout timeout = new Timeout(60000);
@Before
public void init() throws Exception {
conf = new OzoneConfiguration();
conf.set("hadoop.hdds.db.rocksdb.logging.enabled", "true");
clusterId = UUID.randomUUID().toString();
scmId = UUID.randomUUID().toString();
omId = UUID.randomUUID().toString();
cluster = MiniOzoneCluster.newBuilder(conf)
.setClusterId(clusterId)
.setScmId(scmId)
.setOmId(omId)
.build();
cluster.waitForClusterToBeReady();
}
/**
* Shutdown MiniOzoneCluster.
*/
@After
public void shutdown() {
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testOMRocksDBLoggingEnabled() throws Exception {
GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
.captureLogs(DBStoreBuilder.ROCKS_DB_LOGGER);
cluster.restartOzoneManager();
GenericTestUtils.waitFor(() -> logCapturer.getOutput()
.contains("db_impl.cc"),
1000, 10000);
cluster.getConf().set("hadoop.hdds.db.rocksdb.logging.enabled", "false");
cluster.restartOzoneManager();
logCapturer.clearOutput();
try {
GenericTestUtils.waitFor(() -> logCapturer.getOutput()
.contains("db_impl.cc"),
1000, 10000);
Assert.fail();
} catch (TimeoutException ex) {
Assert.assertTrue(ex.getMessage().contains("Timed out"));
}
}
}