HDFS-13444. Ozone: Fix checkstyle issues in HDFS-7240. Contributed by Lokesh Jain.

Authored by Nanda kumar on 2018-04-17 16:11:47 +05:30; committed by Owen O'Malley
parent 3d18ca4926
commit 17974ba3a6
35 changed files with 254 additions and 184 deletions

View File

@@ -1,6 +1,6 @@
package org.apache.hadoop.hdds;

-public class HddsConfigKeys {
+public final class HddsConfigKeys {
  private HddsConfigKeys() {
  }
}
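This is the checkstyle FinalClass/HideUtilityClassConstructor pattern that recurs throughout the commit: a class holding only static members is declared final so it cannot be subclassed, and its constructor is hidden so it cannot be instantiated. A minimal self-contained sketch of the rule; the FooConfigKeys name and its keys are illustrative, not from this patch:

/**
 * Constants for a hypothetical Foo subsystem. Checkstyle expects a
 * constants/utility class to be final and to hide its constructor.
 */
public final class FooConfigKeys {
  public static final String FOO_ENABLED_KEY = "foo.enabled";
  public static final boolean FOO_ENABLED_DEFAULT = false;

  private FooConfigKeys() {
    // Utility classes are never constructed.
  }
}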

View File

@@ -48,7 +48,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT;
/**
 * HDDS specific stateless utility functions.
 */
-public class HddsUtils {
+public final class HddsUtils {
  private static final Logger LOG = LoggerFactory.getLogger(HddsUtils.class);

View File

@@ -53,7 +53,8 @@ public class HddsConfServlet extends HttpServlet {
  private static final String COMMAND = "cmd";
  private static final OzoneConfiguration OZONE_CONFIG =
      new OzoneConfiguration();
-  transient Logger LOG = LoggerFactory.getLogger(HddsConfServlet.class);
+  private static final transient Logger LOG =
+      LoggerFactory.getLogger(HddsConfServlet.class);

  /**
@@ -152,25 +153,25 @@ public class HddsConfServlet extends HttpServlet {
    Configuration config = getOzoneConfig();
    switch (cmd) {
    case "getOzoneTags":
      out.write(gson.toJson(config.get("ozone.system.tags").split(",")));
      break;
    case "getPropertyByTag":
      String tags = request.getParameter("tags");
      Map<String, Properties> propMap = new HashMap<>();
      for (String tag : tags.split(",")) {
        if (config.isPropertyTag(tag)) {
          Properties properties = config.getAllPropertiesByTag(tag);
          propMap.put(tag, properties);
        } else {
          LOG.debug("Not a valid tag" + tag);
        }
      }
      out.write(gson.toJsonTree(propMap).toString());
      break;
    default:
      throw new IllegalArgumentException(cmd + " is not a valid command.");
    }
  }

View File

@@ -0,0 +1,18 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdds.scm.container;

View File

@@ -0,0 +1,19 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.utils;

View File

@@ -55,7 +55,10 @@ import static org.apache.hadoop.hdds.server.ServerUtils.sanitizeUserArgs;
/**
 * Hdds stateless helper functions for server side components.
 */
-public class HddsServerUtil {
+public final class HddsServerUtil {
+
+  private HddsServerUtil() {
+  }

  private static final Logger LOG = LoggerFactory.getLogger(
      HddsServerUtil.class);

View File

@@ -0,0 +1,19 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdds.scm;

View File

@@ -227,7 +227,7 @@ public class HddsDatanodeService implements ServicePlugin {
    return new HddsDatanodeService(conf);
  }

-  public static void main(String args[]) {
+  public static void main(String[] args) {
    try {
      StringUtils.startupShutdownMessage(HddsDatanodeService.class, args, LOG);
      HddsDatanodeService hddsDatanodeService =
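This hunk applies checkstyle's ArrayTypeStyle rule: the brackets belong with the type, not the variable. A small sketch showing both forms; the class name is hypothetical, and both declarations compile to the same signature:

public final class ArrayStyleDemo {

  private ArrayStyleDemo() {
  }

  // "String[] args" (Java style) passes checkstyle; "String args[]"
  // (C style) is flagged by the ArrayTypeStyle check.
  public static void main(String[] args) {
    int[] lengths = new int[args.length];
    for (int i = 0; i < args.length; i++) {
      lengths[i] = args[i].length();
    }
    System.out.println(java.util.Arrays.toString(lengths));
  }
}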

View File

@@ -107,6 +107,8 @@ public class ChunkManagerImpl implements ChunkManager {
        containerManager.incrWriteCount(containerName);
        containerManager.incrWriteBytes(containerName, info.getLen());
        break;
+      default:
+        throw new IOException("Can not identify write operation.");
      }
    } catch (ExecutionException | NoSuchAlgorithmException | IOException e) {
      LOG.error("write data failed. error: {}", e);
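The added default branch satisfies checkstyle's MissingSwitchDefault check. A runnable sketch of the same pattern under assumed names; the WriteStage enum and handleWrite method here are illustrative stand-ins, not the real ContainerProtos types:

import java.io.IOException;

public final class SwitchDefaultDemo {

  /** Stand-in for the protobuf write-stage enum. */
  enum WriteStage { WRITE_DATA, COMMIT_DATA, COMBINED }

  private SwitchDefaultDemo() {
  }

  // MissingSwitchDefault: every switch gets a default branch, so a value
  // added to the enum later fails loudly instead of being silently ignored.
  static void handleWrite(WriteStage stage) throws IOException {
    switch (stage) {
    case WRITE_DATA:
    case COMBINED:
      System.out.println("writing chunk data for stage " + stage);
      break;
    case COMMIT_DATA:
      System.out.println("committing chunk data");
      break;
    default:
      throw new IOException("Can not identify write operation.");
    }
  }

  public static void main(String[] args) throws IOException {
    handleWrite(WriteStage.COMBINED);
  }
}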

View File

@@ -464,7 +464,7 @@ public class Dispatcher implements ContainerDispatcher {
    byte[] data = null;
    if (msg.getWriteChunk().getStage() == ContainerProtos.Stage.WRITE_DATA
        || msg.getWriteChunk().getStage() == ContainerProtos.Stage.COMBINED) {
      data = msg.getWriteChunk().getData().toByteArray();
      metrics.incContainerBytesStats(Type.WriteChunk, data.length);
    }

View File

@@ -195,13 +195,13 @@ public class RegisteredCommand extends
    /**
     * sets the hostname.
     */
-    public Builder setHostname(String hostname) {
-      this.hostname = hostname;
+    public Builder setHostname(String host) {
+      this.hostname = host;
      return this;
    }

-    public Builder setIpAddress(String ipAddress) {
-      this.ipAddress = ipAddress;
+    public Builder setIpAddress(String ipAddr) {
+      this.ipAddress = ipAddr;
      return this;
    }

View File

@@ -0,0 +1,19 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.protocolPB;

View File

@@ -34,7 +34,7 @@ import java.net.InetSocketAddress;
/**
 * Helper utility to test containers.
 */
-public class ContainerTestUtils {
+public final class ContainerTestUtils {
  private ContainerTestUtils() {
  }

View File

@@ -32,7 +32,7 @@ import java.net.InetSocketAddress;
/**
 * Generic utilities for all HDDS/Ozone servers.
 */
-public class ServerUtils {
+public final class ServerUtils {
  private static final Logger LOG = LoggerFactory.getLogger(
      ServerUtils.class);

View File

@@ -27,7 +27,7 @@ import java.util.UUID;
/**
 * Stateless helper functions to handler scm/datanode connection.
 */
-public class TestUtils {
+public final class TestUtils {
  private TestUtils() {
  }

View File

@@ -89,8 +89,8 @@ public class TestContainerPlacement {
          capacityPlacer.chooseDatanodes(nodesRequired, containerSize);
      assertEquals(nodesRequired, nodesCapacity.size());
-      List<DatanodeDetails> nodesRandom = randomPlacer.chooseDatanodes(nodesRequired,
-          containerSize);
+      List<DatanodeDetails> nodesRandom =
+          randomPlacer.chooseDatanodes(nodesRequired, containerSize);
      // One fifth of all calls are delete
      if (x % 5 == 0) {

View File

@@ -59,7 +59,7 @@ public abstract class OzoneCommandHandler {
    this.err = err;
  }

-  public void logOut(String msg, String ... variable) {
+  public void logOut(String msg, String... variable) {
    this.out.println(String.format(msg, variable));
  }

View File

@@ -148,8 +148,9 @@ public class SCMCLI extends OzoneBaseCLI {
  }

  private static void addTopLevelOptions(Options options) {
-    Option containerOps = new Option(
-        ContainerCommandHandler.CONTAINER_CMD, false, "Container related options");
+    Option containerOps =
+        new Option(ContainerCommandHandler.CONTAINER_CMD, false,
+            "Container related options");
    options.addOption(containerOps);
    // TODO : add pool, node and pipeline commands.
  }

View File

@@ -66,8 +66,9 @@ public class CloseContainerHandler extends OzoneCommandHandler {
    Options options = new Options();
    addOptions(options);
    HelpFormatter helpFormatter = new HelpFormatter();
-    helpFormatter.printHelp(SCMCLI.CMD_WIDTH, "hdfs scm -container -close <option>",
-        "where <option> is", options, "");
+    helpFormatter
+        .printHelp(SCMCLI.CMD_WIDTH, "hdfs scm -container -close <option>",
+            "where <option> is", options, "");
  }

  public static void addOptions(Options options) {

View File

@@ -35,8 +35,8 @@ import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;

/**
- * This test class verifies the parsing of SCM endpoint config settings.
- * The parsing logic is in {@link org.apache.hadoop.hdds.scm.client.HddsClientUtils}.
+ * This test class verifies the parsing of SCM endpoint config settings. The
+ * parsing logic is in {@link org.apache.hadoop.hdds.scm.client.HddsClientUtils}.
 */
public class TestHddsClientUtils {
  @Rule

View File

@@ -34,7 +34,7 @@ import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_PORT_DEFAULT;
 * Stateless helper functions for the server and client side of KSM
 * communication.
 */
-public class KsmUtils {
+public final class KsmUtils {
  private KsmUtils() {
  }

View File

@@ -115,7 +115,7 @@ public class OzoneGetConf extends Configured implements Tool {
   * {@link OzoneGetConf.Command}.
   */
  static class CommandHandler {
-    String key; // Configuration key to lookup
+    protected String key; // Configuration key to lookup

    CommandHandler() {
      this(null);
@@ -136,7 +136,7 @@ public class OzoneGetConf extends Configured implements Tool {
      return -1;
    }

-    protected void checkArgs(String args[]) {
+    protected void checkArgs(String[] args) {
      if (args.length > 0) {
        throw new HadoopIllegalArgumentException(
            "Did not expect argument: " + args[0]);
@@ -144,7 +144,7 @@ public class OzoneGetConf extends Configured implements Tool {
    }

-    /** Method to be overridden by sub classes for specific behavior */
+    /** Method to be overridden by sub classes for specific behavior. */
    int doWorkInternal(OzoneGetConf tool, String[] args) throws Exception {
      String value = tool.getConf().getTrimmed(key);
@@ -205,10 +205,10 @@ public class OzoneGetConf extends Configured implements Tool {
   */
  private int doWork(String[] args) {
    if (args.length >= 1) {
-      OzoneGetConf.CommandHandler handler = OzoneGetConf.Command.getHandler(args[0]);
+      OzoneGetConf.CommandHandler handler =
+          OzoneGetConf.Command.getHandler(args[0]);
      if (handler != null) {
-        return handler.doWork(this,
-            Arrays.copyOfRange(args, 1, args.length));
+        return handler.doWork(this, Arrays.copyOfRange(args, 1, args.length));
      }
    }
    printUsage();
@@ -249,9 +249,9 @@ public class OzoneGetConf extends Configured implements Tool {
   */
  static class KeySpaceManagersCommandHandler extends CommandHandler {
    @Override
-    public int doWorkInternal(OzoneGetConf tool, String[] args) throws IOException {
-      tool.printOut(KsmUtils.getKsmAddress(tool.getConf())
-          .getHostName());
+    public int doWorkInternal(OzoneGetConf tool, String[] args)
+        throws IOException {
+      tool.printOut(KsmUtils.getKsmAddress(tool.getConf()).getHostName());
      return 0;
    }
  }

View File

@@ -44,7 +44,7 @@ public class TestContainerStateManager {
  private XceiverClientManager xceiverClientManager;
  private StorageContainerManager scm;
  private Mapping scmContainerMapping;
-  private ContainerStateManager stateManager;
+  private ContainerStateManager containerStateManager;
  private String containerOwner = "OZONE";
@@ -56,7 +56,7 @@ public class TestContainerStateManager {
    xceiverClientManager = new XceiverClientManager(conf);
    scm = cluster.getStorageContainerManager();
    scmContainerMapping = scm.getScmContainerManager();
-    stateManager = scmContainerMapping.getStateManager();
+    containerStateManager = scmContainerMapping.getStateManager();
  }

  @After
@@ -72,7 +72,7 @@ public class TestContainerStateManager {
    String container1 = "container" + RandomStringUtils.randomNumeric(5);
    scm.allocateContainer(xceiverClientManager.getType(),
        xceiverClientManager.getFactor(), container1, containerOwner);
-    ContainerInfo info = stateManager
+    ContainerInfo info = containerStateManager
        .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
            xceiverClientManager.getType(), xceiverClientManager.getFactor(),
            HddsProtos.LifeCycleState.ALLOCATED);
@@ -89,7 +89,7 @@ public class TestContainerStateManager {
    String container2 = "container" + RandomStringUtils.randomNumeric(5);
    scm.allocateContainer(xceiverClientManager.getType(),
        xceiverClientManager.getFactor(), container2, containerOwner);
-    int numContainers = stateManager
+    int numContainers = containerStateManager
        .getMatchingContainerIDs(containerOwner,
            xceiverClientManager.getType(), xceiverClientManager.getFactor(),
            HddsProtos.LifeCycleState.ALLOCATED).size();
@@ -139,13 +139,13 @@ public class TestContainerStateManager {
    scm.allocateContainer(xceiverClientManager.getType(),
        xceiverClientManager.getFactor(), container2, containerOwner);

-    ContainerInfo info = stateManager
+    ContainerInfo info = containerStateManager
        .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
            xceiverClientManager.getType(), xceiverClientManager.getFactor(),
            HddsProtos.LifeCycleState.OPEN);
    Assert.assertEquals(container1, info.getContainerName());

-    info = stateManager
+    info = containerStateManager
        .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
            xceiverClientManager.getType(), xceiverClientManager.getFactor(),
            HddsProtos.LifeCycleState.ALLOCATED);
@@ -158,7 +158,7 @@ public class TestContainerStateManager {
    // space has already been allocated in container1, now container 2 should
    // be chosen.
-    info = stateManager
+    info = containerStateManager
        .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
            xceiverClientManager.getType(), xceiverClientManager.getFactor(),
            HddsProtos.LifeCycleState.OPEN);
@@ -167,7 +167,7 @@ public class TestContainerStateManager {

  @Test
  public void testUpdateContainerState() throws IOException {
-    NavigableSet<ContainerID> containerList = stateManager
+    NavigableSet<ContainerID> containerList = containerStateManager
        .getMatchingContainerIDs(containerOwner,
            xceiverClientManager.getType(), xceiverClientManager.getFactor(),
            HddsProtos.LifeCycleState.ALLOCATED);
@@ -179,49 +179,49 @@ public class TestContainerStateManager {
    String container1 = "container" + RandomStringUtils.randomNumeric(5);
    scm.allocateContainer(xceiverClientManager.getType(),
        xceiverClientManager.getFactor(), container1, containerOwner);
-    containers = stateManager.getMatchingContainerIDs(containerOwner,
+    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
        xceiverClientManager.getType(), xceiverClientManager.getFactor(),
        HddsProtos.LifeCycleState.ALLOCATED).size();
    Assert.assertEquals(1, containers);

    scmContainerMapping.updateContainerState(container1,
        HddsProtos.LifeCycleEvent.CREATE);
-    containers = stateManager.getMatchingContainerIDs(containerOwner,
+    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
        xceiverClientManager.getType(), xceiverClientManager.getFactor(),
        HddsProtos.LifeCycleState.CREATING).size();
    Assert.assertEquals(1, containers);

    scmContainerMapping.updateContainerState(container1,
        HddsProtos.LifeCycleEvent.CREATED);
-    containers = stateManager.getMatchingContainerIDs(containerOwner,
+    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
        xceiverClientManager.getType(), xceiverClientManager.getFactor(),
        HddsProtos.LifeCycleState.OPEN).size();
    Assert.assertEquals(1, containers);

    scmContainerMapping
        .updateContainerState(container1, HddsProtos.LifeCycleEvent.FINALIZE);
-    containers = stateManager.getMatchingContainerIDs(containerOwner,
+    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
        xceiverClientManager.getType(), xceiverClientManager.getFactor(),
        HddsProtos.LifeCycleState.CLOSING).size();
    Assert.assertEquals(1, containers);

    scmContainerMapping
        .updateContainerState(container1, HddsProtos.LifeCycleEvent.CLOSE);
-    containers = stateManager.getMatchingContainerIDs(containerOwner,
+    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
        xceiverClientManager.getType(), xceiverClientManager.getFactor(),
        HddsProtos.LifeCycleState.CLOSED).size();
    Assert.assertEquals(1, containers);

    scmContainerMapping
        .updateContainerState(container1, HddsProtos.LifeCycleEvent.DELETE);
-    containers = stateManager.getMatchingContainerIDs(containerOwner,
+    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
        xceiverClientManager.getType(), xceiverClientManager.getFactor(),
        HddsProtos.LifeCycleState.DELETING).size();
    Assert.assertEquals(1, containers);

    scmContainerMapping
        .updateContainerState(container1, HddsProtos.LifeCycleEvent.CLEANUP);
-    containers = stateManager.getMatchingContainerIDs(containerOwner,
+    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
        xceiverClientManager.getType(), xceiverClientManager.getFactor(),
        HddsProtos.LifeCycleState.DELETED).size();
    Assert.assertEquals(1, containers);
@@ -235,7 +235,7 @@ public class TestContainerStateManager {
        HddsProtos.LifeCycleEvent.CREATE);
    scmContainerMapping
        .updateContainerState(container2, HddsProtos.LifeCycleEvent.TIMEOUT);
-    containers = stateManager.getMatchingContainerIDs(containerOwner,
+    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
        xceiverClientManager.getType(), xceiverClientManager.getFactor(),
        HddsProtos.LifeCycleState.DELETING).size();
    Assert.assertEquals(1, containers);
@@ -253,7 +253,7 @@ public class TestContainerStateManager {
        HddsProtos.LifeCycleEvent.FINALIZE);
    scmContainerMapping
        .updateContainerState(container3, HddsProtos.LifeCycleEvent.CLOSE);
-    containers = stateManager.getMatchingContainerIDs(containerOwner,
+    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
        xceiverClientManager.getType(), xceiverClientManager.getFactor(),
        HddsProtos.LifeCycleState.CLOSED).size();
    Assert.assertEquals(1, containers);
@@ -275,7 +275,7 @@ public class TestContainerStateManager {
      long size = Math.abs(ran.nextLong() % OzoneConsts.GB);
      allocatedSize += size;
      // trigger allocating bytes by calling getMatchingContainer
-      ContainerInfo info = stateManager
+      ContainerInfo info = containerStateManager
          .getMatchingContainer(size, containerOwner,
              xceiverClientManager.getType(), xceiverClientManager.getFactor(),
              HddsProtos.LifeCycleState.OPEN);

View File

@@ -119,8 +119,8 @@ public interface MiniOzoneCluster {
   * @return StorageContainerLocation Client
   * @throws IOException
   */
-  StorageContainerLocationProtocolClientSideTranslatorPB
-      getStorageContainerLocationClient() throws IOException;
+  StorageContainerLocationProtocolClientSideTranslatorPB getStorageContainerLocationClient()
+      throws IOException;

  /**
   * Restarts StorageContainerManager instance.
@@ -166,6 +166,9 @@ public interface MiniOzoneCluster {
    return new MiniOzoneClusterImpl.Builder(conf);
  }

+  /**
+   * Builder class for MiniOzoneCluster.
+   */
  abstract class Builder {

    protected static final int DEFAULT_HB_INTERVAL_MS = 1000;
@@ -280,7 +283,7 @@ public interface MiniOzoneCluster {
     *
     * @return MiniOzoneCluster.Builder
     */
-    public Builder setHbProcessorInterval (int val) {
+    public Builder setHbProcessorInterval(int val) {
      hbProcessorInterval = Optional.of(val);
      return this;
    }

View File

@@ -179,8 +179,8 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
   * @throws IOException if there is an I/O error
   */
  @Override
-  public StorageContainerLocationProtocolClientSideTranslatorPB
-      getStorageContainerLocationClient() throws IOException {
+  public StorageContainerLocationProtocolClientSideTranslatorPB getStorageContainerLocationClient()
+      throws IOException {
    long version = RPC.getProtocolVersion(
        StorageContainerLocationProtocolPB.class);
    InetSocketAddress address = scm.getClientRpcAddress();

View File

@@ -96,9 +96,10 @@ public class TestContainerReportWithKeys {
    ObjectStore objectStore = client.getObjectStore();
    objectStore.createVolume(volumeName);
    objectStore.getVolume(volumeName).createBucket(bucketName);
-    OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName)
-        .createKey(keyName, keySize, ReplicationType.STAND_ALONE,
-            ReplicationFactor.ONE);
+    OzoneOutputStream key =
+        objectStore.getVolume(volumeName).getBucket(bucketName)
+            .createKey(keyName, keySize, ReplicationType.STAND_ALONE,
+                ReplicationFactor.ONE);
    String dataString = RandomStringUtils.randomAlphabetic(keySize);
    key.write(dataString.getBytes());
    key.close();

View File

@@ -96,7 +96,7 @@ public class TestContainerSQLCli {
  private final static long DEFAULT_BLOCK_SIZE = 4 * KB;
  private static HddsProtos.ReplicationFactor factor;
  private static HddsProtos.ReplicationType type;
-  private static final String containerOwner = "OZONE";
+  private static final String CONTAINER_OWNER = "OZONE";

  @Before
@@ -141,7 +141,7 @@ public class TestContainerSQLCli {
    }
    assertEquals(2, nodeManager.getAllNodes().size());
    AllocatedBlock ab1 = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, type,
-        factor, containerOwner);
+        factor, CONTAINER_OWNER);
    pipeline1 = ab1.getPipeline();
    blockContainerMap.put(ab1.getKey(), pipeline1.getContainerName());
@@ -154,7 +154,7 @@ public class TestContainerSQLCli {
    // the size of blockContainerMap will vary each time the test is run.
    while (true) {
      ab2 = blockManager
-          .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner);
+          .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, CONTAINER_OWNER);
      pipeline2 = ab2.getPipeline();
      blockContainerMap.put(ab2.getKey(), pipeline2.getContainerName());
      if (!pipeline1.getContainerName().equals(pipeline2.getContainerName())) {
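The rename here is checkstyle's ConstantName rule: a static final field is a constant and takes UPPER_SNAKE_CASE, while mutable and instance fields stay camelCase. A compact sketch under assumed names (the demo class and its fields are illustrative):

public final class ConstantNameDemo {

  // static final => a constant; ConstantName requires UPPER_SNAKE_CASE.
  private static final String CONTAINER_OWNER = "OZONE";
  private static final long MAX_KEYS = 1024 * 10;

  // Mutable static and instance state keeps camelCase under the
  // StaticVariableName and MemberName checks.
  private static int allocationCount;
  private int blockSize;

  private ConstantNameDemo() {
  }

  public static String describe() {
    return CONTAINER_OWNER + " allows up to " + MAX_KEYS + " keys";
  }
}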

View File

@@ -14,7 +14,7 @@
 * License for the specific language governing permissions and limitations under
 * the License.
 */
-package org.apache.hadoop.hdds.scm.node;
+package org.apache.hadoop.ozone.scm.node;

import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

View File

@@ -42,89 +42,73 @@ import java.util.ArrayList;
import java.util.Iterator;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicInteger;

import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;

@State(Scope.Thread)
public class BenchMarkContainerStateMap {
-  public ContainerStateMap stateMap;
-  public AtomicInteger containerID;
+  private ContainerStateMap stateMap;
+  private AtomicInteger containerID;

  @Setup(Level.Trial)
  public void initialize() throws IOException {
    stateMap = new ContainerStateMap();
    Pipeline pipeline = createSingleNodePipeline(UUID.randomUUID().toString());
    Preconditions.checkNotNull(pipeline, "Pipeline cannot be null.");
    int currentCount = 1;
    for (int x = 1; x < 1000; x++) {
      try {
        ContainerInfo containerInfo = new ContainerInfo.Builder()
-            .setContainerName(pipeline.getContainerName())
-            .setState(CLOSED)
+            .setContainerName(pipeline.getContainerName()).setState(CLOSED)
            .setPipeline(pipeline)
            // This is bytes allocated for blocks inside container, not the
            // container size
-            .setAllocatedBytes(0)
-            .setUsedBytes(0)
-            .setNumberOfKeys(0)
-            .setStateEnterTime(Time.monotonicNow())
-            .setOwner("OZONE")
-            .setContainerID(x)
-            .build();
+            .setAllocatedBytes(0).setUsedBytes(0).setNumberOfKeys(0)
+            .setStateEnterTime(Time.monotonicNow()).setOwner("OZONE")
+            .setContainerID(x).build();
        stateMap.addContainer(containerInfo);
        currentCount++;
      } catch (SCMException e) {
        e.printStackTrace();
      }
    }
    for (int y = currentCount; y < 2000; y++) {
      try {
        ContainerInfo containerInfo = new ContainerInfo.Builder()
-            .setContainerName(pipeline.getContainerName())
-            .setState(OPEN)
+            .setContainerName(pipeline.getContainerName()).setState(OPEN)
            .setPipeline(pipeline)
            // This is bytes allocated for blocks inside container, not the
            // container size
-            .setAllocatedBytes(0)
-            .setUsedBytes(0)
-            .setNumberOfKeys(0)
-            .setStateEnterTime(Time.monotonicNow())
-            .setOwner("OZONE")
-            .setContainerID(y)
-            .build();
+            .setAllocatedBytes(0).setUsedBytes(0).setNumberOfKeys(0)
+            .setStateEnterTime(Time.monotonicNow()).setOwner("OZONE")
+            .setContainerID(y).build();
        stateMap.addContainer(containerInfo);
        currentCount++;
      } catch (SCMException e) {
        e.printStackTrace();
      }
    }
    try {
      ContainerInfo containerInfo = new ContainerInfo.Builder()
-          .setContainerName(pipeline.getContainerName())
-          .setState(OPEN)
+          .setContainerName(pipeline.getContainerName()).setState(OPEN)
          .setPipeline(pipeline)
          // This is bytes allocated for blocks inside container, not the
          // container size
-          .setAllocatedBytes(0)
-          .setUsedBytes(0)
-          .setNumberOfKeys(0)
-          .setStateEnterTime(Time.monotonicNow())
-          .setOwner("OZONE")
-          .setContainerID(currentCount++)
-          .build();
+          .setAllocatedBytes(0).setUsedBytes(0).setNumberOfKeys(0)
+          .setStateEnterTime(Time.monotonicNow()).setOwner("OZONE")
+          .setContainerID(currentCount++).build();
      stateMap.addContainer(containerInfo);
    } catch (SCMException e) {
      e.printStackTrace();
    }
    containerID = new AtomicInteger(currentCount++);
  }

-  public static Pipeline createSingleNodePipeline(String containerName) throws
-      IOException {
+  public static Pipeline createSingleNodePipeline(String containerName)
+      throws IOException {
    return createPipeline(containerName, 1);
  }
@@ -144,9 +128,8 @@ public class BenchMarkContainerStateMap {
    return createPipeline(containerName, ids);
  }

-  public static Pipeline createPipeline(
-      String containerName, Iterable<DatanodeDetails> ids)
-      throws IOException {
+  public static Pipeline createPipeline(String containerName,
+      Iterable<DatanodeDetails> ids) throws IOException {
    Objects.requireNonNull(ids, "ids == null");
    final Iterator<DatanodeDetails> i = ids.iterator();
    Preconditions.checkArgument(i.hasNext());
@@ -156,37 +139,33 @@ public class BenchMarkContainerStateMap {
        new PipelineChannel(leader.getUuidString(), OPEN,
            ReplicationType.STAND_ALONE, ReplicationFactor.ONE, pipelineName);
    pipelineChannel.addMember(leader);
-    for (; i.hasNext(); ) {
+    for (; i.hasNext();) {
      pipelineChannel.addMember(i.next());
    }
    return new Pipeline(containerName, pipelineChannel);
  }

  @Benchmark
-  public void createContainerBenchMark(BenchMarkContainerStateMap state, Blackhole bh)
-      throws IOException {
+  public void createContainerBenchMark(BenchMarkContainerStateMap state,
+      Blackhole bh) throws IOException {
    Pipeline pipeline = createSingleNodePipeline(UUID.randomUUID().toString());
    int cid = state.containerID.incrementAndGet();
    ContainerInfo containerInfo = new ContainerInfo.Builder()
-        .setContainerName(pipeline.getContainerName())
-        .setState(CLOSED)
+        .setContainerName(pipeline.getContainerName()).setState(CLOSED)
        .setPipeline(pipeline)
        // This is bytes allocated for blocks inside container, not the
        // container size
-        .setAllocatedBytes(0)
-        .setUsedBytes(0)
-        .setNumberOfKeys(0)
-        .setStateEnterTime(Time.monotonicNow())
-        .setOwner("OZONE")
-        .setContainerID(cid)
-        .build();
+        .setAllocatedBytes(0).setUsedBytes(0).setNumberOfKeys(0)
+        .setStateEnterTime(Time.monotonicNow()).setOwner("OZONE")
+        .setContainerID(cid).build();
    state.stateMap.addContainer(containerInfo);
  }

  @Benchmark
  public void getMatchingContainerBenchMark(BenchMarkContainerStateMap state,
      Blackhole bh) {
-    bh.consume(state.stateMap.getMatchingContainerIDs(OPEN, "BILBO",
-        ReplicationFactor.ONE, ReplicationType.STAND_ALONE));
+    bh.consume(state.stateMap
+        .getMatchingContainerIDs(OPEN, "BILBO", ReplicationFactor.ONE,
            ReplicationType.STAND_ALONE));
  }
}
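For readers unfamiliar with the harness: the benchmark classes touched here follow the standard JMH pattern of a @State-annotated fixture, @Setup initialization, and @Benchmark methods that sink results into a Blackhole so the JIT cannot eliminate the measured work. A minimal self-contained sketch; the CounterBench class is hypothetical, not from this patch:

import java.util.concurrent.atomic.AtomicInteger;

import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.infra.Blackhole;

@State(Scope.Thread)
public class CounterBench {

  private AtomicInteger counter;

  @Setup(Level.Trial)
  public void initialize() {
    counter = new AtomicInteger();
  }

  @Benchmark
  public void increment(Blackhole bh) {
    // Blackhole.consume keeps the result "live" so the JIT cannot
    // dead-code-eliminate the increment being measured.
    bh.consume(counter.incrementAndGet());
  }
}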

View File

@@ -7,7 +7,6 @@ import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.container.common.impl.ChunkManagerImpl;
import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
import org.apache.hadoop.ozone.container.common.impl.Dispatcher;
@@ -106,7 +105,8 @@ public class BenchMarkDatanodeDispatcher {
      for (int y = 0; y < 100; y++) {
        String containerName = "container-" + y;
        dispatcher.dispatch(getWriteChunkCommand(containerName, chunkName));
-        dispatcher.dispatch(getPutKeyCommand(containerName, chunkName, keyName));
+        dispatcher
+            .dispatch(getPutKeyCommand(containerName, chunkName, keyName));
      }
    }
  }
@@ -119,8 +119,8 @@ public class BenchMarkDatanodeDispatcher {
  private ContainerCommandRequestProto getCreateContainerCommand(
      String containerName) {
-    CreateContainerRequestProto.Builder createRequest = CreateContainerRequestProto
-        .newBuilder();
+    CreateContainerRequestProto.Builder createRequest =
+        CreateContainerRequestProto.newBuilder();
    createRequest.setPipeline(
        new Pipeline(containerName, pipelineChannel).getProtobufMessage());
    createRequest.setContainerData(
@@ -174,12 +174,12 @@ public class BenchMarkDatanodeDispatcher {
  private ContainerProtos.ChunkInfo getChunkInfo(
      String containerName, String key) {
-    ContainerProtos.ChunkInfo.Builder builder = ContainerProtos.ChunkInfo
-        .newBuilder()
-        .setChunkName(
-            DigestUtils.md5Hex(key) + "_stream_" + containerName + "_chunk_" + key)
-        .setOffset(0)
-        .setLen(data.size());
+    ContainerProtos.ChunkInfo.Builder builder =
+        ContainerProtos.ChunkInfo.newBuilder()
+            .setChunkName(
+                DigestUtils.md5Hex(key) + "_stream_" + containerName + "_chunk_"
+                    + key)
+            .setOffset(0).setLen(data.size());
    return builder.build();
  }
@@ -250,7 +250,7 @@ public class BenchMarkDatanodeDispatcher {
    String containerName = "container-" + random.nextInt(containerCount.get());
    String chunkKey = "chunk-" + random.nextInt(chunkCount.get());
    bmdd.dispatcher.dispatch(getPutKeyCommand(
-        containerName, chunkKey,"key-" + keyCount.getAndIncrement()));
+        containerName, chunkKey, "key-" + keyCount.getAndIncrement()));
  }

  @Benchmark

View File

@@ -21,7 +21,7 @@ import static org.apache.hadoop.ozone.genesis.GenesisUtil.DEFAULT_TYPE;
public class BenchMarkMetadataStoreReads {

  private static final int DATA_LEN = 1024;
-  private static final long maxKeys = 1024 * 10;
+  private static final long MAX_KEYS = 1024 * 10;

  private MetadataStore store;
@@ -33,7 +33,7 @@ public class BenchMarkMetadataStoreReads {
    store = GenesisUtil.getMetadataStore(this.type);
    byte[] data = RandomStringUtils.randomAlphanumeric(DATA_LEN)
        .getBytes(Charset.forName("UTF-8"));
-    for (int x = 0; x < maxKeys; x++) {
+    for (int x = 0; x < MAX_KEYS; x++) {
      store.put(Long.toHexString(x).getBytes(Charset.forName("UTF-8")), data);
    }
    if (type.compareTo(CLOSED_TYPE) == 0) {
@@ -43,7 +43,7 @@ public class BenchMarkMetadataStoreReads {

  @Benchmark
  public void test(Blackhole bh) throws IOException {
-    long x = org.apache.commons.lang3.RandomUtils.nextLong(0L, maxKeys);
+    long x = org.apache.commons.lang3.RandomUtils.nextLong(0L, MAX_KEYS);
    bh.consume(
        store.get(Long.toHexString(x).getBytes(Charset.forName("UTF-8"))));
  }

View File

@@ -20,7 +20,7 @@ public class BenchMarkMetadataStoreWrites {

  private static final int DATA_LEN = 1024;
-  private static final long maxKeys = 1024 * 10;
+  private static final long MAX_KEYS = 1024 * 10;

  private MetadataStore store;
  private byte[] data;
@@ -37,7 +37,7 @@ public class BenchMarkMetadataStoreWrites {

  @Benchmark
  public void test() throws IOException {
-    long x = org.apache.commons.lang3.RandomUtils.nextLong(0L, maxKeys);
+    long x = org.apache.commons.lang3.RandomUtils.nextLong(0L, MAX_KEYS);
    store.put(Long.toHexString(x).getBytes(Charset.forName("UTF-8")), data);
  }
}

View File

@@ -42,13 +42,13 @@ public class BenchMarkRocksDbStore {
  private byte[] data;

  @Param(value = {"8"})
-  private String blockSize;// 4KB default
+  private String blockSize; // 4KB default

  @Param(value = {"64"})
  private String writeBufferSize; //64 MB default

  @Param(value = {"16"})
-  private String maxWriteBufferNumber;// 2 default
+  private String maxWriteBufferNumber; // 2 default

  @Param(value = {"4"})
  private String maxBackgroundFlushes; // 1 default

View File

@@ -31,7 +31,11 @@ import org.openjdk.jmh.runner.options.OptionsBuilder;
 * Hence, these classes do not use the Tool/Runner pattern of standard Hadoop
 * CLI.
 */
-public class Genesis {
+public final class Genesis {
+
+  private Genesis() {
+  }

  public static void main(String[] args) throws RunnerException {
    Options opt = new OptionsBuilder()
        .include(BenchMarkContainerStateMap.class.getSimpleName())

View File

@@ -14,7 +14,7 @@ import java.util.Random;
/**
 * Utility class for benchmark test cases.
 */
-public class GenesisUtil {
+public final class GenesisUtil {

  private GenesisUtil() {
    // private constructor.
@@ -28,8 +28,8 @@ public class GenesisUtil {
  private static final int DB_FILE_LEN = 7;
  private static final String TMP_DIR = "java.io.tmpdir";

-  public static MetadataStore getMetadataStore(String dbType) throws IOException {
+  public static MetadataStore getMetadataStore(String dbType)
+      throws IOException {
    Configuration conf = new Configuration();
    MetadataStoreBuilder builder = MetadataStoreBuilder.newBuilder();
    builder.setConf(conf);