HDFS-13444. Ozone: Fix checkstyle issues in HDFS-7240. Contributed by Lokesh Jain.

Nanda kumar 2018-04-17 16:11:47 +05:30 committed by Owen O'Malley
parent 3d18ca4926
commit 17974ba3a6
35 changed files with 254 additions and 184 deletions

View File

@@ -1,6 +1,6 @@
package org.apache.hadoop.hdds;
public class HddsConfigKeys {
public final class HddsConfigKeys {
private HddsConfigKeys() {
}
}
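The HddsConfigKeys change above is the standard remedy for checkstyle's FinalClass and HideUtilityClassConstructor checks: a class holding only constants or static helpers is declared final and hides its constructor. A minimal sketch of the pattern, using a hypothetical ExampleConstants class:

public final class ExampleConstants {
  public static final String EXAMPLE_KEY = "example.key";
  public static final int EXAMPLE_DEFAULT = 42;

  private ExampleConstants() {
    // Utility class: never instantiated, never subclassed.
  }
}

The same fix recurs throughout this commit, for HddsUtils, HddsServerUtil, ContainerTestUtils, TestUtils, KsmUtils, Genesis, and GenesisUtil.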

View File

@@ -48,7 +48,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT;
/**
* HDDS specific stateless utility functions.
*/
public class HddsUtils {
public final class HddsUtils {
private static final Logger LOG = LoggerFactory.getLogger(HddsUtils.class);

View File

@@ -53,7 +53,8 @@ public class HddsConfServlet extends HttpServlet {
private static final String COMMAND = "cmd";
private static final OzoneConfiguration OZONE_CONFIG =
new OzoneConfiguration();
transient Logger LOG = LoggerFactory.getLogger(HddsConfServlet.class);
private static final transient Logger LOG =
LoggerFactory.getLogger(HddsConfServlet.class);
/**
@@ -152,25 +153,25 @@ public class HddsConfServlet extends HttpServlet {
Configuration config = getOzoneConfig();
switch (cmd) {
case "getOzoneTags":
out.write(gson.toJson(config.get("ozone.system.tags").split(",")));
break;
case "getPropertyByTag":
String tags = request.getParameter("tags");
Map<String, Properties> propMap = new HashMap<>();
case "getOzoneTags":
out.write(gson.toJson(config.get("ozone.system.tags").split(",")));
break;
case "getPropertyByTag":
String tags = request.getParameter("tags");
Map<String, Properties> propMap = new HashMap<>();
for (String tag : tags.split(",")) {
if (config.isPropertyTag(tag)) {
Properties properties = config.getAllPropertiesByTag(tag);
propMap.put(tag, properties);
} else {
LOG.debug("Not a valid tag" + tag);
}
for (String tag : tags.split(",")) {
if (config.isPropertyTag(tag)) {
Properties properties = config.getAllPropertiesByTag(tag);
propMap.put(tag, properties);
} else {
LOG.debug("Not a valid tag" + tag);
}
out.write(gson.toJsonTree(propMap).toString());
break;
default:
throw new IllegalArgumentException(cmd + " is not a valid command.");
}
out.write(gson.toJsonTree(propMap).toString());
break;
default:
throw new IllegalArgumentException(cmd + " is not a valid command.");
}
}
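Two things change in HddsConfServlet: the case labels are re-indented to the level the project's checkstyle indentation settings expect, and the servlet's logger gains explicit modifiers. The conventional SLF4J declaration that checkstyle pushes toward is private static final; a minimal sketch, assuming SLF4J on the classpath and a hypothetical ExampleServlet class (note the committed code also keeps the original transient keyword, which has no effect on a static field):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ExampleServlet {
  // Private, static, final; the class literal keeps the logger name
  // in sync if the class is ever renamed.
  private static final Logger LOG =
      LoggerFactory.getLogger(ExampleServlet.class);

  public void handle(String tag) {
    // Parameterized logging avoids string concatenation when the level
    // is disabled.
    LOG.debug("Not a valid tag {}", tag);
  }
}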

View File

@@ -0,0 +1,18 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdds.scm.container;
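This new file is a package-info.java; checkstyle's JavadocPackage check requires one per package, which is why several otherwise near-empty files appear in this commit. A minimal sketch for a hypothetical package, with the Apache license header elided:

/**
 * Container-related classes for the example SCM module. A
 * package-info.java like this one satisfies checkstyle's
 * JavadocPackage check.
 */
package org.example.scm.container;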

View File

@@ -0,0 +1,19 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.utils;

View File

@@ -55,7 +55,10 @@ import static org.apache.hadoop.hdds.server.ServerUtils.sanitizeUserArgs;
/**
* Hdds stateless helper functions for server side components.
*/
public class HddsServerUtil {
public final class HddsServerUtil {
private HddsServerUtil() {
}
private static final Logger LOG = LoggerFactory.getLogger(
HddsServerUtil.class);

View File

@@ -0,0 +1,19 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdds.scm;

View File

@@ -227,7 +227,7 @@ public class HddsDatanodeService implements ServicePlugin {
return new HddsDatanodeService(conf);
}
public static void main(String args[]) {
public static void main(String[] args) {
try {
StringUtils.startupShutdownMessage(HddsDatanodeService.class, args, LOG);
HddsDatanodeService hddsDatanodeService =
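The main signature fix addresses checkstyle's ArrayTypeStyle check, which requires Java-style array declarations with the brackets on the type rather than on the variable. A minimal sketch:

public final class ArrayStyleExample {

  private ArrayStyleExample() {
  }

  // Java style: "String[] args", not the C-style "String args[]".
  public static void main(String[] args) {
    int[] lengths = new int[args.length]; // brackets on the type here too
    for (int i = 0; i < args.length; i++) {
      lengths[i] = args[i].length();
    }
    System.out.println(java.util.Arrays.toString(lengths));
  }
}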

View File

@@ -107,6 +107,8 @@ public class ChunkManagerImpl implements ChunkManager {
containerManager.incrWriteCount(containerName);
containerManager.incrWriteBytes(containerName, info.getLen());
break;
default:
throw new IOException("Can not identify write operation.");
}
} catch (ExecutionException | NoSuchAlgorithmException | IOException e) {
LOG.error("write data failed. error: {}", e);
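ChunkManagerImpl gains a default branch, satisfying checkstyle's MissingSwitchDefault check: every switch must end with a default, even one that only rejects values that should never occur. A minimal sketch with a hypothetical Stage enum standing in for the protobuf stage type:

import java.io.IOException;

public final class SwitchDefaultExample {

  enum Stage { WRITE_DATA, COMMIT_DATA, COMBINED }

  private SwitchDefaultExample() {
  }

  static void handleWrite(Stage stage) throws IOException {
    switch (stage) {
    case WRITE_DATA:
      // write-path bookkeeping would go here
      break;
    case COMMIT_DATA:
      // commit-path bookkeeping would go here
      break;
    case COMBINED:
      // combined-path bookkeeping would go here
      break;
    default:
      // Rejects any stage the cases above do not recognize.
      throw new IOException("Can not identify write operation.");
    }
  }
}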

View File

@@ -464,7 +464,7 @@ public class Dispatcher implements ContainerDispatcher {
byte[] data = null;
if (msg.getWriteChunk().getStage() == ContainerProtos.Stage.WRITE_DATA
|| msg.getWriteChunk().getStage() == ContainerProtos.Stage.COMBINED) {
data = msg.getWriteChunk().getData().toByteArray();
data = msg.getWriteChunk().getData().toByteArray();
metrics.incContainerBytesStats(Type.WriteChunk, data.length);
}

View File

@@ -195,13 +195,13 @@ public class RegisteredCommand extends
/**
* sets the hostname.
*/
public Builder setHostname(String hostname) {
this.hostname = hostname;
public Builder setHostname(String host) {
this.hostname = host;
return this;
}
public Builder setIpAddress(String ipAddress) {
this.ipAddress = ipAddress;
public Builder setIpAddress(String ipAddr) {
this.ipAddress = ipAddr;
return this;
}
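Renaming the builder's setter parameters (hostname to host, ipAddress to ipAddr) clears checkstyle's HiddenField check, which flags parameters that shadow fields of the same name. A minimal sketch with a hypothetical Endpoint builder:

public final class Endpoint {
  private final String hostname;

  private Endpoint(Builder b) {
    this.hostname = b.hostname;
  }

  public String getHostname() {
    return hostname;
  }

  public static final class Builder {
    private String hostname;

    // "host" no longer shadows the "hostname" field, so HiddenField passes.
    public Builder setHostname(String host) {
      this.hostname = host;
      return this;
    }

    public Endpoint build() {
      return new Endpoint(this);
    }
  }
}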

View File

@@ -0,0 +1,19 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.protocolPB;

View File

@@ -34,7 +34,7 @@ import java.net.InetSocketAddress;
/**
* Helper utility to test containers.
*/
public class ContainerTestUtils {
public final class ContainerTestUtils {
private ContainerTestUtils() {
}

View File

@@ -32,7 +32,7 @@ import java.net.InetSocketAddress;
/**
* Generic utilities for all HDDS/Ozone servers.
*/
public class ServerUtils {
public final class ServerUtils {
private static final Logger LOG = LoggerFactory.getLogger(
ServerUtils.class);

View File

@@ -27,7 +27,7 @@ import java.util.UUID;
/**
* Stateless helper functions to handle scm/datanode connection.
*/
public class TestUtils {
public final class TestUtils {
private TestUtils() {
}

View File

@@ -89,8 +89,8 @@ public class TestContainerPlacement {
capacityPlacer.chooseDatanodes(nodesRequired, containerSize);
assertEquals(nodesRequired, nodesCapacity.size());
List<DatanodeDetails> nodesRandom = randomPlacer.chooseDatanodes(nodesRequired,
containerSize);
List<DatanodeDetails> nodesRandom =
randomPlacer.chooseDatanodes(nodesRequired, containerSize);
// One fifth of all calls are delete
if (x % 5 == 0) {

View File

@@ -59,7 +59,7 @@ public abstract class OzoneCommandHandler {
this.err = err;
}
public void logOut(String msg, String ... variable) {
public void logOut(String msg, String... variable) {
this.out.println(String.format(msg, variable));
}
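The logOut fix is a whitespace rule: checkstyle treats the varargs ellipsis as part of the parameter's type, so "String... variable" is correct and "String ... variable" is not. A minimal sketch:

public final class VarargsExample {

  private VarargsExample() {
  }

  // The ellipsis binds to the type, exactly like the brackets in String[].
  static void logOut(String msg, String... variable) {
    System.out.println(String.format(msg, (Object[]) variable));
  }

  public static void main(String[] args) {
    logOut("%s is %s", "container1", "OPEN");
  }
}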

View File

@@ -148,8 +148,9 @@ public class SCMCLI extends OzoneBaseCLI {
}
private static void addTopLevelOptions(Options options) {
Option containerOps = new Option(
ContainerCommandHandler.CONTAINER_CMD, false, "Container related options");
Option containerOps =
new Option(ContainerCommandHandler.CONTAINER_CMD, false,
"Container related options");
options.addOption(containerOps);
// TODO : add pool, node and pipeline commands.
}
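The rewrapped Option construction, like many of the remaining hunks in this commit, is a LineLength fix: Hadoop's checkstyle configuration caps lines at 80 columns, and overlong statements are broken after the assignment or between arguments with continuation indentation. A minimal sketch, assuming commons-cli on the classpath and a hypothetical CONTAINER_CMD constant:

import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;

public final class WrapExample {
  private static final String CONTAINER_CMD = "container";

  private WrapExample() {
  }

  static void addTopLevelOptions(Options options) {
    // Break after "=" and between arguments so no line passes 80 columns.
    Option containerOps =
        new Option(CONTAINER_CMD, false,
            "Container related options");
    options.addOption(containerOps);
  }
}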

View File

@@ -66,8 +66,9 @@ public class CloseContainerHandler extends OzoneCommandHandler {
Options options = new Options();
addOptions(options);
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.printHelp(SCMCLI.CMD_WIDTH, "hdfs scm -container -close <option>",
"where <option> is", options, "");
helpFormatter
.printHelp(SCMCLI.CMD_WIDTH, "hdfs scm -container -close <option>",
"where <option> is", options, "");
}
public static void addOptions(Options options) {

View File

@@ -35,8 +35,8 @@ import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
/**
* This test class verifies the parsing of SCM endpoint config settings.
* The parsing logic is in {@link org.apache.hadoop.hdds.scm.client.HddsClientUtils}.
* This test class verifies the parsing of SCM endpoint config settings. The
* parsing logic is in {@link org.apache.hadoop.hdds.scm.client.HddsClientUtils}.
*/
public class TestHddsClientUtils {
@Rule

View File

@@ -34,7 +34,7 @@ import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_PORT_DEFAULT;
* Stateless helper functions for the server and client side of KSM
* communication.
*/
public class KsmUtils {
public final class KsmUtils {
private KsmUtils() {
}

View File

@@ -115,7 +115,7 @@ public class OzoneGetConf extends Configured implements Tool {
* {@link OzoneGetConf.Command}.
*/
static class CommandHandler {
String key; // Configuration key to lookup
protected String key; // Configuration key to lookup
CommandHandler() {
this(null);
@@ -136,7 +136,7 @@ public class OzoneGetConf extends Configured implements Tool {
return -1;
}
protected void checkArgs(String args[]) {
protected void checkArgs(String[] args) {
if (args.length > 0) {
throw new HadoopIllegalArgumentException(
"Did not expect argument: " + args[0]);
@@ -144,7 +144,7 @@
}
/** Method to be overridden by sub classes for specific behavior */
/** Method to be overridden by sub classes for specific behavior. */
int doWorkInternal(OzoneGetConf tool, String[] args) throws Exception {
String value = tool.getConf().getTrimmed(key);
@@ -205,10 +205,10 @@
*/
private int doWork(String[] args) {
if (args.length >= 1) {
OzoneGetConf.CommandHandler handler = OzoneGetConf.Command.getHandler(args[0]);
OzoneGetConf.CommandHandler handler =
OzoneGetConf.Command.getHandler(args[0]);
if (handler != null) {
return handler.doWork(this,
Arrays.copyOfRange(args, 1, args.length));
return handler.doWork(this, Arrays.copyOfRange(args, 1, args.length));
}
}
printUsage();
@@ -249,9 +249,9 @@
*/
static class KeySpaceManagersCommandHandler extends CommandHandler {
@Override
public int doWorkInternal(OzoneGetConf tool, String[] args) throws IOException {
tool.printOut(KsmUtils.getKsmAddress(tool.getConf())
.getHostName());
public int doWorkInternal(OzoneGetConf tool, String[] args)
throws IOException {
tool.printOut(KsmUtils.getKsmAddress(tool.getConf()).getHostName());
return 0;
}
}
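Beyond the array-style and line-length fixes, OzoneGetConf gives the key field an explicit protected modifier. Checkstyle's VisibilityModifier check flags fields left at default (package-private) visibility; this project's configuration evidently accepts protected for fields that subclasses read, though the check's out-of-the-box default would demand private with an accessor. A minimal sketch, using the JDK's IllegalArgumentException in place of Hadoop's variant:

public class CommandHandlerSketch {
  // Explicit modifier instead of default visibility; subclasses still
  // have access.
  protected String key;

  protected void checkArgs(String[] args) {
    if (args.length > 0) {
      throw new IllegalArgumentException("Did not expect argument: " + args[0]);
    }
  }
}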

View File

@@ -44,7 +44,7 @@ public class TestContainerStateManager {
private XceiverClientManager xceiverClientManager;
private StorageContainerManager scm;
private Mapping scmContainerMapping;
private ContainerStateManager stateManager;
private ContainerStateManager containerStateManager;
private String containerOwner = "OZONE";
@@ -56,7 +56,7 @@
xceiverClientManager = new XceiverClientManager(conf);
scm = cluster.getStorageContainerManager();
scmContainerMapping = scm.getScmContainerManager();
stateManager = scmContainerMapping.getStateManager();
containerStateManager = scmContainerMapping.getStateManager();
}
@After
@@ -72,7 +72,7 @@
String container1 = "container" + RandomStringUtils.randomNumeric(5);
scm.allocateContainer(xceiverClientManager.getType(),
xceiverClientManager.getFactor(), container1, containerOwner);
ContainerInfo info = stateManager
ContainerInfo info = containerStateManager
.getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.ALLOCATED);
@@ -89,7 +89,7 @@
String container2 = "container" + RandomStringUtils.randomNumeric(5);
scm.allocateContainer(xceiverClientManager.getType(),
xceiverClientManager.getFactor(), container2, containerOwner);
int numContainers = stateManager
int numContainers = containerStateManager
.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.ALLOCATED).size();
@@ -139,13 +139,13 @@
scm.allocateContainer(xceiverClientManager.getType(),
xceiverClientManager.getFactor(), container2, containerOwner);
ContainerInfo info = stateManager
ContainerInfo info = containerStateManager
.getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.OPEN);
Assert.assertEquals(container1, info.getContainerName());
info = stateManager
info = containerStateManager
.getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.ALLOCATED);
@@ -158,7 +158,7 @@
// space has already been allocated in container1, now container 2 should
// be chosen.
info = stateManager
info = containerStateManager
.getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.OPEN);
@@ -167,7 +167,7 @@
@Test
public void testUpdateContainerState() throws IOException {
NavigableSet<ContainerID> containerList = stateManager
NavigableSet<ContainerID> containerList = containerStateManager
.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.ALLOCATED);
@@ -179,49 +179,49 @@
String container1 = "container" + RandomStringUtils.randomNumeric(5);
scm.allocateContainer(xceiverClientManager.getType(),
xceiverClientManager.getFactor(), container1, containerOwner);
containers = stateManager.getMatchingContainerIDs(containerOwner,
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.ALLOCATED).size();
Assert.assertEquals(1, containers);
scmContainerMapping.updateContainerState(container1,
HddsProtos.LifeCycleEvent.CREATE);
containers = stateManager.getMatchingContainerIDs(containerOwner,
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.CREATING).size();
Assert.assertEquals(1, containers);
scmContainerMapping.updateContainerState(container1,
HddsProtos.LifeCycleEvent.CREATED);
containers = stateManager.getMatchingContainerIDs(containerOwner,
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.OPEN).size();
Assert.assertEquals(1, containers);
scmContainerMapping
.updateContainerState(container1, HddsProtos.LifeCycleEvent.FINALIZE);
containers = stateManager.getMatchingContainerIDs(containerOwner,
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.CLOSING).size();
Assert.assertEquals(1, containers);
scmContainerMapping
.updateContainerState(container1, HddsProtos.LifeCycleEvent.CLOSE);
containers = stateManager.getMatchingContainerIDs(containerOwner,
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.CLOSED).size();
Assert.assertEquals(1, containers);
scmContainerMapping
.updateContainerState(container1, HddsProtos.LifeCycleEvent.DELETE);
containers = stateManager.getMatchingContainerIDs(containerOwner,
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.DELETING).size();
Assert.assertEquals(1, containers);
scmContainerMapping
.updateContainerState(container1, HddsProtos.LifeCycleEvent.CLEANUP);
containers = stateManager.getMatchingContainerIDs(containerOwner,
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.DELETED).size();
Assert.assertEquals(1, containers);
@@ -235,7 +235,7 @@
HddsProtos.LifeCycleEvent.CREATE);
scmContainerMapping
.updateContainerState(container2, HddsProtos.LifeCycleEvent.TIMEOUT);
containers = stateManager.getMatchingContainerIDs(containerOwner,
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.DELETING).size();
Assert.assertEquals(1, containers);
@@ -253,7 +253,7 @@
HddsProtos.LifeCycleEvent.FINALIZE);
scmContainerMapping
.updateContainerState(container3, HddsProtos.LifeCycleEvent.CLOSE);
containers = stateManager.getMatchingContainerIDs(containerOwner,
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.CLOSED).size();
Assert.assertEquals(1, containers);
@@ -275,7 +275,7 @@
long size = Math.abs(ran.nextLong() % OzoneConsts.GB);
allocatedSize += size;
// trigger allocating bytes by calling getMatchingContainer
ContainerInfo info = stateManager
ContainerInfo info = containerStateManager
.getMatchingContainer(size, containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.OPEN);

View File

@@ -119,8 +119,8 @@ public interface MiniOzoneCluster {
* @return StorageContainerLocation Client
* @throws IOException
*/
StorageContainerLocationProtocolClientSideTranslatorPB
getStorageContainerLocationClient() throws IOException;
StorageContainerLocationProtocolClientSideTranslatorPB getStorageContainerLocationClient()
throws IOException;
/**
* Restarts StorageContainerManager instance.
@@ -166,6 +166,9 @@
return new MiniOzoneClusterImpl.Builder(conf);
}
/**
* Builder class for MiniOzoneCluster.
*/
abstract class Builder {
protected static final int DEFAULT_HB_INTERVAL_MS = 1000;
@@ -280,7 +283,7 @@
*
* @return MiniOzoneCluster.Builder
*/
public Builder setHbProcessorInterval (int val) {
public Builder setHbProcessorInterval(int val) {
hbProcessorInterval = Optional.of(val);
return this;
}

View File

@@ -179,8 +179,8 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
* @throws IOException if there is an I/O error
*/
@Override
public StorageContainerLocationProtocolClientSideTranslatorPB
getStorageContainerLocationClient() throws IOException {
public StorageContainerLocationProtocolClientSideTranslatorPB getStorageContainerLocationClient()
throws IOException {
long version = RPC.getProtocolVersion(
StorageContainerLocationProtocolPB.class);
InetSocketAddress address = scm.getClientRpcAddress();

View File

@@ -96,9 +96,10 @@ public class TestContainerReportWithKeys {
ObjectStore objectStore = client.getObjectStore();
objectStore.createVolume(volumeName);
objectStore.getVolume(volumeName).createBucket(bucketName);
OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName)
.createKey(keyName, keySize, ReplicationType.STAND_ALONE,
ReplicationFactor.ONE);
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
.createKey(keyName, keySize, ReplicationType.STAND_ALONE,
ReplicationFactor.ONE);
String dataString = RandomStringUtils.randomAlphabetic(keySize);
key.write(dataString.getBytes());
key.close();

View File

@@ -96,7 +96,7 @@ public class TestContainerSQLCli {
private final static long DEFAULT_BLOCK_SIZE = 4 * KB;
private static HddsProtos.ReplicationFactor factor;
private static HddsProtos.ReplicationType type;
private static final String containerOwner = "OZONE";
private static final String CONTAINER_OWNER = "OZONE";
@Before
@@ -141,7 +141,7 @@
}
assertEquals(2, nodeManager.getAllNodes().size());
AllocatedBlock ab1 = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, type,
factor, containerOwner);
factor, CONTAINER_OWNER);
pipeline1 = ab1.getPipeline();
blockContainerMap.put(ab1.getKey(), pipeline1.getContainerName());
@@ -154,7 +154,7 @@
// the size of blockContainerMap will vary each time the test is run.
while (true) {
ab2 = blockManager
.allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner);
.allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, CONTAINER_OWNER);
pipeline2 = ab2.getPipeline();
blockContainerMap.put(ab2.getKey(), pipeline2.getContainerName());
if (!pipeline1.getContainerName().equals(pipeline2.getContainerName())) {
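The rename of containerOwner to CONTAINER_OWNER (and, in the benchmark classes below, maxKeys to MAX_KEYS) satisfies checkstyle's ConstantName check: static final fields are constants and are named in UPPER_SNAKE_CASE. A minimal sketch:

public final class ConstantNameExample {
  // static final => constant => UPPER_SNAKE_CASE.
  private static final String CONTAINER_OWNER = "OZONE";
  private static final long MAX_KEYS = 1024 * 10;

  private ConstantNameExample() {
  }

  public static String describe() {
    return CONTAINER_OWNER + " holds up to " + MAX_KEYS + " keys";
  }
}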

View File

@@ -14,7 +14,7 @@
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdds.scm.node;
package org.apache.hadoop.ozone.scm.node;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

View File

@@ -42,89 +42,73 @@ import java.util.ArrayList;
import java.util.Iterator;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicInteger;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@State(Scope.Thread)
public class BenchMarkContainerStateMap {
public ContainerStateMap stateMap;
public AtomicInteger containerID;
private ContainerStateMap stateMap;
private AtomicInteger containerID;
@Setup(Level.Trial)
public void initialize() throws IOException {
stateMap = new ContainerStateMap();
Pipeline pipeline = createSingleNodePipeline(UUID.randomUUID().toString());
Preconditions.checkNotNull(pipeline, "Pipeline cannot be null.");
int currentCount = 1;
for (int x = 1; x < 1000; x++) {
try {
ContainerInfo containerInfo = new ContainerInfo.Builder()
.setContainerName(pipeline.getContainerName())
.setState(CLOSED)
.setPipeline(pipeline)
// This is bytes allocated for blocks inside container, not the
// container size
.setAllocatedBytes(0)
.setUsedBytes(0)
.setNumberOfKeys(0)
.setStateEnterTime(Time.monotonicNow())
.setOwner("OZONE")
.setContainerID(x)
.build();
stateMap.addContainer(containerInfo);
currentCount++;
} catch (SCMException e) {
e.printStackTrace();
}
}
for (int y = currentCount; y < 2000; y++) {
try {
ContainerInfo containerInfo = new ContainerInfo.Builder()
.setContainerName(pipeline.getContainerName())
.setState(OPEN)
.setPipeline(pipeline)
// This is bytes allocated for blocks inside container, not the
// container size
.setAllocatedBytes(0)
.setUsedBytes(0)
.setNumberOfKeys(0)
.setStateEnterTime(Time.monotonicNow())
.setOwner("OZONE")
.setContainerID(y)
.build();
stateMap.addContainer(containerInfo);
currentCount++;
} catch (SCMException e) {
e.printStackTrace();
}
}
@Setup(Level.Trial)
public void initialize() throws IOException {
stateMap = new ContainerStateMap();
Pipeline pipeline = createSingleNodePipeline(UUID.randomUUID().toString());
Preconditions.checkNotNull(pipeline, "Pipeline cannot be null.");
int currentCount = 1;
for (int x = 1; x < 1000; x++) {
try {
ContainerInfo containerInfo = new ContainerInfo.Builder()
.setContainerName(pipeline.getContainerName())
.setState(OPEN)
.setContainerName(pipeline.getContainerName()).setState(CLOSED)
.setPipeline(pipeline)
// This is bytes allocated for blocks inside container, not the
// container size
.setAllocatedBytes(0)
.setUsedBytes(0)
.setNumberOfKeys(0)
.setStateEnterTime(Time.monotonicNow())
.setOwner("OZONE")
.setContainerID(currentCount++)
.build();
.setAllocatedBytes(0).setUsedBytes(0).setNumberOfKeys(0)
.setStateEnterTime(Time.monotonicNow()).setOwner("OZONE")
.setContainerID(x).build();
stateMap.addContainer(containerInfo);
currentCount++;
} catch (SCMException e) {
e.printStackTrace();
}
containerID = new AtomicInteger(currentCount++);
}
for (int y = currentCount; y < 2000; y++) {
try {
ContainerInfo containerInfo = new ContainerInfo.Builder()
.setContainerName(pipeline.getContainerName()).setState(OPEN)
.setPipeline(pipeline)
// This is bytes allocated for blocks inside container, not the
// container size
.setAllocatedBytes(0).setUsedBytes(0).setNumberOfKeys(0)
.setStateEnterTime(Time.monotonicNow()).setOwner("OZONE")
.setContainerID(y).build();
stateMap.addContainer(containerInfo);
currentCount++;
} catch (SCMException e) {
e.printStackTrace();
}
}
try {
ContainerInfo containerInfo = new ContainerInfo.Builder()
.setContainerName(pipeline.getContainerName()).setState(OPEN)
.setPipeline(pipeline)
// This is bytes allocated for blocks inside container, not the
// container size
.setAllocatedBytes(0).setUsedBytes(0).setNumberOfKeys(0)
.setStateEnterTime(Time.monotonicNow()).setOwner("OZONE")
.setContainerID(currentCount++).build();
stateMap.addContainer(containerInfo);
} catch (SCMException e) {
e.printStackTrace();
}
public static Pipeline createSingleNodePipeline(String containerName) throws
IOException {
containerID = new AtomicInteger(currentCount++);
}
public static Pipeline createSingleNodePipeline(String containerName)
throws IOException {
return createPipeline(containerName, 1);
}
@@ -144,9 +128,8 @@ public class BenchMarkContainerStateMap {
return createPipeline(containerName, ids);
}
public static Pipeline createPipeline(
String containerName, Iterable<DatanodeDetails> ids)
throws IOException {
public static Pipeline createPipeline(String containerName,
Iterable<DatanodeDetails> ids) throws IOException {
Objects.requireNonNull(ids, "ids == null");
final Iterator<DatanodeDetails> i = ids.iterator();
Preconditions.checkArgument(i.hasNext());
@@ -156,37 +139,33 @@ public class BenchMarkContainerStateMap {
new PipelineChannel(leader.getUuidString(), OPEN,
ReplicationType.STAND_ALONE, ReplicationFactor.ONE, pipelineName);
pipelineChannel.addMember(leader);
for (; i.hasNext(); ) {
for (; i.hasNext();) {
pipelineChannel.addMember(i.next());
}
return new Pipeline(containerName, pipelineChannel);
}
@Benchmark
public void createContainerBenchMark(BenchMarkContainerStateMap state, Blackhole bh)
throws IOException {
public void createContainerBenchMark(BenchMarkContainerStateMap state,
Blackhole bh) throws IOException {
Pipeline pipeline = createSingleNodePipeline(UUID.randomUUID().toString());
int cid = state.containerID.incrementAndGet();
ContainerInfo containerInfo = new ContainerInfo.Builder()
.setContainerName(pipeline.getContainerName())
.setState(CLOSED)
.setContainerName(pipeline.getContainerName()).setState(CLOSED)
.setPipeline(pipeline)
// This is bytes allocated for blocks inside container, not the
// container size
.setAllocatedBytes(0)
.setUsedBytes(0)
.setNumberOfKeys(0)
.setStateEnterTime(Time.monotonicNow())
.setOwner("OZONE")
.setContainerID(cid)
.build();
.setAllocatedBytes(0).setUsedBytes(0).setNumberOfKeys(0)
.setStateEnterTime(Time.monotonicNow()).setOwner("OZONE")
.setContainerID(cid).build();
state.stateMap.addContainer(containerInfo);
}
@Benchmark
public void getMatchingContainerBenchMark(BenchMarkContainerStateMap state,
Blackhole bh) {
bh.consume(state.stateMap.getMatchingContainerIDs(OPEN, "BILBO",
ReplicationFactor.ONE, ReplicationType.STAND_ALONE));
bh.consume(state.stateMap
.getMatchingContainerIDs(OPEN, "BILBO", ReplicationFactor.ONE,
ReplicationType.STAND_ALONE));
}
}

View File

@@ -7,7 +7,6 @@ import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.container.common.impl.ChunkManagerImpl;
import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
import org.apache.hadoop.ozone.container.common.impl.Dispatcher;
@@ -106,7 +105,8 @@ public class BenchMarkDatanodeDispatcher {
for (int y = 0; y < 100; y++) {
String containerName = "container-" + y;
dispatcher.dispatch(getWriteChunkCommand(containerName, chunkName));
dispatcher.dispatch(getPutKeyCommand(containerName, chunkName, keyName));
dispatcher
.dispatch(getPutKeyCommand(containerName, chunkName, keyName));
}
}
}
@@ -119,8 +119,8 @@ public class BenchMarkDatanodeDispatcher {
private ContainerCommandRequestProto getCreateContainerCommand(
String containerName) {
CreateContainerRequestProto.Builder createRequest = CreateContainerRequestProto
.newBuilder();
CreateContainerRequestProto.Builder createRequest =
CreateContainerRequestProto.newBuilder();
createRequest.setPipeline(
new Pipeline(containerName, pipelineChannel).getProtobufMessage());
createRequest.setContainerData(
@@ -174,12 +174,12 @@ public class BenchMarkDatanodeDispatcher {
private ContainerProtos.ChunkInfo getChunkInfo(
String containerName, String key) {
ContainerProtos.ChunkInfo.Builder builder = ContainerProtos.ChunkInfo
.newBuilder()
.setChunkName(
DigestUtils.md5Hex(key) + "_stream_" + containerName + "_chunk_" + key)
.setOffset(0)
.setLen(data.size());
ContainerProtos.ChunkInfo.Builder builder =
ContainerProtos.ChunkInfo.newBuilder()
.setChunkName(
DigestUtils.md5Hex(key) + "_stream_" + containerName + "_chunk_"
+ key)
.setOffset(0).setLen(data.size());
return builder.build();
}
@@ -250,7 +250,7 @@ public class BenchMarkDatanodeDispatcher {
String containerName = "container-" + random.nextInt(containerCount.get());
String chunkKey = "chunk-" + random.nextInt(chunkCount.get());
bmdd.dispatcher.dispatch(getPutKeyCommand(
containerName, chunkKey,"key-" + keyCount.getAndIncrement()));
containerName, chunkKey, "key-" + keyCount.getAndIncrement()));
}
@Benchmark

View File

@@ -21,7 +21,7 @@ import static org.apache.hadoop.ozone.genesis.GenesisUtil.DEFAULT_TYPE;
public class BenchMarkMetadataStoreReads {
private static final int DATA_LEN = 1024;
private static final long maxKeys = 1024 * 10;
private static final long MAX_KEYS = 1024 * 10;
private MetadataStore store;
@@ -33,7 +33,7 @@
store = GenesisUtil.getMetadataStore(this.type);
byte[] data = RandomStringUtils.randomAlphanumeric(DATA_LEN)
.getBytes(Charset.forName("UTF-8"));
for (int x = 0; x < maxKeys; x++) {
for (int x = 0; x < MAX_KEYS; x++) {
store.put(Long.toHexString(x).getBytes(Charset.forName("UTF-8")), data);
}
if (type.compareTo(CLOSED_TYPE) == 0) {
@@ -43,7 +43,7 @@
@Benchmark
public void test(Blackhole bh) throws IOException {
long x = org.apache.commons.lang3.RandomUtils.nextLong(0L, maxKeys);
long x = org.apache.commons.lang3.RandomUtils.nextLong(0L, MAX_KEYS);
bh.consume(
store.get(Long.toHexString(x).getBytes(Charset.forName("UTF-8"))));
}

View File

@@ -20,7 +20,7 @@ public class BenchMarkMetadataStoreWrites {
private static final int DATA_LEN = 1024;
private static final long maxKeys = 1024 * 10;
private static final long MAX_KEYS = 1024 * 10;
private MetadataStore store;
private byte[] data;
@@ -37,7 +37,7 @@
@Benchmark
public void test() throws IOException {
long x = org.apache.commons.lang3.RandomUtils.nextLong(0L, maxKeys);
long x = org.apache.commons.lang3.RandomUtils.nextLong(0L, MAX_KEYS);
store.put(Long.toHexString(x).getBytes(Charset.forName("UTF-8")), data);
}
}

View File

@@ -42,13 +42,13 @@ public class BenchMarkRocksDbStore {
private byte[] data;
@Param(value = {"8"})
private String blockSize;// 4KB default
private String blockSize; // 4KB default
@Param(value = {"64"})
private String writeBufferSize; //64 MB default
@Param(value = {"16"})
private String maxWriteBufferNumber;// 2 default
private String maxWriteBufferNumber; // 2 default
@Param(value = {"4"})
private String maxBackgroundFlushes; // 1 default

View File

@@ -31,7 +31,11 @@ import org.openjdk.jmh.runner.options.OptionsBuilder;
* Hence, these classes do not use the Tool/Runner pattern of standard Hadoop
* CLI.
*/
public class Genesis {
public final class Genesis {
private Genesis() {
}
public static void main(String[] args) throws RunnerException {
Options opt = new OptionsBuilder()
.include(BenchMarkContainerStateMap.class.getSimpleName())

View File

@@ -14,7 +14,7 @@ import java.util.Random;
/**
* Utility class for benchmark test cases.
*/
public class GenesisUtil {
public final class GenesisUtil {
private GenesisUtil() {
// private constructor.
@@ -28,8 +28,8 @@ public class GenesisUtil {
private static final int DB_FILE_LEN = 7;
private static final String TMP_DIR = "java.io.tmpdir";
public static MetadataStore getMetadataStore(String dbType) throws IOException {
public static MetadataStore getMetadataStore(String dbType)
throws IOException {
Configuration conf = new Configuration();
MetadataStoreBuilder builder = MetadataStoreBuilder.newBuilder();
builder.setConf(conf);