HDDS-125. Cleanup HDDS CheckStyle issues.
Contributed by Anu Engineer.
commit 9502b47bd2
parent 17aa40f669
@@ -41,7 +41,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
@@ -190,7 +190,7 @@ public class DeletedBlockLogImpl implements DeletedBlockLog {
     try {
       for(Long txID : txIDs) {
         try {
-          byte [] deleteBlockBytes =
+          byte[] deleteBlockBytes =
               deletedStore.get(Longs.toByteArray(txID));
           if (deleteBlockBytes == null) {
             LOG.warn("Delete txID {} not found", txID);
@@ -152,7 +152,8 @@ public class ContainerMapping implements Mapping {
     ContainerInfo containerInfo;
     lock.lock();
     try {
-      byte[] containerBytes = containerStore.get(Longs.toByteArray(containerID));
+      byte[] containerBytes = containerStore.get(
+          Longs.toByteArray(containerID));
       if (containerBytes == null) {
         throw new SCMException(
             "Specified key does not exist. key : " + containerID,
@@ -229,7 +230,8 @@ public class ContainerMapping implements Mapping {
       containerStateManager.allocateContainer(
           pipelineSelector, type, replicationFactor, owner);

-      byte[] containerIDBytes = Longs.toByteArray(containerInfo.getContainerID());
+      byte[] containerIDBytes = Longs.toByteArray(
+          containerInfo.getContainerID());
       containerStore.put(containerIDBytes, containerInfo.getProtobuf()
           .toByteArray());
     } finally {
@@ -230,18 +230,18 @@ public class ContainerStateManager implements Closeable {
   *
   * Container State Flow:
   *
-  * [ALLOCATED]------->[CREATING]--------->[OPEN]---------->[CLOSING]------->[CLOSED]
+  * [ALLOCATED]---->[CREATING]------>[OPEN]-------->[CLOSING]------->[CLOSED]
   *            (CREATE)     | (CREATED)      (FINALIZE)     (CLOSE)    |
   *                         |                                          |
   *                         |                                          |
   *                         |(TIMEOUT)                         (DELETE)|
   *                         |                                          |
-  * +------------------> [DELETING] <-------------------+
+  *                         +-------------> [DELETING] <--------------+
   *                                             |
   *                                             |
   *                                    (CLEANUP)|
   *                                             |
   *                                         [DELETED]
   */
  private void initializeStateMachine() {
    stateMachine.addTransition(LifeCycleState.ALLOCATED,
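Note: the hunk above only re-wraps the lifecycle diagram to fit the 80-column limit; the transitions themselves are registered one by one in initializeStateMachine(). Below is a minimal, self-contained sketch of the same event/state table. The enum and event names follow the diagram, but the table-based FSM (and the exact source state of each edge) is an illustration, not the actual HDDS StateMachine implementation:

    import java.util.Collections;
    import java.util.EnumMap;
    import java.util.Map;

    // Illustrative container-lifecycle FSM mirroring the diagram above.
    public final class LifeCycleSketch {
      enum State { ALLOCATED, CREATING, OPEN, CLOSING, CLOSED, DELETING, DELETED }
      enum Event { CREATE, CREATED, FINALIZE, CLOSE, TIMEOUT, DELETE, CLEANUP }

      private final Map<State, Map<Event, State>> transitions =
          new EnumMap<>(State.class);

      private void addTransition(State from, State to, Event event) {
        transitions.computeIfAbsent(from, s -> new EnumMap<>(Event.class))
            .put(event, to);
      }

      public LifeCycleSketch() {
        addTransition(State.ALLOCATED, State.CREATING, Event.CREATE);
        addTransition(State.CREATING, State.OPEN, Event.CREATED);
        addTransition(State.OPEN, State.CLOSING, Event.FINALIZE);
        addTransition(State.CLOSING, State.CLOSED, Event.CLOSE);
        addTransition(State.CREATING, State.DELETING, Event.TIMEOUT);
        addTransition(State.CLOSED, State.DELETING, Event.DELETE);
        addTransition(State.DELETING, State.DELETED, Event.CLEANUP);
      }

      public State move(State current, Event event) {
        State next = transitions
            .getOrDefault(current, Collections.emptyMap()).get(event);
        if (next == null) {
          throw new IllegalStateException(current + " cannot handle " + event);
        }
        return next;
      }
    }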
@@ -45,7 +45,8 @@ public interface Mapping extends Closeable {
   * The max size of the searching range cannot exceed the
   * value of count.
   *
-  * @param startContainerID start containerID, >=0, start searching at the head if 0.
+  * @param startContainerID start containerID, >=0,
+  * start searching at the head if 0.
   * @param count count must be >= 0
   *              Usually the count will be replace with a very big
   *              value instead of being unlimited in case the db is very big.
@@ -53,7 +54,8 @@ public interface Mapping extends Closeable {
   * @return a list of container.
   * @throws IOException
   */
-  List<ContainerInfo> listContainer(long startContainerID, int count) throws IOException;
+  List<ContainerInfo> listContainer(long startContainerID, int count)
+      throws IOException;

  /**
   * Allocates a new container for a given keyName and replication factor.
@@ -64,7 +66,8 @@ public interface Mapping extends Closeable {
   * @throws IOException
   */
  ContainerInfo allocateContainer(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor replicationFactor, String owner) throws IOException;
+      HddsProtos.ReplicationFactor replicationFactor, String owner)
+      throws IOException;

  /**
   * Deletes a container from SCM.
@@ -31,7 +31,7 @@ import java.util.UUID;
 @InterfaceAudience.Private
 public interface SCMNodeStorageStatMXBean {
   /**
-   * Get the capacity of the dataNode
+   * Get the capacity of the dataNode.
    * @param datanodeID Datanode Id
    * @return long
    */
@@ -52,7 +52,7 @@ public interface SCMNodeStorageStatMXBean {
   long getUsedSpace(UUID datanodeId);

   /**
-   * Returns the total capacity of all dataNodes
+   * Returns the total capacity of all dataNodes.
    * @return long
    */
   long getTotalCapacity();
@@ -56,7 +56,7 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
   // NodeStorageInfo MXBean
   private ObjectName scmNodeStorageInfoBean;
   /**
-   * constructs the scmNodeStorageReportMap object
+   * constructs the scmNodeStorageReportMap object.
    */
   public SCMNodeStorageStatMap(OzoneConfiguration conf) {
     // scmNodeStorageReportMap = new ConcurrentHashMap<>();
@@ -73,6 +73,9 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
         HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT);
   }

+  /**
+   * Enum that Describes what we should do at various thresholds.
+   */
   public enum UtilizationThreshold {
     NORMAL, WARN, CRITICAL;
   }
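Note: the new javadoc names the role of UtilizationThreshold. A hedged sketch of how a used/capacity ratio could be mapped onto the three levels follows; the 0.75 and 0.95 cut-offs stand in for the configured warning/critical thresholds and are not the HDDS defaults:

    // Illustrative only: the cut-off constants are assumptions.
    final class ThresholdSketch {
      enum UtilizationThreshold { NORMAL, WARN, CRITICAL }

      private static final double WARN_LIMIT = 0.75;
      private static final double CRITICAL_LIMIT = 0.95;

      static UtilizationThreshold classify(long scmUsed, long capacity) {
        double ratio = scmUsed / (double) capacity;
        if (ratio >= CRITICAL_LIMIT) {
          return UtilizationThreshold.CRITICAL;
        }
        return ratio >= WARN_LIMIT
            ? UtilizationThreshold.WARN : UtilizationThreshold.NORMAL;
      }

      private ThresholdSketch() { }
    }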
@@ -107,8 +110,8 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
    * @param datanodeID -- Datanode UUID
    * @param report - set if StorageReports.
    */
-  public void insertNewDatanode(UUID datanodeID, Set<StorageLocationReport> report)
-      throws SCMException {
+  public void insertNewDatanode(UUID datanodeID,
+      Set<StorageLocationReport> report) throws SCMException {
     Preconditions.checkNotNull(report);
     Preconditions.checkState(report.size() != 0);
     Preconditions.checkNotNull(datanodeID);
@@ -142,8 +145,8 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
    * @throws SCMException - if we don't know about this datanode, for new DN
    *                        use insertNewDatanode.
    */
-  public void updateDatanodeMap(UUID datanodeID, Set<StorageLocationReport> report)
-      throws SCMException {
+  public void updateDatanodeMap(UUID datanodeID,
+      Set<StorageLocationReport> report) throws SCMException {
     Preconditions.checkNotNull(datanodeID);
     Preconditions.checkNotNull(report);
     Preconditions.checkState(report.size() != 0);
@@ -301,7 +304,7 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
   }

   /**
-   * removes the dataNode from scmNodeStorageReportMap
+   * removes the dataNode from scmNodeStorageReportMap.
    * @param datanodeID
    * @throws SCMException in case the dataNode is not found in the map.
    */
@@ -339,11 +342,11 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
   }

   /**
-   * get the scmUsed ratio
+   * get the scmUsed ratio.
    */
   public double getScmUsedratio(long scmUsed, long capacity) {
     double scmUsedRatio =
-        truncateDecimals (scmUsed / (double) capacity);
+        truncateDecimals(scmUsed / (double) capacity);
     return scmUsedRatio;
   }
   /**
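Note: besides the javadoc period, this hunk removes the space between the method name and its argument list (checkstyle's MethodParamPad check). The ratio itself is plain division; a worked example using the 2 GB / 10 GB figures that appear elsewhere in these tests:

    public final class UsedRatioExample {
      public static void main(String[] args) {
        long capacity = 10L * 1024 * 1024 * 1024;  // 10 GB
        long scmUsed = 2L * 1024 * 1024 * 1024;    // 2 GB
        // Same arithmetic as getScmUsedratio, before truncation: 2/10 = 0.2.
        System.out.println(scmUsed / (double) capacity);  // prints 0.2
      }
    }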
@@ -69,14 +69,14 @@ public class StorageReportResult {
     }

     public ReportResultBuilder setFullVolumeSet(
-        Set<StorageLocationReport> fullVolumes) {
-      this.fullVolumes = fullVolumes;
+        Set<StorageLocationReport> fullVolumesSet) {
+      this.fullVolumes = fullVolumesSet;
       return this;
     }

     public ReportResultBuilder setFailedVolumeSet(
-        Set<StorageLocationReport> failedVolumes) {
-      this.failedVolumes = failedVolumes;
+        Set<StorageLocationReport> failedVolumesSet) {
+      this.failedVolumes = failedVolumesSet;
       return this;
     }

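Note: the parameter renames above are HiddenField fixes: checkstyle flags a parameter that shares its name with a field, because it shadows the field inside the method body. A simplified before/after sketch (String elements stand in for StorageLocationReport to keep it self-contained):

    import java.util.Set;

    class ReportResultSketch {
      private Set<String> fullVolumes;

      // Before: the parameter "fullVolumes" hides the field of the same
      // name, so checkstyle flags it.
      // ReportResultSketch setFullVolumeSet(Set<String> fullVolumes) { ... }

      // After: a distinct parameter name; the assignment is unambiguous
      // even without the "this." qualifier.
      ReportResultSketch setFullVolumeSet(Set<String> fullVolumesSet) {
        this.fullVolumes = fullVolumesSet;
        return this;
      }
    }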
@@ -92,7 +92,7 @@ public class Node2ContainerMap {
   }

   /**
-   * Removes datanode Entry from the map
+   * Removes datanode Entry from the map.
    * @param datanodeID - Datanode ID.
    */
   public void removeDatanode(UUID datanodeID) {
@@ -170,8 +170,9 @@ public class PipelineSelector {
       throws IOException {
     PipelineManager manager = getPipelineManager(replicationType);
     Preconditions.checkNotNull(manager, "Found invalid pipeline manager");
-    LOG.debug("Getting replication pipeline forReplicationType {} : ReplicationFactor {}",
-        replicationType.toString(), replicationFactor.toString());
+    LOG.debug("Getting replication pipeline forReplicationType {} :" +
+        " ReplicationFactor {}", replicationType.toString(),
+        replicationFactor.toString());
     return manager.
         getPipeline(replicationFactor, replicationType);
   }
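Note: two properties make this re-wrap free at runtime: adjacent string literals joined with + are folded into a single constant by javac, and SLF4J's {} placeholders defer formatting until DEBUG logging is actually enabled. (The explicit .toString() calls kept by the diff are unnecessary with SLF4J, which accepts plain Objects, but removing them was out of scope for a checkstyle pass.) A minimal sketch of the same pattern:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class LogWrapSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(LogWrapSketch.class);

      void report(Object type, Object factor) {
        // Compiles to one constant string; the arguments fill the {}
        // slots only when DEBUG is enabled.
        LOG.debug("Getting replication pipeline forReplicationType {} :" +
            " ReplicationFactor {}", type, factor);
      }
    }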
@@ -45,7 +45,6 @@ import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
 import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.util.MBeans;
@@ -87,7 +86,7 @@ import static org.apache.hadoop.util.ExitUtil.terminate;
  * create a container, which then can be used to store data.
  */
 @InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
-public class StorageContainerManager extends ServiceRuntimeInfoImpl
+public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     implements SCMMXBean {

   private static final Logger LOG = LoggerFactory
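Note: marking StorageContainerManager final matches checkstyle's FinalClass check: a class whose constructors are all private cannot be subclassed anyway, so it should say so explicitly. That this is the trigger here is an inference from the class's factory-based construction, not stated in the diff. A sketch of the pattern, where createSCM is a stand-in name rather than the exact SCM factory signature:

    public final class ScmSketch {
      // Only private constructors: no subclass could ever call super().
      private ScmSketch() {
      }

      // Instances are obtained through a static factory instead.
      public static ScmSketch createSCM() {
        return new ScmSketch();
      }
    }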
@@ -39,7 +39,6 @@ import org.junit.runners.Parameterized.Parameters;
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLConnection;
 import java.util.Arrays;
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/**
+ * Make checkstyle happy.
+ * */
+package org.apache.hadoop.hdds.scm.block;
@@ -216,8 +216,10 @@ public class TestContainerMapping {

     mapping.processContainerReports(crBuilder.build());

-    ContainerInfo updatedContainer = mapping.getContainer(info.getContainerID());
-    Assert.assertEquals(100000000L, updatedContainer.getNumberOfKeys());
+    ContainerInfo updatedContainer =
+        mapping.getContainer(info.getContainerID());
+    Assert.assertEquals(100000000L,
+        updatedContainer.getNumberOfKeys());
     Assert.assertEquals(2000000000L, updatedContainer.getUsedBytes());
   }

@@ -251,8 +253,10 @@ public class TestContainerMapping {

     mapping.processContainerReports(crBuilder.build());

-    ContainerInfo updatedContainer = mapping.getContainer(info.getContainerID());
-    Assert.assertEquals(500000000L, updatedContainer.getNumberOfKeys());
+    ContainerInfo updatedContainer =
+        mapping.getContainer(info.getContainerID());
+    Assert.assertEquals(500000000L,
+        updatedContainer.getNumberOfKeys());
     Assert.assertEquals(5368705120L, updatedContainer.getUsedBytes());
     NavigableSet<ContainerID> pendingCloseContainers = mapping.getStateManager()
         .getMatchingContainerIDs(
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle happy.
+ */
+package org.apache.hadoop.hdds.scm.container.closer;
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle Happy.
+ */
+package org.apache.hadoop.hdds.scm.container;
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle Happy.
+ */
+package org.apache.hadoop.hdds.scm.container.states;
@@ -510,42 +510,42 @@ public class TestNodeManager {
    * @throws InterruptedException
    * @throws TimeoutException
    */
+  /**
+   * These values are very important. Here is what it means so you don't
+   * have to look it up while reading this code.
+   *
+   * OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL - This the frequency of the
+   * HB processing thread that is running in the SCM. This thread must run
+   * for the SCM to process the Heartbeats.
+   *
+   * OZONE_SCM_HEARTBEAT_INTERVAL - This is the frequency at which
+   * datanodes will send heartbeats to SCM. Please note: This is the only
+   * config value for node manager that is specified in seconds. We don't
+   * want SCM heartbeat resolution to be more than in seconds.
+   * In this test it is not used, but we are forced to set it because we
+   * have validation code that checks Stale Node interval and Dead Node
+   * interval is larger than the value of
+   * OZONE_SCM_HEARTBEAT_INTERVAL.
+   *
+   * OZONE_SCM_STALENODE_INTERVAL - This is the time that must elapse
+   * from the last heartbeat for us to mark a node as stale. In this test
+   * we set that to 3. That is if a node has not heartbeat SCM for last 3
+   * seconds we will mark it as stale.
+   *
+   * OZONE_SCM_DEADNODE_INTERVAL - This is the time that must elapse
+   * from the last heartbeat for a node to be marked dead. We have an
+   * additional constraint that this must be at least 2 times bigger than
+   * Stale node Interval.
+   *
+   * With these we are trying to explore the state of this cluster with
+   * various timeouts. Each section is commented so that you can keep
+   * track of the state of the cluster nodes.
+   *
+   */
+
   @Test
   public void testScmClusterIsInExpectedState1() throws IOException,
       InterruptedException, TimeoutException {
-    /**
-     * These values are very important. Here is what it means so you don't
-     * have to look it up while reading this code.
-     *
-     * OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL - This the frequency of the
-     * HB processing thread that is running in the SCM. This thread must run
-     * for the SCM to process the Heartbeats.
-     *
-     * OZONE_SCM_HEARTBEAT_INTERVAL - This is the frequency at which
-     * datanodes will send heartbeats to SCM. Please note: This is the only
-     * config value for node manager that is specified in seconds. We don't
-     * want SCM heartbeat resolution to be more than in seconds.
-     * In this test it is not used, but we are forced to set it because we
-     * have validation code that checks Stale Node interval and Dead Node
-     * interval is larger than the value of
-     * OZONE_SCM_HEARTBEAT_INTERVAL.
-     *
-     * OZONE_SCM_STALENODE_INTERVAL - This is the time that must elapse
-     * from the last heartbeat for us to mark a node as stale. In this test
-     * we set that to 3. That is if a node has not heartbeat SCM for last 3
-     * seconds we will mark it as stale.
-     *
-     * OZONE_SCM_DEADNODE_INTERVAL - This is the time that must elapse
-     * from the last heartbeat for a node to be marked dead. We have an
-     * additional constraint that this must be at least 2 times bigger than
-     * Stale node Interval.
-     *
-     * With these we are trying to explore the state of this cluster with
-     * various timeouts. Each section is commented so that you can keep
-     * track of the state of the cluster nodes.
-     *
-     */
-
     OzoneConfiguration conf = getConf();
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
         MILLISECONDS);
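Note: the only change here is hoisting the long comment out of the method body and in front of the @Test annotation. Since the comment documents the interval constraints, a hedged configuration sketch that satisfies them follows: heartbeats processed every 100 ms, sent every second, stale after 3 s, dead after 6 s (at least twice the stale interval). The string keys are assumptions; the test itself uses the ScmConfigKeys constants:

    import static java.util.concurrent.TimeUnit.MILLISECONDS;
    import static java.util.concurrent.TimeUnit.SECONDS;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    class HeartbeatConfSketch {
      static OzoneConfiguration configure() {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Key strings below are assumed spellings of the ScmConfigKeys
        // constants used in the diff.
        conf.setTimeDuration("ozone.scm.heartbeat.thread.interval", 100,
            MILLISECONDS);
        conf.setTimeDuration("ozone.scm.heartbeat.interval", 1, SECONDS);
        conf.setTimeDuration("ozone.scm.stale.node.interval", 3, SECONDS);
        conf.setTimeDuration("ozone.scm.dead.node.interval", 6, SECONDS);
        return conf;
      }
    }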
@@ -42,11 +42,14 @@ import java.util.HashSet;
 import java.io.IOException;
 import java.util.concurrent.ConcurrentHashMap;

+/**
+ * Test Node Storage Map.
+ */
 public class TestSCMNodeStorageStatMap {
   private final static int DATANODE_COUNT = 100;
-  final long capacity = 10L * OzoneConsts.GB;
-  final long used = 2L * OzoneConsts.GB;
-  final long remaining = capacity - used;
+  private final long capacity = 10L * OzoneConsts.GB;
+  private final long used = 2L * OzoneConsts.GB;
+  private final long remaining = capacity - used;
   private static OzoneConfiguration conf = new OzoneConfiguration();
   private final Map<UUID, Set<StorageLocationReport>> testData =
       new ConcurrentHashMap<>();
@@ -59,9 +62,10 @@ public class TestSCMNodeStorageStatMap {
     UUID dnId = UUID.randomUUID();
     Set<StorageLocationReport> reportSet = new HashSet<>();
     String path = GenericTestUtils.getTempPath(
-        TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + Integer
-            .toString(dnIndex));
-    StorageLocationReport.Builder builder = StorageLocationReport.newBuilder();
+        TestSCMNodeStorageStatMap.class.getSimpleName() + "-" +
+            Integer.toString(dnIndex));
+    StorageLocationReport.Builder builder =
+        StorageLocationReport.newBuilder();
     builder.setStorageType(StorageType.DISK).setId(dnId.toString())
         .setStorageLocation(path).setScmUsed(used).setRemaining(remaining)
         .setCapacity(capacity).setFailed(false);
@@ -139,12 +143,12 @@ public class TestSCMNodeStorageStatMap {
     String path =
         GenericTestUtils.getRandomizedTempPath().concat("/" + storageId);
     StorageLocationReport report = reportSet.iterator().next();
-    long capacity = report.getCapacity();
-    long used = report.getScmUsed();
-    long remaining = report.getRemaining();
+    long reportCapacity = report.getCapacity();
+    long reportScmUsed = report.getScmUsed();
+    long reportRemaining = report.getRemaining();
     List<SCMStorageReport> reports = TestUtils
-        .createStorageReport(capacity, used, remaining, path, null, storageId,
-            1);
+        .createStorageReport(reportCapacity, reportScmUsed, reportRemaining,
+            path, null, storageId, 1);
     StorageReportResult result =
         map.processNodeReport(key, TestUtils.createNodeReport(reports));
     Assert.assertEquals(result.getStatus(),
@@ -158,7 +162,7 @@ public class TestSCMNodeStorageStatMap {
         SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL);

     reportList.add(TestUtils
-        .createStorageReport(capacity, capacity, 0, path, null,
+        .createStorageReport(reportCapacity, reportCapacity, 0, path, null,
             UUID.randomUUID().toString(), 1).get(0));
     result = map.processNodeReport(key, TestUtils.createNodeReport(reportList));
     Assert.assertEquals(result.getStatus(),
@@ -166,8 +170,8 @@ public class TestSCMNodeStorageStatMap {
     // Mark a disk failed
     SCMStorageReport srb2 = SCMStorageReport.newBuilder()
         .setStorageUuid(UUID.randomUUID().toString())
-        .setStorageLocation(srb.getStorageLocation()).setScmUsed(capacity)
-        .setCapacity(capacity).setRemaining(0).setFailed(true).build();
+        .setStorageLocation(srb.getStorageLocation()).setScmUsed(reportCapacity)
+        .setCapacity(reportCapacity).setRemaining(0).setFailed(true).build();
     reportList.add(srb2);
     nrb.addAllStorageReport(reportList);
     result = map.processNodeReport(key, nrb.addStorageReport(srb).build());
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle Happy.
+ */
+package org.apache.hadoop.hdds.scm.node;
@@ -32,8 +32,6 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle Happy.
+ */
+package org.apache.hadoop.ozone.container.common;
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle Happy.
+ */
+package org.apache.hadoop.ozone.container.placement;
@@ -202,8 +202,8 @@ public class TestContainerSupervisor {
       ppool.handleContainerReport(reportsProto);
     }

-    clist = datanodeStateManager.getContainerReport(wayOverReplicatedContainerID,
-        ppool.getPool().getPoolName(), 7);
+    clist = datanodeStateManager.getContainerReport(
+        wayOverReplicatedContainerID, ppool.getPool().getPoolName(), 7);

     for (ContainerReportsRequestProto reportsProto : clist) {
       ppool.handleContainerReport(reportsProto);
@@ -264,7 +264,8 @@ public class TestContainerSupervisor {
           "PoolNew", 1);
       containerSupervisor.handleContainerReport(clist.get(0));
       GenericTestUtils.waitFor(() ->
-          inProgressLog.getOutput().contains(Long.toString(newContainerID)) && inProgressLog
+          inProgressLog.getOutput()
+              .contains(Long.toString(newContainerID)) && inProgressLog
               .getOutput().contains(id.getUuidString()),
           200, 10 * 1000);
     } finally {