parent f1673b0db1
commit a2887f5c23

@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Mark field to be configurable from ozone-site.xml.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.METHOD)
+public @interface Config {
+
+  /**
+   * Configuration fragment relative to the prefix defined with @ConfigGroup.
+   */
+  String key();
+
+  /**
+   * Type of configuration. Use AUTO to decide it based on the java type.
+   */
+  ConfigType type() default ConfigType.AUTO;
+
+  /**
+   * If type == TIME the unit should be defined with this attribute.
+   */
+  TimeUnit timeUnit() default TimeUnit.MILLISECONDS;
+}
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Mark pojo which holds configuration variables.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.TYPE)
+public @interface ConfigGroup {
+  String prefix();
+}
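
The two annotations work as a pair: @ConfigGroup supplies the key prefix for a whole class, and @Config marks the setters to be injected. A minimal sketch of the intended usage (hypothetical ExampleConf class, not part of this commit):

    // Hypothetical example, assuming package org.apache.hadoop.hdds.conf.
    import java.util.concurrent.TimeUnit;

    @ConfigGroup(prefix = "ozone.example")
    public class ExampleConf {

      private long timeout;

      // Injected from the key "ozone.example.timeout" (prefix + "." + key).
      @Config(key = "timeout", type = ConfigType.TIME, timeUnit = TimeUnit.SECONDS)
      public void setTimeout(long timeout) {
        this.timeout = timeout;
      }

      public long getTimeout() {
        return timeout;
      }
    }
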
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+/**
+ * Possible type of injected configuration.
+ * <p>
+ * AUTO means that the exact type will be identified based on the java type of
+ * the configuration field.
+ */
+public enum ConfigType {
+  AUTO,
+  STRING,
+  BOOLEAN,
+  INT,
+  LONG,
+  TIME,
+  SIZE
+}
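
With the default AUTO, the injected type is derived from the Java type of the setter parameter (String -> STRING, int/Integer -> INT, long/Long -> LONG, boolean/Boolean -> BOOLEAN; see detectConfigType in OzoneConfiguration below). TIME has to be requested explicitly via the type attribute, and SIZE, while declared here, is not yet handled by the injection switch in getObject. A sketch with a hypothetical setter:

    // Hypothetical setter relying on AUTO type detection.
    @Config(key = "port")
    public void setPort(int port) {  // resolves to ConfigType.INT
      this.port = port;
    }
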
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+/**
+ * Exception to throw in case of a configuration problem.
+ */
+public class ConfigurationException extends RuntimeException {
+  public ConfigurationException() {
+  }
+
+  public ConfigurationException(String message) {
+    super(message);
+  }
+
+  public ConfigurationException(String message, Throwable cause) {
+    super(message, cause);
+  }
+}
@@ -28,6 +28,8 @@ import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
 import java.net.URL;
 import java.util.ArrayList;
 import java.util.Enumeration;
@@ -61,6 +63,108 @@ public class OzoneConfiguration extends Configuration {
     return config.getProperties();
   }
 
+  /**
+   * Create a Configuration object and inject the required configuration values.
+   *
+   * @param configurationClass The class where the fields are annotated with
+   *                           the configuration.
+   * @return Instantiated java object where the config fields are injected.
+   */
+  public <T> T getObject(Class<T> configurationClass) {
+
+    T configuration;
+
+    try {
+      configuration = configurationClass.newInstance();
+    } catch (InstantiationException | IllegalAccessException e) {
+      throw new ConfigurationException(
+          "Configuration class can't be created: " + configurationClass, e);
+    }
+    ConfigGroup configGroup =
+        configurationClass.getAnnotation(ConfigGroup.class);
+    String prefix = configGroup.prefix();
+
+    for (Method setterMethod : configurationClass.getMethods()) {
+      if (setterMethod.isAnnotationPresent(Config.class)) {
+
+        String methodLocation =
+            configurationClass + "." + setterMethod.getName();
+
+        Config configAnnotation = setterMethod.getAnnotation(Config.class);
+
+        String key = prefix + "." + configAnnotation.key();
+
+        Class<?>[] parameterTypes = setterMethod.getParameterTypes();
+        if (parameterTypes.length != 1) {
+          throw new ConfigurationException(
+              "@Config annotation should be used on simple setter: "
+                  + methodLocation);
+        }
+
+        ConfigType type = configAnnotation.type();
+
+        if (type == ConfigType.AUTO) {
+          type = detectConfigType(parameterTypes[0], methodLocation);
+        }
+
+        //Note: default value is handled by ozone-default.xml. Here we can
+        //use any default.
+        try {
+          switch (type) {
+          case STRING:
+            setterMethod.invoke(configuration, get(key));
+            break;
+          case INT:
+            setterMethod.invoke(configuration,
+                getInt(key, 0));
+            break;
+          case BOOLEAN:
+            setterMethod.invoke(configuration,
+                getBoolean(key, false));
+            break;
+          case LONG:
+            setterMethod.invoke(configuration,
+                getLong(key, 0));
+            break;
+          case TIME:
+            setterMethod.invoke(configuration,
+                getTimeDuration(key, 0, configAnnotation.timeUnit()));
+            break;
+          default:
+            throw new ConfigurationException(
+                "Unsupported ConfigType " + type + " on " + methodLocation);
+          }
+        } catch (InvocationTargetException | IllegalAccessException e) {
+          throw new ConfigurationException(
+              "Can't inject configuration to " + methodLocation, e);
+        }
+
+      }
+    }
+    return configuration;
+
+  }
+
+  private ConfigType detectConfigType(Class<?> parameterType,
+      String methodLocation) {
+    ConfigType type;
+    if (parameterType == String.class) {
+      type = ConfigType.STRING;
+    } else if (parameterType == Integer.class || parameterType == int.class) {
+      type = ConfigType.INT;
+    } else if (parameterType == Long.class || parameterType == long.class) {
+      type = ConfigType.LONG;
+    } else if (parameterType == Boolean.class
+        || parameterType == boolean.class) {
+      type = ConfigType.BOOLEAN;
+    } else {
+      throw new ConfigurationException(
+          "Unsupported configuration type " + parameterType + " in "
+              + methodLocation);
+    }
+    return type;
+  }
+
   /**
    * Class to marshall/un-marshall configuration from xml files.
    */
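
A minimal usage sketch of the new API, mirroring the getConfigurationObject test added later in this commit:

    OzoneConfiguration ozoneConfig = new OzoneConfiguration();
    ozoneConfig.setInt("ozone.scm.client.port", 5555);
    ozoneConfig.setTimeDuration("ozone.scm.client.wait", 10, TimeUnit.MINUTES);

    // Reflection-based injection: @ConfigGroup prefix + "." + @Config key.
    SimpleConfiguration conf = ozoneConfig.getObject(SimpleConfiguration.class);
    conf.getPort();      // 5555
    conf.getWaitTime();  // 600 -- TIME values come back in the setter's TimeUnit (SECONDS here)
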
@@ -145,7 +249,7 @@ public class OzoneConfiguration extends Configuration {
     }
 
     @Override
-    public int hashCode(){
+    public int hashCode() {
       return this.getName().hashCode();
     }
 
@@ -169,6 +273,7 @@ public class OzoneConfiguration extends Configuration {
    * does not override values of properties
    * if there is no tag present in the configs of
    * newly added resources.
+   *
    * @param tag
    * @return Properties that belong to the tag
    */
@@ -181,7 +286,7 @@ public class OzoneConfiguration extends Configuration {
     Properties props = new Properties();
     Enumeration properties = propertiesByTag.propertyNames();
     while (properties.hasMoreElements()) {
-      Object propertyName = properties.nextElement();
+      Object propertyName = properties.nextElement();
       // get the current value of the property
       Object value = updatedProps.getProperty(propertyName.toString());
       if (value != null) {
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Example configuration to test the configuration injection.
+ */
+@ConfigGroup(prefix = "ozone.scm.client")
+public class SimpleConfiguration {
+
+  private String clientAddress;
+
+  private String bindHost;
+
+  private boolean enabled;
+
+  private int port = 1234;
+
+  private long waitTime = 1;
+
+  @Config(key = "address")
+  public void setClientAddress(String clientAddress) {
+    this.clientAddress = clientAddress;
+  }
+
+  @Config(key = "bind.host")
+  public void setBindHost(String bindHost) {
+    this.bindHost = bindHost;
+  }
+
+  @Config(key = "enabled")
+  public void setEnabled(boolean enabled) {
+    this.enabled = enabled;
+  }
+
+  @Config(key = "port")
+  public void setPort(int port) {
+    this.port = port;
+  }
+
+  @Config(key = "wait", type = ConfigType.TIME, timeUnit =
+      TimeUnit.SECONDS)
+  public void setWaitTime(long waitTime) {
+    this.waitTime = waitTime;
+  }
+
+  public String getClientAddress() {
+    return clientAddress;
+  }
+
+  public String getBindHost() {
+    return bindHost;
+  }
+
+  public boolean isEnabled() {
+    return enabled;
+  }
+
+  public int getPort() {
+    return port;
+  }
+
+  public long getWaitTime() {
+    return waitTime;
+  }
+}
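
With the ozone.scm.client prefix these setters resolve to the keys ozone.scm.client.address, ozone.scm.client.bind.host, ozone.scm.client.enabled, ozone.scm.client.port and ozone.scm.client.wait, which is exactly what the new test below populates. Note that the field initializers (1234, 1) are effectively placeholders: getObject always invokes the annotated setters, and, per the comment in getObject, real defaults come from ozone-default.xml (presumably why the second test below expects 9860 for the port). A rough per-setter equivalent of what the injector does:

    // Sketch of the injection performed for a single INT setter.
    SimpleConfiguration c = new SimpleConfiguration();
    c.setPort(ozoneConfig.getInt("ozone.scm.client.port", 0));
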
@@ -29,6 +29,7 @@ import java.io.BufferedWriter;
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
+import java.util.concurrent.TimeUnit;
 
 /**
  * Test class for OzoneConfiguration.
@@ -97,6 +98,37 @@ public class TestOzoneConfiguration {
         .getProperty("dfs.cblock.trace.io"));
   }
 
+  @Test
+  public void getConfigurationObject() {
+    OzoneConfiguration ozoneConfig = new OzoneConfiguration();
+    ozoneConfig.set("ozone.scm.client.address", "address");
+    ozoneConfig.set("ozone.scm.client.bind.host", "host");
+    ozoneConfig.setBoolean("ozone.scm.client.enabled", true);
+    ozoneConfig.setInt("ozone.scm.client.port", 5555);
+    ozoneConfig.setTimeDuration("ozone.scm.client.wait", 10, TimeUnit.MINUTES);
+
+    SimpleConfiguration configuration =
+        ozoneConfig.getObject(SimpleConfiguration.class);
+
+    Assert.assertEquals("host", configuration.getBindHost());
+    Assert.assertEquals("address", configuration.getClientAddress());
+    Assert.assertEquals(true, configuration.isEnabled());
+    Assert.assertEquals(5555, configuration.getPort());
+    Assert.assertEquals(600, configuration.getWaitTime());
+  }
+
+  @Test
+  public void getConfigurationObjectWithDefault() {
+    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+
+    SimpleConfiguration configuration =
+        ozoneConfiguration.getObject(SimpleConfiguration.class);
+
+    Assert.assertEquals(false, configuration.isEnabled());
+    Assert.assertEquals(9860, configuration.getPort());
+  }
+
+
   private void appendProperty(BufferedWriter out, String name, String val)
       throws IOException {
     this.appendProperty(out, name, val, false);
@@ -21,12 +21,13 @@ package org.apache.hadoop.hdds.scm.container;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.GeneratedMessage;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigType;
+import org.apache.hadoop.hdds.conf.ConfigGroup;
+import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .ContainerPlacementPolicy;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
@@ -108,15 +109,9 @@ public class ReplicationManager {
   private final Thread replicationMonitor;
 
   /**
-   * The frequency in which ReplicationMonitor thread should run.
+   * ReplicationManager specific configuration.
    */
-  private final long interval;
-
-  /**
-   * Timeout for container replication & deletion command issued by
-   * ReplicationManager.
-   */
-  private final long eventTimeout;
+  private final ReplicationManagerConfiguration conf;
 
   /**
    * Flag used for checking if the ReplicationMonitor thread is running or
@@ -132,27 +127,21 @@ public class ReplicationManager {
    * @param containerPlacement ContainerPlacementPolicy
    * @param eventPublisher EventPublisher
    */
-  public ReplicationManager(final Configuration conf,
+  public ReplicationManager(final ReplicationManagerConfiguration conf,
       final ContainerManager containerManager,
       final ContainerPlacementPolicy containerPlacement,
-      final EventPublisher eventPublisher) {
+      final EventPublisher eventPublisher,
+      final LockManager lockManager) {
     this.containerManager = containerManager;
     this.containerPlacement = containerPlacement;
     this.eventPublisher = eventPublisher;
-    this.lockManager = new LockManager<>(conf);
+    this.lockManager = lockManager;
     this.inflightReplication = new HashMap<>();
    this.inflightDeletion = new HashMap<>();
     this.replicationMonitor = new Thread(this::run);
     this.replicationMonitor.setName("ReplicationMonitor");
     this.replicationMonitor.setDaemon(true);
-    this.interval = conf.getTimeDuration(
-        ScmConfigKeys.HDDS_SCM_REPLICATION_THREAD_INTERVAL,
-        ScmConfigKeys.HDDS_SCM_REPLICATION_THREAD_INTERVAL_DEFAULT,
-        TimeUnit.MILLISECONDS);
-    this.eventTimeout = conf.getTimeDuration(
-        ScmConfigKeys.HDDS_SCM_REPLICATION_EVENT_TIMEOUT,
-        ScmConfigKeys.HDDS_SCM_REPLICATION_EVENT_TIMEOUT_DEFAULT,
-        TimeUnit.MILLISECONDS);
+    this.conf = conf;
     this.running = false;
   }
 
@@ -217,7 +206,7 @@ public class ReplicationManager {
             " processing {} containers.", Time.monotonicNow() - start,
             containerIds.size());
 
-        wait(interval);
+        wait(conf.getInterval());
       }
     } catch (Throwable t) {
       // When we get runtime exception, we should terminate SCM.
@@ -337,7 +326,7 @@ public class ReplicationManager {
       final Map<ContainerID, List<InflightAction>> inflightActions,
       final Predicate<InflightAction> filter) {
     final ContainerID id = container.containerID();
-    final long deadline = Time.monotonicNow() - eventTimeout;
+    final long deadline = Time.monotonicNow() - conf.getEventTimeout();
     if (inflightActions.containsKey(id)) {
       final List<InflightAction> actions = inflightActions.get(id);
       actions.removeIf(action -> action.time < deadline);
@@ -754,4 +743,41 @@ public class ReplicationManager {
       this.time = time;
     }
   }
+
+  /**
+   * Configuration used by the Replication Manager.
+   */
+  @ConfigGroup(prefix = "hdds.scm.replication")
+  public static class ReplicationManagerConfiguration {
+    /**
+     * The frequency in which ReplicationMonitor thread should run.
+     */
+    private long interval = 5 * 60 * 1000;
+
+    /**
+     * Timeout for container replication & deletion command issued by
+     * ReplicationManager.
+     */
+    private long eventTimeout = 10 * 60 * 1000;
+
+    @Config(key = "thread.interval", type = ConfigType.TIME, timeUnit =
+        TimeUnit.MILLISECONDS)
+    public void setInterval(long interval) {
+      this.interval = interval;
+    }
+
+    @Config(key = "event.timeout", type = ConfigType.TIME, timeUnit =
+        TimeUnit.MILLISECONDS)
+    public void setEventTimeout(long eventTimeout) {
+      this.eventTimeout = eventTimeout;
+    }
+
+    public long getInterval() {
+      return interval;
+    }
+
+    public long getEventTimeout() {
+      return eventTimeout;
+    }
+  }
 }
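
Under the hdds.scm.replication prefix, these two setters resolve to the keys hdds.scm.replication.thread.interval and hdds.scm.replication.event.timeout. The object is wired in by StorageContainerManager, roughly as the next hunks show:

    // Sketch of the wiring, matching the StorageContainerManager change below.
    ReplicationManager rm = new ReplicationManager(
        conf.getObject(ReplicationManagerConfiguration.class),
        containerManager, containerPlacementPolicy, eventQueue,
        new LockManager<>(conf));
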
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdds.scm.block.BlockManager;
 import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
 import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl;
 import org.apache.hadoop.hdds.scm.block.PendingDeleteHandler;
+import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration;
 import org.apache.hadoop.hdds.scm.safemode.SafeModeHandler;
 import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
 import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler;
@@ -89,6 +90,7 @@ import org.apache.hadoop.ozone.OzoneSecurityUtil;
 import org.apache.hadoop.ozone.common.Storage.StorageState;
 import org.apache.hadoop.ozone.common.StorageInfo;
 import org.apache.hadoop.ozone.lease.LeaseManager;
+import org.apache.hadoop.ozone.lock.LockManager;
 import org.apache.hadoop.ozone.protocol.commands.RetriableDatanodeEventWatcher;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -415,8 +417,12 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     if (configurator.getReplicationManager() != null) {
       replicationManager = configurator.getReplicationManager();
     } else {
-      replicationManager = new ReplicationManager(conf,
-          containerManager, containerPlacementPolicy, eventQueue);
+      replicationManager = new ReplicationManager(
+          conf.getObject(ReplicationManagerConfiguration.class),
+          containerManager,
+          containerPlacementPolicy,
+          eventQueue,
+          new LockManager<>(conf));
     }
     if(configurator.getScmSafeModeManager() != null) {
       scmSafeModeManager = configurator.getScmSafeModeManager();
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .ContainerPlacementPolicy;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.ozone.lock.LockManager;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 import org.junit.After;
 import org.junit.Assert;
@@ -104,7 +106,11 @@ public class TestReplicationManager {
     });
 
     replicationManager = new ReplicationManager(
-        conf, containerManager, containerPlacementPolicy, eventQueue);
+        new ReplicationManagerConfiguration(),
+        containerManager,
+        containerPlacementPolicy,
+        eventQueue,
+        new LockManager<>(conf));
     replicationManager.start();
     Thread.sleep(100L);
   }
|
@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.scm.block.BlockManager;
|
|||
import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
|
||||
import org.apache.hadoop.hdds.scm.container.ContainerManager;
|
||||
import org.apache.hadoop.hdds.scm.container.ReplicationManager;
|
||||
import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration;
|
||||
import org.apache.hadoop.hdds.scm.container.placement.algorithms
|
||||
.ContainerPlacementPolicy;
|
||||
import org.apache.hadoop.hdds.scm.events.SCMEvents;
|
||||
|
@ -31,6 +32,7 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
|
|||
import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
|
||||
import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;
|
||||
import org.apache.hadoop.hdds.server.events.EventQueue;
|
||||
import org.apache.hadoop.ozone.lock.LockManager;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
|
@ -66,9 +68,10 @@ public class TestSafeModeHandler {
|
|||
Mockito.mock(ContainerManager.class);
|
||||
Mockito.when(containerManager.getContainerIDs())
|
||||
.thenReturn(new HashSet<>());
|
||||
replicationManager = new ReplicationManager(configuration,
|
||||
replicationManager = new ReplicationManager(
|
||||
new ReplicationManagerConfiguration(),
|
||||
containerManager, Mockito.mock(ContainerPlacementPolicy.class),
|
||||
eventQueue);
|
||||
eventQueue, new LockManager(configuration));
|
||||
scmPipelineManager = Mockito.mock(SCMPipelineManager.class);
|
||||
blockManager = Mockito.mock(BlockManagerImpl.class);
|
||||
safeModeHandler =
|
||||
|
|