HDDS-1663. Add datanode to network topology cluster during node regis… (#937)

Sammi Chen 2019-06-13 03:16:42 +08:00 committed by Xiaoyu Yao
parent cf84881dea
commit 1732312f45
17 changed files with 573 additions and 9 deletions

View File

@@ -32,11 +32,23 @@ public interface Node {
* exclude itself. In other words, its parent's full network location */
String getNetworkLocation();
/**
* Set this node's network location.
* @param location its network location
*/
void setNetworkLocation(String location);
/** @return this node's name in network topology. This should be the node's
* IP address or hostname.
*/
String getNetworkName();
/**
* Set this node's name, which can be a hostname or IP address.
* @param name its network name
*/
void setNetworkName(String name);
/** @return this node's full path in network topology. It's the concatenation
* of location and name.
*/

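As a usage note (not part of the patch): with these two mutators, registration code can rename a node and move it to a newly resolved location, and implementations are expected to keep the derived full path in sync. A minimal sketch, assuming the NodeImpl constructor from the next file and made-up values:

// Hedged sketch; NodeImpl(name, location, cost) is taken from the diff below.
Node dn = new NodeImpl("1.2.3.4", "/default-rack", 0);
dn.setNetworkName("host1");        // rename to the hostname
dn.setNetworkLocation("/rack1");   // move to the resolved rack
// dn.getNetworkFullPath() now returns "/rack1/host1"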
View File

@@ -27,11 +27,11 @@ import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR;
*/
public class NodeImpl implements Node {
// host:port#
private final String name;
private String name;
// string representation of this node's location, such as /dc1/rack1
private final String location;
private String location;
// location + "/" + name
private final String path;
private String path;
// which level of the tree the node resides, start from 1 for root
private int level;
// node's parent
@@ -53,10 +53,7 @@ public class NodeImpl implements Node {
}
this.name = (name == null) ? ROOT : name;
this.location = NetUtils.normalize(location);
this.path = this.location.equals(PATH_SEPARATOR_STR) ?
this.location + this.name :
this.location + PATH_SEPARATOR_STR + this.name;
this.path = getPath();
this.cost = cost;
}
@@ -84,6 +81,15 @@ public class NodeImpl implements Node {
return name;
}
/**
* Set this node's name, which can be a hostname or IP address.
* @param networkName its network name
*/
public void setNetworkName(String networkName) {
this.name = networkName;
this.path = getPath();
}
/**
* @return this node's network location
*/
@@ -91,6 +97,16 @@ public class NodeImpl implements Node {
return location;
}
/**
* Set this node's network location.
* @param networkLocation its network location
*/
@Override
public void setNetworkLocation(String networkLocation) {
this.location = networkLocation;
this.path = getPath();
}
/**
* @return this node's full path in network topology. It's the concatenation
* of location and name.
@@ -197,4 +213,10 @@ public class NodeImpl implements Node {
public String toString() {
return getNetworkFullPath();
}
private String getPath() {
return this.location.equals(PATH_SEPARATOR_STR) ?
this.location + this.name :
this.location + PATH_SEPARATOR_STR + this.name;
}
}
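For reference, getPath() special-cases the root separator so full paths never contain a doubled "/". A short sketch of the expected results, under the same constructor:

// new NodeImpl("host1", "/", 0).getNetworkFullPath()          -> "/host1"
// new NodeImpl("host1", "/dc1/rack1", 0).getNetworkFullPath() -> "/dc1/rack1/host1"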

View File

@@ -145,6 +145,9 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<testResource>
<directory>${basedir}/../../hadoop-hdds/common/src/main/resources</directory>
</testResource>
<testResource>
<directory>${basedir}/src/test/resources</directory>
</testResource>
</testResources>
</build>
</project>

View File

@@ -171,4 +171,13 @@ public interface NodeManager extends StorageContainerNodeProtocol,
*/
// TODO: We can give better name to this method!
List<SCMCommand> getCommandQueue(UUID dnID);
/**
* Given a datanode host address, returns the DatanodeDetails for
* that node.
*
* @param address node host address
* @return the given datanode, or null if not found
*/
DatanodeDetails getNode(String address);
}

View File

@@ -23,6 +23,9 @@ import org.apache.hadoop.hdds.protocol.proto
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.net.NetConstants;
import org.apache.hadoop.hdds.scm.net.NetworkTopology;
import org.apache.hadoop.hdds.scm.net.Node;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException;
@@ -44,14 +47,19 @@ import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.net.CachedDNSToSwitchMapping;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.TableMapping;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.protocol.VersionResponse;
import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -93,6 +101,9 @@ public class SCMNodeManager implements NodeManager {
// Node manager MXBean
private ObjectName nmInfoBean;
private final StorageContainerManager scmManager;
private final NetworkTopology clusterMap;
private final DNSToSwitchMapping dnsToSwitchMapping;
private final boolean useHostname;
/**
* Constructs the SCM node manager.
@@ -108,6 +119,18 @@ public class SCMNodeManager implements NodeManager {
LOG.info("Entering startup safe mode.");
registerMXBean();
this.metrics = SCMNodeMetrics.create(this);
this.clusterMap = scmManager.getClusterMap();
Class<? extends DNSToSwitchMapping> dnsToSwitchMappingClass =
conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
TableMapping.class, DNSToSwitchMapping.class);
DNSToSwitchMapping newInstance = ReflectionUtils.newInstance(
dnsToSwitchMappingClass, conf);
this.dnsToSwitchMapping =
((newInstance instanceof CachedDNSToSwitchMapping) ? newInstance
: new CachedDNSToSwitchMapping(newInstance));
this.useHostname = conf.getBoolean(
DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
}
private void registerMXBean() {
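The constructor above selects a DNSToSwitchMapping exactly the way HDFS rack awareness does: the class named by net.topology.node.switch.mapping.impl (TableMapping by default) is instantiated reflectively and, if it does not cache results itself, wrapped in a CachedDNSToSwitchMapping. A sketch of the configuration this code path reads; the key names appear in the tests and docker-config later in this commit, while the mapping file path is a placeholder:

// Hedged sketch of the configuration consumed by the constructor above.
OzoneConfiguration conf = new OzoneConfiguration();
conf.set("net.topology.node.switch.mapping.impl",
    "org.apache.hadoop.net.TableMapping");
conf.set("net.topology.table.file.name", "/etc/hadoop/rack-mapping"); // placeholder
// Optional: resolve datanodes by hostname instead of IP address.
conf.setBoolean("dfs.datanode.use.datanode.hostname", true);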
@@ -228,7 +251,19 @@ public class SCMNodeManager implements NodeManager {
datanodeDetails.setIpAddress(dnAddress.getHostAddress());
}
try {
String location;
if (useHostname) {
datanodeDetails.setNetworkName(datanodeDetails.getHostName());
location = nodeResolve(datanodeDetails.getHostName());
} else {
datanodeDetails.setNetworkName(datanodeDetails.getIpAddress());
location = nodeResolve(datanodeDetails.getIpAddress());
}
if (location != null) {
datanodeDetails.setNetworkLocation(location);
}
nodeStateManager.addNode(datanodeDetails);
clusterMap.add(datanodeDetails);
// Updating Node Report, as registration is successful
processNodeReport(datanodeDetails, nodeReport);
LOG.info("Registered Data node : {}", datanodeDetails);
@@ -236,6 +271,7 @@ public class SCMNodeManager implements NodeManager {
LOG.trace("Datanode is already registered. Datanode: {}",
datanodeDetails.toString());
}
return RegisteredCommand.newBuilder().setErrorCode(ErrorCode.success)
.setDatanodeUUID(datanodeDetails.getUuidString())
.setClusterID(this.clusterID)
@@ -515,5 +551,36 @@ public class SCMNodeManager implements NodeManager {
return commandQueue.getCommand(dnID);
}
/**
* Given a datanode address or hostname, returns the DatanodeDetails for
* that node.
*
* @param address node host address
* @return the given datanode, or null if not found
*/
@Override
public DatanodeDetails getNode(String address) {
Node node = null;
String location = nodeResolve(address);
if (location != null) {
node = clusterMap.getNode(location + NetConstants.PATH_SEPARATOR_STR +
address);
}
return node == null ? null : (DatanodeDetails)node;
}
private String nodeResolve(String hostname) {
List<String> hosts = new ArrayList<>(1);
hosts.add(hostname);
List<String> resolvedHosts = dnsToSwitchMapping.resolve(hosts);
if (resolvedHosts != null && !resolvedHosts.isEmpty()) {
String location = resolvedHosts.get(0);
LOG.debug("Resolve datanode {} return location {}", hostname, location);
return location;
} else {
LOG.error("Node {} Resolution failed. Please make sure that DNS table " +
"mapping or configured mapping is functional.", hostname);
return null;
}
}
}
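Putting the pieces together, a hedged usage sketch of the new lookup: getNode() re-resolves the address through the same mapping used at registration, then queries the topology by the full path location + "/" + address. The variable names and addresses below are illustrative:

// Illustrative only; assumes a datanode registered with IP 1.2.3.4
// that the table mapping resolves to /rack1.
DatanodeDetails dn = nodeManager.getNode("1.2.3.4");
if (dn != null) {
  // Internally this looked up "/rack1/1.2.3.4" in the cluster map.
  LOG.info("Found {} at {}", dn.getUuidString(), dn.getNetworkLocation());
}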

View File

@@ -372,6 +372,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
private void initializeSystemManagers(OzoneConfiguration conf,
SCMConfigurator configurator)
throws IOException {
clusterMap = new NetworkTopologyImpl(conf);
if(configurator.getScmNodeManager() != null) {
scmNodeManager = configurator.getScmNodeManager();
} else {
@@ -379,7 +381,6 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
conf, scmStorageConfig.getClusterID(), this, eventQueue);
}
clusterMap = new NetworkTopologyImpl(conf);
ContainerPlacementPolicy containerPlacementPolicy =
ContainerPlacementPolicyFactory.getPolicy(conf, scmNodeManager,
clusterMap, true);
@@ -1067,4 +1068,12 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
public SCMMetadataStore getScmMetadataStore() {
return scmMetadataStore;
}
/**
* Returns the SCM network topology cluster.
* @return NetworkTopology
*/
public NetworkTopology getClusterMap() {
return this.clusterMap;
}
}

View File

@@ -151,7 +151,7 @@ public final class TestUtils {
*
* @return DatanodeDetails
*/
private static DatanodeDetails createDatanodeDetails(String uuid,
public static DatanodeDetails createDatanodeDetails(String uuid,
String hostname, String ipAddress, String networkLocation) {
DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
DatanodeDetails.Port.Name.STANDALONE, 0);

View File

@@ -451,6 +451,11 @@ public class MockNodeManager implements NodeManager {
return null;
}
@Override
public DatanodeDetails getNode(String address) {
return null;
}
/**
* A class to declare some values for the nodes so that our tests
* won't fail.

View File

@@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.scm.HddsTestUtils;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.net.NetworkTopology;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -31,6 +32,7 @@ import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.hdds.server.events.EventQueue;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
@@ -56,6 +58,10 @@ import java.util.concurrent.TimeoutException;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic
.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic
.NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.OZONE_SCM_DEADNODE_INTERVAL;
@@ -945,4 +951,110 @@ public class TestSCMNodeManager {
}
}
/**
* Test adding nodes into the network topology during node registration.
* Datanodes use IP addresses to resolve their network locations.
*/
@Test
public void testScmRegisterNodeWithIpAddress()
throws IOException, InterruptedException, AuthenticationException {
testScmRegisterNodeWithNetworkTopology(false);
}
/**
* Test adding nodes into the network topology during node registration.
* Datanodes use hostnames to resolve their network locations.
*/
@Test
public void testScmRegisterNodeWithHostname()
throws IOException, InterruptedException, AuthenticationException {
testScmRegisterNodeWithNetworkTopology(true);
}
/**
* Test adding nodes into a 4-layer network topology during node registration.
*/
@Test
public void testScmRegisterNodeWith4LayerNetworkTopology()
throws IOException, InterruptedException, AuthenticationException {
OzoneConfiguration conf = getConf();
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000,
MILLISECONDS);
// create table mapping file
String[] hostNames = {"host1", "host2", "host3", "host4"};
String[] ipAddress = {"1.2.3.4", "2.3.4.5", "3.4.5.6", "4.5.6.7"};
String mapFile = this.getClass().getClassLoader()
.getResource("nodegroup-mapping").getPath();
// create and register nodes
conf.set(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
"org.apache.hadoop.net.TableMapping");
conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mapFile);
conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE,
"network-topology-nodegroup.xml");
final int nodeCount = hostNames.length;
// nodes are resolved by IP address by default
try (SCMNodeManager nodeManager = createNodeManager(conf)) {
DatanodeDetails[] nodes = new DatanodeDetails[nodeCount];
for (int i = 0; i < nodeCount; i++) {
DatanodeDetails node = TestUtils.createDatanodeDetails(
UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
nodeManager.register(node, null, null);
nodes[i] = node;
}
// verify network topology cluster has all the registered nodes
Thread.sleep(4 * 1000);
NetworkTopology clusterMap = scm.getClusterMap();
assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY));
assertEquals(nodeCount, clusterMap.getNumOfLeafNode(""));
assertEquals(4, clusterMap.getMaxLevel());
List<DatanodeDetails> nodeList = nodeManager.getAllNodes();
nodeList.stream().forEach(node ->
Assert.assertTrue(node.getNetworkLocation().startsWith("/rack1/ng")));
}
}
private void testScmRegisterNodeWithNetworkTopology(boolean useHostname)
throws IOException, InterruptedException, AuthenticationException {
OzoneConfiguration conf = getConf();
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000,
MILLISECONDS);
// create table mapping file
String[] hostNames = {"host1", "host2", "host3", "host4"};
String[] ipAddress = {"1.2.3.4", "2.3.4.5", "3.4.5.6", "4.5.6.7"};
String mapFile = this.getClass().getClassLoader()
.getResource("rack-mapping").getPath();
// create and register nodes
conf.set(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
"org.apache.hadoop.net.TableMapping");
conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mapFile);
if (useHostname) {
conf.set(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, "true");
}
final int nodeCount = hostNames.length;
// resolve node locations by IP address, or by hostname when useHostname is set
try (SCMNodeManager nodeManager = createNodeManager(conf)) {
DatanodeDetails[] nodes = new DatanodeDetails[nodeCount];
for (int i = 0; i < nodeCount; i++) {
DatanodeDetails node = TestUtils.createDatanodeDetails(
UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
nodeManager.register(node, null, null);
nodes[i] = node;
}
// verify network topology cluster has all the registered nodes
Thread.sleep(4 * 1000);
NetworkTopology clusterMap = scm.getClusterMap();
assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY));
assertEquals(nodeCount, clusterMap.getNumOfLeafNode(""));
assertEquals(3, clusterMap.getMaxLevel());
List<DatanodeDetails> nodeList = nodeManager.getAllNodes();
nodeList.stream().forEach(node ->
Assert.assertTrue(node.getNetworkLocation().equals("/rack1")));
}
}
}

View File

@@ -309,4 +309,9 @@ public class ReplicationNodeManagerMock implements NodeManager {
public List<SCMCommand> getCommandQueue(UUID dnID) {
return null;
}
@Override
public DatanodeDetails getNode(String address) {
return null;
}
}

View File

@@ -0,0 +1,24 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
host1 /rack1/ng1
host2 /rack1/ng1
host3 /rack1/ng2
host4 /rack1/ng2
1.2.3.4 /rack1/ng1
2.3.4.5 /rack1/ng1
3.4.5.6 /rack1/ng2
4.5.6.7 /rack1/ng2

View File

@@ -0,0 +1,24 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
host1 /rack1
host2 /rack1
host3 /rack1
host4 /rack1
1.2.3.4 /rack1
2.3.4.5 /rack1
3.4.5.6 /rack1
4.5.6.7 /rack1
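The mapping files above use TableMapping's two-column format: a hostname or IP address, whitespace, then its network location. A hedged sketch of resolving entries against such a file (the path is a placeholder):

// Sketch of TableMapping resolution, as wired up by SCMNodeManager.
org.apache.hadoop.conf.Configuration conf =
    new org.apache.hadoop.conf.Configuration();
conf.set("net.topology.table.file.name", "/path/to/rack-mapping"); // placeholder
org.apache.hadoop.net.TableMapping mapping =
    new org.apache.hadoop.net.TableMapping();
mapping.setConf(conf);
java.util.List<String> locations =
    mapping.resolve(java.util.Arrays.asList("host1", "1.2.3.4"));
// locations -> ["/rack1", "/rack1"]; hosts missing from the table fall
// back to "/default-rack".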

View File

@@ -0,0 +1,17 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
HDDS_VERSION=0.5.0-SNAPSHOT

View File

@@ -0,0 +1,110 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version: "3"
services:
datanode_1:
image: apache/hadoop-runner:jdk11
privileged: true #required by the profiler
volumes:
- ../..:/opt/hadoop
ports:
- 9864
- 9882
command: ["/opt/hadoop/bin/ozone","datanode"]
env_file:
- ./docker-config
networks:
service_network:
ipv4_address: 10.5.0.4
datanode_2:
image: apache/hadoop-runner:jdk11
privileged: true #required by the profiler
volumes:
- ../..:/opt/hadoop
ports:
- 9864
- 9882
command: ["/opt/hadoop/bin/ozone","datanode"]
env_file:
- ./docker-config
networks:
service_network:
ipv4_address: 10.5.0.5
datanode_3:
image: apache/hadoop-runner:jdk11
privileged: true #required by the profiler
volumes:
- ../..:/opt/hadoop
ports:
- 9864
- 9882
command: ["/opt/hadoop/bin/ozone","datanode"]
env_file:
- ./docker-config
networks:
service_network:
ipv4_address: 10.5.0.6
datanode_4:
image: apache/hadoop-runner:jdk11
privileged: true #required by the profiler
volumes:
- ../..:/opt/hadoop
ports:
- 9864
- 9882
command: ["/opt/hadoop/bin/ozone","datanode"]
env_file:
- ./docker-config
networks:
service_network:
ipv4_address: 10.5.0.7
om:
image: apache/hadoop-runner:jdk11
privileged: true #required by the profiler
volumes:
- ../..:/opt/hadoop
ports:
- 9874:9874
environment:
ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/ozone","om"]
networks:
service_network:
ipv4_address: 10.5.0.70
scm:
image: apache/hadoop-runner:jdk11
privileged: true #required by the profiler
volumes:
- ../..:/opt/hadoop
ports:
- 9876:9876
env_file:
- ./docker-config
environment:
ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
command: ["/opt/hadoop/bin/ozone","scm"]
networks:
service_network:
ipv4_address: 10.5.0.71
networks:
service_network:
driver: bridge
ipam:
config:
- subnet: 10.5.0.0/16

View File

@@ -0,0 +1,88 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
OZONE-SITE.XML_ozone.om.address=om
OZONE-SITE.XML_ozone.om.http-address=om:9874
OZONE-SITE.XML_ozone.scm.names=scm
OZONE-SITE.XML_ozone.enabled=True
OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
OZONE-SITE.XML_ozone.replication=1
OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
OZONE-SITE.XML_hdds.profiler.endpoint.enabled=true
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
HDFS-SITE.XML_net.topology.node.switch.mapping.impl=org.apache.hadoop.net.TableMapping
HDFS-SITE.XML_net.topology.table.file.name=/opt/hadoop/compose/ozone-net-topology/network-config
ASYNC_PROFILER_HOME=/opt/profiler
LOG4J.PROPERTIES_log4j.rootLogger=DEBUG, ARF
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN
LOG4J.PROPERTIES_log4j.appender.ARF=org.apache.log4j.RollingFileAppender
LOG4J.PROPERTIES_log4j.appender.ARF.layout=org.apache.log4j.PatternLayout
LOG4J.PROPERTIES_log4j.appender.ARF.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
LOG4J.PROPERTIES_log4j.appender.ARF.file=/opt/hadoop/logs/${module.name}-${user.name}.log
HDDS_DN_OPTS=-Dmodule.name=datanode
HDFS_OM_OPTS=-Dmodule.name=om
HDFS_STORAGECONTAINERMANAGER_OPTS=-Dmodule.name=scm
#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation.
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
#LOG4J2.PROPERTIES_* are for Ozone Audit Logging
LOG4J2.PROPERTIES_monitorInterval=30
LOG4J2.PROPERTIES_filter=read,write
LOG4J2.PROPERTIES_filter.read.type=MarkerFilter
LOG4J2.PROPERTIES_filter.read.marker=READ
LOG4J2.PROPERTIES_filter.read.onMatch=DENY
LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL
LOG4J2.PROPERTIES_filter.write.type=MarkerFilter
LOG4J2.PROPERTIES_filter.write.marker=WRITE
LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL
LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL
LOG4J2.PROPERTIES_appenders=console, rolling
LOG4J2.PROPERTIES_appender.console.type=Console
LOG4J2.PROPERTIES_appender.console.name=STDOUT
LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log
LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies
LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400
LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB
LOG4J2.PROPERTIES_loggers=audit
LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger
LOG4J2.PROPERTIES_logger.audit.name=OMAudit
LOG4J2.PROPERTIES_logger.audit.level=INFO
LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling
LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile
LOG4J2.PROPERTIES_rootLogger.level=INFO
LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout
LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT

View File

@@ -0,0 +1,22 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
10.5.0.4 /rack1
10.5.0.5 /rack1
10.5.0.6 /rack1
10.5.0.7 /rack2
10.5.0.8 /rack2
10.5.0.9 /rack2

View File

@@ -0,0 +1,35 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export COMPOSE_DIR
# shellcheck source=/dev/null
source "$COMPOSE_DIR/../testlib.sh"
start_docker_env
#Due to the limitation of the current auditparser test, it should be the
#first test in a clean cluster.
execute_robot_test om auditparser
execute_robot_test scm basic/basic.robot
stop_docker_env
generate_report