YARN-6858. Attribute Manager to store and provide node attributes in RM. Contributed by Naganarasimha G R.
parent d312b5cf9f
commit 2f7712be09
@@ -120,7 +120,13 @@ public class NodeAttributePBImpl extends NodeAttribute {

  @Override
  public int hashCode() {
    return getProto().hashCode();
    final int prime = 31;
    int result = 1;
    result = prime * result + ((getAttributePrefix() == null) ? 0
        : getAttributePrefix().hashCode());
    result = prime * result
        + ((getAttributeName() == null) ? 0 : getAttributeName().hashCode());
    return result;
  }

  @Override
@@ -133,15 +139,12 @@ public class NodeAttributePBImpl extends NodeAttribute {
    }
    if (obj instanceof NodeAttribute) {
      NodeAttribute other = (NodeAttribute) obj;
      if (!compare(getAttributePrefix(), other.getAttributePrefix())) {
        return false;
      }
      if (!compare(getAttributeName(), other.getAttributeName())) {
        return false;
      }
      if (!compare(getAttributeValue(), other.getAttributeValue())) {
        return false;
      }
      if (!compare(getAttributeType(), other.getAttributeType())) {
        return false;
      }
      return true;
    }
    return false;
@@ -0,0 +1,71 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.nodelabels;

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

/**
 * Generic class capturing the information required commonly across Partitions
 * and Attributes.
 */
public abstract class AbstractLabel {

  private Resource resource;
  private int numActiveNMs;
  private String labelName;

  public AbstractLabel() {
    super();
  }

  public AbstractLabel(String labelName) {
    this(labelName, Resource.newInstance(0, 0), 0);
  }

  public AbstractLabel(String labelName, Resource resource, int numActiveNMs) {
    super();
    this.resource = resource;
    this.numActiveNMs = numActiveNMs;
    this.labelName = labelName;
  }

  public void addNode(Resource nodeRes) {
    Resources.addTo(resource, nodeRes);
    numActiveNMs++;
  }

  public void removeNode(Resource nodeRes) {
    Resources.subtractFrom(resource, nodeRes);
    numActiveNMs--;
  }

  public Resource getResource() {
    return Resource.newInstance(this.resource);
  }

  public int getNumActiveNMs() {
    return numActiveNMs;
  }

  public String getLabelName() {
    return labelName;
  }

}
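Reviewer note: a minimal sketch (not part of this patch) of the resource accounting that AbstractLabel gives its subclasses; the anonymous subclass and the sample resources are purely illustrative.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.nodelabels.AbstractLabel;

public class AbstractLabelSketch {
  public static void main(String[] args) {
    // Illustrative concrete label; the real subclasses in this patch are
    // RMNodeLabel (partitions) and RMNodeAttribute (attributes).
    AbstractLabel gpuLabel = new AbstractLabel("GPU") { };

    // Each activated NM contributes its resource to the label's total.
    gpuLabel.addNode(Resource.newInstance(8192, 8));
    gpuLabel.addNode(Resource.newInstance(4096, 4));
    System.out.println(gpuLabel.getNumActiveNMs() + " active NMs, total "
        + gpuLabel.getResource());

    // Deactivating a node subtracts its share again.
    gpuLabel.removeNode(Resource.newInstance(4096, 4));
  }
}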
@@ -0,0 +1,26 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.nodelabels;

/**
 * Operations which are allowed in Node Attributes Expression.
 */
public enum AttributeExpressionOperation {
  LT, GT, IN, NOTIN
}
@@ -0,0 +1,53 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.nodelabels;

import java.io.IOException;

/**
 * Interface to capture operations on AttributeValue.
 */
public interface AttributeValue {

  /**
   * @return original value which was set.
   */
  String getValue();

  /**
   * validate the value based on the type and initialize for further compare
   * operations.
   *
   * @param value
   * @throws IOException
   */
  void validateAndInitializeValue(String value) throws IOException;

  /**
   * compare the value against the other based on the
   * AttributeExpressionOperation.
   *
   * @param other
   * @param op
   * @return true if value <code>other</code> matches the current value for the
   *         operation <code>op</code>.
   */
  boolean compareForOperation(AttributeValue other,
      AttributeExpressionOperation op);
}
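Reviewer note: the comments in NodeAttributesManagerImpl later in this patch mention a possible LongAttributeValue alongside the string implementation. A hedged sketch of what such an AttributeValue implementation could look like; the class name and parsing rules are assumptions, and only StringAttributeValue ships in this commit.

import java.io.IOException;

import org.apache.hadoop.yarn.nodelabels.AttributeExpressionOperation;
import org.apache.hadoop.yarn.nodelabels.AttributeValue;

/** Hypothetical numeric value type, sketched for illustration only. */
public class LongAttributeValueSketch implements AttributeValue {
  private long value;
  private String original = "";

  @Override
  public String getValue() {
    return original;
  }

  @Override
  public void validateAndInitializeValue(String valueStr) throws IOException {
    try {
      // Parse once up front so later comparisons are cheap.
      value = Long.parseLong(valueStr.trim());
      original = valueStr.trim();
    } catch (NumberFormatException e) {
      throw new IOException("Not a numeric attribute value: " + valueStr, e);
    }
  }

  @Override
  public boolean compareForOperation(AttributeValue other,
      AttributeExpressionOperation op) {
    if (!(other instanceof LongAttributeValueSketch)) {
      return false;
    }
    long otherValue = ((LongAttributeValueSketch) other).value;
    switch (op) {
    case GT:
      return value > otherValue;
    case LT:
      return value < otherValue;
    case IN:
      return value == otherValue;
    case NOTIN:
      return value != otherValue;
    default:
      return false;
    }
  }
}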
@@ -35,7 +35,6 @@ import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import java.util.regex.Pattern;

import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
@@ -65,15 +64,12 @@ import com.google.common.collect.ImmutableSet;
@Private
public class CommonNodeLabelsManager extends AbstractService {
  protected static final Log LOG = LogFactory.getLog(CommonNodeLabelsManager.class);
  private static final int MAX_LABEL_LENGTH = 255;
  public static final Set<String> EMPTY_STRING_SET = Collections
      .unmodifiableSet(new HashSet<String>(0));
  public static final Set<NodeLabel> EMPTY_NODELABEL_SET = Collections
      .unmodifiableSet(new HashSet<NodeLabel>(0));
  public static final String ANY = "*";
  public static final Set<String> ACCESS_ANY_LABEL_SET = ImmutableSet.of(ANY);
  private static final Pattern LABEL_PATTERN = Pattern
      .compile("^[0-9a-zA-Z][0-9a-zA-Z-_]*");
  public static final int WILDCARD_PORT = 0;
  // Flag to identify startup for removelabel
  private boolean initNodeLabelStoreInProgress = false;
@@ -112,7 +108,7 @@ public class CommonNodeLabelsManager extends AbstractService {
  /**
   * A <code>Host</code> can have multiple <code>Node</code>s
   */
  protected static class Host {
  public static class Host {
    public Set<String> labels;
    public Map<NodeId, Node> nms;

@@ -317,7 +313,7 @@ public class CommonNodeLabelsManager extends AbstractService {
    // do a check before actual adding them, will throw exception if any of them
    // doesn't meet label name requirement
    for (NodeLabel label : labels) {
      checkAndThrowLabelName(label.getName());
      NodeLabelUtil.checkAndThrowLabelName(label.getName());
    }

    for (NodeLabel label : labels) {
@@ -969,22 +965,6 @@ public class CommonNodeLabelsManager extends AbstractService {
    }
  }

  public static void checkAndThrowLabelName(String label) throws IOException {
    if (label == null || label.isEmpty() || label.length() > MAX_LABEL_LENGTH) {
      throw new IOException("label added is empty or exceeds "
          + MAX_LABEL_LENGTH + " character(s)");
    }
    label = label.trim();

    boolean match = LABEL_PATTERN.matcher(label).matches();

    if (!match) {
      throw new IOException("label name should only contains "
          + "{0-9, a-z, A-Z, -, _} and should not started with {-,_}"
          + ", now it is=" + label);
    }
  }

  private void checkExclusivityMatch(Collection<NodeLabel> labels)
      throws IOException {
    ArrayList<NodeLabel> mismatchlabels = new ArrayList<NodeLabel>();
@@ -0,0 +1,99 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.nodelabels;

import java.io.IOException;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.NodeAttribute;

/**
 * This class captures all interactions for Attributes with RM.
 */
public abstract class NodeAttributesManager extends AbstractService {
  public NodeAttributesManager(String name) {
    super(name);
  }

  /**
   * To completely replace the mappings for a given node with the new Set of
   * Attributes. If the mapping contains an attribute whose type does not match
   * a previously existing Attribute under the same prefix (name space) then
   * exception is thrown. Key would be name of the node and value would be set
   * of Attributes to be mapped.
   *
   * @param nodeAttributeMapping
   * @throws IOException
   */
  public abstract void replaceNodeAttributes(
      Map<String, Set<NodeAttribute>> nodeAttributeMapping) throws IOException;

  /**
   * It adds or updates the attribute mapping for a given node with out
   * impacting other existing attribute mapping. Key would be name of the node
   * and value would be set of Attributes to be mapped.
   *
   * @param nodeAttributeMapping
   * @throws IOException
   */
  public abstract void addNodeAttributes(
      Map<String, Set<NodeAttribute>> nodeAttributeMapping) throws IOException;

  /**
   * It removes the specified attribute mapping for a given node with out
   * impacting other existing attribute mapping. Key would be name of the node
   * and value would be set of Attributes to be removed.
   *
   * @param nodeAttributeMapping
   * @throws IOException
   */
  public abstract void removeNodeAttributes(
      Map<String, Set<NodeAttribute>> nodeAttributeMapping) throws IOException;

  /**
   * @param prefix set of prefix string's for which the attributes needs to
   *          returned
   * @return set of node Attributes objects for the specified set of prefixes,
   *         else return all
   */
  public abstract Set<NodeAttribute> getClusterNodeAttributes(
      Set<String> prefix);

  /**
   * Given a attribute set, return what all Nodes have attribute mapped to it.
   *
   * @return a Map, of attribute to set of hostnames
   */
  //TODO need to handle as part of REST patch.
  /* public abstract Map<NodeAttribute, Set<String>> getAttributesToNodes(
      Set<NodeAttribute> attributes);*/

  /**
   * NodeAttribute to AttributeValue Map.
   *
   * @return Map<NodeAttribute, AttributeValue> mapping of Attribute to Value.
   */
  public abstract Map<NodeAttribute, AttributeValue> getAttributesForNode(
      String hostName);

  // futuristic
  // public set<NodeId> getNodesMatchingExpression(String nodeLabelExp);
}
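Reviewer note: a minimal sketch of how RM-side code could drive this API once a manager instance is available; the hostname, attribute name and value are illustrative, while NodeAttribute.newInstance and NodeAttributeType are used as they appear elsewhere in this patch.

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.NodeAttribute;
import org.apache.hadoop.yarn.api.records.NodeAttributeType;
import org.apache.hadoop.yarn.nodelabels.AttributeValue;
import org.apache.hadoop.yarn.nodelabels.NodeAttributesManager;

public class NodeAttributesManagerSketch {

  static void tagHost(NodeAttributesManager attributesManager)
      throws IOException {
    // Map hostname -> set of attributes; here host1 is tagged os=centos7.
    NodeAttribute osAttribute = NodeAttribute.newInstance("os",
        NodeAttributeType.STRING, "centos7");
    Map<String, Set<NodeAttribute>> mapping =
        Collections.singletonMap("host1", Collections.singleton(osAttribute));

    // ADD keeps any other attributes already mapped to host1;
    // replaceNodeAttributes would wipe them first.
    attributesManager.addNodeAttributes(mapping);

    // Read side: all attributes currently mapped to host1.
    Map<NodeAttribute, AttributeValue> onHost =
        attributesManager.getAttributesForNode("host1");
    System.out.println("host1 attributes: " + onHost.keySet());
  }
}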
@@ -0,0 +1,97 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.nodelabels;

import java.io.IOException;
import java.util.regex.Pattern;

/**
 * Utility class for all NodeLabel and NodeAttribute operations.
 */
public final class NodeLabelUtil {
  private NodeLabelUtil() {
  }

  private static final int MAX_LABEL_LENGTH = 255;
  private static final Pattern LABEL_OR_VALUE_PATTERN =
      Pattern.compile("^[0-9a-zA-Z][0-9a-zA-Z-_]*");
  private static final Pattern PREFIX_PATTERN =
      Pattern.compile("^[0-9a-zA-Z][0-9a-zA-Z-_\\.]*");

  public static void checkAndThrowLabelName(String label) throws IOException {
    if (label == null || label.isEmpty() || label.length() > MAX_LABEL_LENGTH) {
      throw new IOException("label added is empty or exceeds "
          + MAX_LABEL_LENGTH + " character(s)");
    }
    label = label.trim();

    boolean match = LABEL_OR_VALUE_PATTERN.matcher(label).matches();

    if (!match) {
      throw new IOException("label name should only contains "
          + "{0-9, a-z, A-Z, -, _} and should not started with {-,_}"
          + ", now it is= " + label);
    }
  }

  public static void checkAndThrowAttributeValue(String value)
      throws IOException {
    if (value == null) {
      return;
    } else if (value.trim().length() > MAX_LABEL_LENGTH) {
      throw new IOException("Attribute value added exceeds " + MAX_LABEL_LENGTH
          + " character(s)");

    }
    value = value.trim();
    if(value.isEmpty()) {
      return;
    }

    boolean match = LABEL_OR_VALUE_PATTERN.matcher(value).matches();

    if (!match) {
      throw new IOException("attribute value should only contains "
          + "{0-9, a-z, A-Z, -, _} and should not started with {-,_}"
          + ", now it is= " + value);
    }
  }

  public static void checkAndThrowAttributePrefix(String prefix)
      throws IOException {
    if (prefix == null) {
      throw new IOException("Attribute prefix cannot be null.");
    }
    if (prefix.trim().length() > MAX_LABEL_LENGTH) {
      throw new IOException("Attribute value added exceeds " + MAX_LABEL_LENGTH
          + " character(s)");
    }
    prefix = prefix.trim();
    if(prefix.isEmpty()) {
      return;
    }

    boolean match = PREFIX_PATTERN.matcher(prefix).matches();

    if (!match) {
      throw new IOException("attribute value should only contains "
          + "{0-9, a-z, A-Z, -, _,.} and should not started with {-,_}"
          + ", now it is= " + prefix);
    }
  }
}
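Reviewer note: a short sketch of the intended use of these validators on request input; the sample inputs are made up.

import java.io.IOException;

import org.apache.hadoop.yarn.nodelabels.NodeLabelUtil;

public class NodeLabelUtilSketch {
  public static void main(String[] args) {
    // Valid names, prefixes and values pass silently; invalid ones throw.
    try {
      NodeLabelUtil.checkAndThrowLabelName("os");               // ok
      NodeLabelUtil.checkAndThrowAttributePrefix("nm.yarn.io");  // dots allowed in prefixes
      NodeLabelUtil.checkAndThrowAttributeValue("centos7");      // ok
      NodeLabelUtil.checkAndThrowLabelName("_bad-name");         // rejected: starts with '_'
    } catch (IOException e) {
      System.err.println("Rejected input: " + e.getMessage());
    }
  }
}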
@@ -0,0 +1,104 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.nodelabels;

import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.NodeAttribute;
import org.apache.hadoop.yarn.api.records.NodeAttributeType;
import org.apache.hadoop.yarn.api.records.Resource;

/**
 * Reference of NodeAttribute in RM.
 */
public class RMNodeAttribute extends AbstractLabel {

  private NodeAttribute attribute;
  // TODO need to revisit whether we need to make this concurrent implementation
  private Set<String> nodes = new HashSet<>();

  public RMNodeAttribute(NodeAttribute attribute) {
    this(attribute.getAttributeName(), Resource.newInstance(0, 0), 0,
        attribute);
  }

  public RMNodeAttribute(String labelName, Resource res, int activeNMs,
      NodeAttribute attribute) {
    super(labelName, res, activeNMs);
    this.attribute = attribute;
  }

  public NodeAttribute getAttribute() {
    return attribute;
  }

  public void setAttribute(NodeAttribute attribute) {
    this.attribute = attribute;
  }

  public RMNodeAttribute(String attributeName) {
    super(attributeName);
    attribute = NodeAttribute.newInstance(attributeName,
        NodeAttributeType.STRING, CommonNodeLabelsManager.NO_LABEL);
  }

  public NodeAttributeType getAttributeType() {
    return attribute.getAttributeType();
  }

  public void addNode(String node) {
    nodes.add(node);
  }

  public void removeNode(String node) {
    nodes.remove(node);
  }

  public Set<String> getAssociatedNodeIds() {
    return new HashSet<String>(nodes);
  }

  @Override
  public int hashCode() {
    return attribute.hashCode();
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (getClass() != obj.getClass()) {
      return false;
    }
    RMNodeAttribute other = (RMNodeAttribute) obj;
    if (attribute == null) {
      if (other.attribute != null) {
        return false;
      }
    } else if (!attribute.equals(other.attribute)) {
      return false;
    }
    return true;
  }
}
@@ -27,13 +27,13 @@ import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class RMNodeLabel implements Comparable<RMNodeLabel> {
  private Resource resource;
  private int numActiveNMs;
  private String labelName;
  private Set<NodeId> nodeIds;
/**
 * Partition representation in RM.
 */
public class RMNodeLabel extends AbstractLabel implements Comparable<RMNodeLabel> {
  private boolean exclusive;
  private NodeLabel nodeLabel;
  private Set<NodeId> nodeIds;

  public RMNodeLabel(NodeLabel nodeLabel) {
    this(nodeLabel.getName(), Resource.newInstance(0, 0), 0,
@@ -47,12 +47,60 @@ public class RMNodeLabel implements Comparable<RMNodeLabel> {

  protected RMNodeLabel(String labelName, Resource res, int activeNMs,
      boolean exclusive) {
    this.labelName = labelName;
    this.resource = res;
    this.numActiveNMs = activeNMs;
    this.nodeIds = new HashSet<NodeId>();
    super(labelName, res, activeNMs);
    this.exclusive = exclusive;
    this.nodeLabel = NodeLabel.newInstance(labelName, exclusive);
    nodeIds = new HashSet<NodeId>();
  }

  public void setIsExclusive(boolean exclusive) {
    this.exclusive = exclusive;
  }

  public boolean getIsExclusive() {
    return this.exclusive;
  }

  @Override
  public boolean equals(Object obj) {
    if (obj instanceof RMNodeLabel) {
      RMNodeLabel other = (RMNodeLabel) obj;
      return Resources.equals(getResource(), other.getResource())
          && StringUtils.equals(getLabelName(), other.getLabelName())
          && (other.getNumActiveNMs() == getNumActiveNMs());
    }
    return false;
  }

  public RMNodeLabel getCopy() {
    return new RMNodeLabel(getLabelName(), getResource(), getNumActiveNMs(),
        exclusive);
  }

  @Override
  public int hashCode() {
    final int prime = 502357;
    return (int) ((((long) getLabelName().hashCode() << 8)
        + (getResource().hashCode() << 4) + getNumActiveNMs()) % prime);
  }

  @Override
  public int compareTo(RMNodeLabel o) {
    // We should always put empty label entry first after sorting
    if (getLabelName().isEmpty() != o.getLabelName().isEmpty()) {
      if (getLabelName().isEmpty()) {
        return -1;
      }
      return 1;
    }

    return getLabelName().compareTo(o.getLabelName());
  }

  public NodeLabel getNodeLabel() {
    return this.nodeLabel;
  }

  public void addNodeId(NodeId node) {
@@ -62,77 +110,8 @@ public class RMNodeLabel implements Comparable<RMNodeLabel> {
  public void removeNodeId(NodeId node) {
    nodeIds.remove(node);
  }

  public Set<NodeId> getAssociatedNodeIds() {
    return new HashSet<NodeId>(nodeIds);
  }

  public void addNode(Resource nodeRes) {
    Resources.addTo(resource, nodeRes);
    numActiveNMs++;
  }

  public void removeNode(Resource nodeRes) {
    Resources.subtractFrom(resource, nodeRes);
    numActiveNMs--;
  }

  public Resource getResource() {
    return this.resource;
  }

  public int getNumActiveNMs() {
    return numActiveNMs;
  }

  public String getLabelName() {
    return labelName;
  }

  public void setIsExclusive(boolean exclusive) {
    this.exclusive = exclusive;
  }

  public boolean getIsExclusive() {
    return this.exclusive;
  }

  public RMNodeLabel getCopy() {
    return new RMNodeLabel(labelName, resource, numActiveNMs, exclusive);
  }

  public NodeLabel getNodeLabel() {
    return this.nodeLabel;
  }

  @Override
  public int compareTo(RMNodeLabel o) {
    // We should always put empty label entry first after sorting
    if (labelName.isEmpty() != o.getLabelName().isEmpty()) {
      if (labelName.isEmpty()) {
        return -1;
      }
      return 1;
    }

    return labelName.compareTo(o.getLabelName());
  }

  @Override
  public boolean equals(Object obj) {
    if (obj instanceof RMNodeLabel) {
      RMNodeLabel other = (RMNodeLabel) obj;
      return Resources.equals(resource, other.getResource())
          && StringUtils.equals(labelName, other.getLabelName())
          && (other.getNumActiveNMs() == numActiveNMs);
    }
    return false;
  }

  @Override
  public int hashCode() {
    final int prime = 502357;
    return (int) ((((long) labelName.hashCode() << 8)
        + (resource.hashCode() << 4) + numActiveNMs) % prime);
  }
}
@@ -0,0 +1,61 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.nodelabels;

import java.io.IOException;

/**
 * Attribute value for String NodeAttributeType.
 */
public class StringAttributeValue implements AttributeValue {
  private String value = "";

  @Override
  public boolean compareForOperation(AttributeValue other,
      AttributeExpressionOperation op) {
    if (other instanceof StringAttributeValue) {
      StringAttributeValue otherString = (StringAttributeValue) other;
      switch (op) {
      case IN:
        return value.equals(otherString.value);
      case NOTIN:
        return !value.equals(otherString.value);
      default:
        return false;
      }
    } else {
      return false;
    }
  }

  @Override
  public void validateAndInitializeValue(String valueStr) throws IOException {
    NodeLabelUtil.checkAndThrowAttributeValue(valueStr);
    this.value = valueStr;
  }

  @Override
  public String getValue() {
    return value;
  }

  public String toString() {
    return getValue();
  }
}
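Reviewer note: a quick sketch of the expected behaviour of the string value type; the values are illustrative, and GT/LT deliberately fall through to false for strings.

import java.io.IOException;

import org.apache.hadoop.yarn.nodelabels.AttributeExpressionOperation;
import org.apache.hadoop.yarn.nodelabels.StringAttributeValue;

public class StringAttributeValueSketch {
  public static void main(String[] args) throws IOException {
    StringAttributeValue configured = new StringAttributeValue();
    configured.validateAndInitializeValue("centos7"); // validated via NodeLabelUtil

    StringAttributeValue requested = new StringAttributeValue();
    requested.validateAndInitializeValue("centos7");

    // Strings only support equality-style checks.
    System.out.println(configured.compareForOperation(requested,
        AttributeExpressionOperation.IN));    // true
    System.out.println(configured.compareForOperation(requested,
        AttributeExpressionOperation.NOTIN)); // false
    System.out.println(configured.compareForOperation(requested,
        AttributeExpressionOperation.GT));    // false (unsupported for strings)
  }
}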
@@ -34,11 +34,6 @@ import java.util.Random;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;

import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.ResourcePlugin;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.ResourcePluginManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputByteBuffer;
@@ -64,6 +59,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.nodelabels.NodeLabelUtil;
import org.apache.hadoop.yarn.server.api.ResourceManagerConstants;
import org.apache.hadoop.yarn.server.api.ResourceTracker;
import org.apache.hadoop.yarn.server.api.ServerRMProxy;
@@ -76,23 +72,27 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResp
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
import org.apache.hadoop.yarn.server.api.records.ContainerQueuingLimit;
import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.ResourcePlugin;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.ResourcePluginManager;
import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
import org.apache.hadoop.yarn.server.nodemanager.nodelabels.NodeLabelsProvider;
import org.apache.hadoop.yarn.server.nodemanager.timelineservice.NMTimelinePublisher;
import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import org.apache.hadoop.yarn.util.YarnVersionInfo;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.annotations.VisibleForTesting;

@@ -1012,7 +1012,7 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
      StringBuilder errorMsg = new StringBuilder("");
      while (iterator.hasNext()) {
        try {
          CommonNodeLabelsManager
          NodeLabelUtil
              .checkAndThrowLabelName(iterator.next().getName());
        } catch (IOException e) {
          errorMsg.append(e.getMessage());
@@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.nodelabels.NodeAttributesManager;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
@@ -101,6 +102,7 @@ public class RMActiveServiceContext {
  private ApplicationMasterService applicationMasterService;

  private RMNodeLabelsManager nodeLabelManager;
  private NodeAttributesManager nodeAttributesManager;
  private RMDelegatedNodeLabelsUpdater rmDelegatedNodeLabelsUpdater;
  private long epoch;
  private Clock systemClock = SystemClock.getInstance();
@@ -405,6 +407,18 @@ public class RMActiveServiceContext {
    nodeLabelManager = mgr;
  }

  @Private
  @Unstable
  public NodeAttributesManager getNodeAttributesManager() {
    return nodeAttributesManager;
  }

  @Private
  @Unstable
  public void setNodeAttributesManager(NodeAttributesManager mgr) {
    nodeAttributesManager = mgr;
  }

  @Private
  @Unstable
  public AllocationTagsManager getAllocationTagsManager() {
@@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.ConfigurationProvider;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.nodelabels.NodeAttributesManager;
import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
@@ -133,6 +134,10 @@ public interface RMContext extends ApplicationMasterServiceContext {

  public void setNodeLabelManager(RMNodeLabelsManager mgr);

  NodeAttributesManager getNodeAttributesManager();

  void setNodeAttributesManager(NodeAttributesManager mgr);

  RMDelegatedNodeLabelsUpdater getRMDelegatedNodeLabelsUpdater();

  void setRMDelegatedNodeLabelsUpdater(
@@ -34,6 +34,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.ConfigurationProvider;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.nodelabels.NodeAttributesManager;
import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
@@ -504,6 +505,11 @@ public class RMContextImpl implements RMContext {
    activeServiceContext.setNodeLabelManager(mgr);
  }

  @Override
  public void setNodeAttributesManager(NodeAttributesManager mgr) {
    activeServiceContext.setNodeAttributesManager(mgr);
  }

  @Override
  public AllocationTagsManager getAllocationTagsManager() {
    return activeServiceContext.getAllocationTagsManager();
@@ -632,4 +638,9 @@ public class RMContextImpl implements RMContext {
    this.activeServiceContext.setResourceProfilesManager(mgr);
  }
  // Note: Read java doc before adding any services over here.

  @Override
  public NodeAttributesManager getNodeAttributesManager() {
    return activeServiceContext.getNodeAttributesManager();
  }
}
@@ -62,15 +62,17 @@ import org.apache.hadoop.yarn.event.EventDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.nodelabels.NodeAttributesManager;
import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
import org.apache.hadoop.yarn.server.resourcemanager.federation.FederationStateStoreService;
import org.apache.hadoop.yarn.server.resourcemanager.metrics.CombinedSystemMetricsPublisher;
import org.apache.hadoop.yarn.server.resourcemanager.metrics.NoOpSystemMetricPublisher;
import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
import org.apache.hadoop.yarn.server.resourcemanager.metrics.TimelineServiceV1Publisher;
import org.apache.hadoop.yarn.server.resourcemanager.metrics.TimelineServiceV2Publisher;
import org.apache.hadoop.yarn.server.resourcemanager.metrics.CombinedSystemMetricsPublisher;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NodeAttributesManagerImpl;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore;
@@ -517,6 +519,11 @@ public class ResourceManager extends CompositeService
    return new RMNodeLabelsManager();
  }

  protected NodeAttributesManager createNodeAttributesManager()
      throws InstantiationException, IllegalAccessException {
    return new NodeAttributesManagerImpl();
  }

  protected AllocationTagsManager createAllocationTagsManager() {
    return new AllocationTagsManager(this.rmContext);
  }
@@ -656,6 +663,10 @@ public class ResourceManager extends CompositeService
      addService(nlm);
      rmContext.setNodeLabelManager(nlm);

      NodeAttributesManager nam = createNodeAttributesManager();
      addService(nam);
      rmContext.setNodeAttributesManager(nam);

      AllocationTagsManager allocationTagsManager =
          createAllocationTagsManager();
      rmContext.setAllocationTagsManager(allocationTagsManager);
@@ -0,0 +1,527 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.resourcemanager.nodelabels;

import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;

import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeAttribute;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.nodelabels.AttributeValue;
import org.apache.hadoop.yarn.nodelabels.NodeAttributesManager;
import org.apache.hadoop.yarn.nodelabels.NodeLabelUtil;
import org.apache.hadoop.yarn.nodelabels.RMNodeAttribute;
import org.apache.hadoop.yarn.nodelabels.StringAttributeValue;
import org.apache.hadoop.yarn.server.api.protocolrecords.AttributeMappingOperationType;

/**
 * Manager holding the attributes to Labels.
 */
public class NodeAttributesManagerImpl extends NodeAttributesManager {
  protected static final Log LOG =
      LogFactory.getLog(NodeAttributesManagerImpl.class);
  /**
   * If a user doesn't specify value for a label, then empty string is
   * considered as default.
   */
  public static final String EMPTY_ATTRIBUTE_VALUE = "";

  private Dispatcher dispatcher;

  // TODO may be we can have a better collection here.
  // this will be updated to get the attributeName to NM mapping
  private ConcurrentHashMap<NodeAttribute, RMNodeAttribute> clusterAttributes =
      new ConcurrentHashMap<>();

  // hostname -> (Map (attributeName -> NodeAttribute))
  // Instead of NodeAttribute, plan to have it in future as AttributeValue
  //              AttributeValue
  //               /         \
  // StringNodeAttributeValue LongAttributeValue
  // and convert the configured value to the specific type so that the
  // expression evaluations are faster
  private ConcurrentMap<String, Host> nodeCollections =
      new ConcurrentHashMap<>();

  private final ReadLock readLock;
  private final WriteLock writeLock;

  public NodeAttributesManagerImpl() {
    super("NodeAttributesManagerImpl");
    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    readLock = lock.readLock();
    writeLock = lock.writeLock();
  }

  protected void initDispatcher(Configuration conf) {
    // create async handler
    dispatcher = new AsyncDispatcher("AttributeNodeLabelsManager dispatcher");
    AsyncDispatcher asyncDispatcher = (AsyncDispatcher) dispatcher;
    asyncDispatcher.init(conf);
    asyncDispatcher.setDrainEventsOnStop();
  }

  protected void startDispatcher() {
    // start dispatcher
    AsyncDispatcher asyncDispatcher = (AsyncDispatcher) dispatcher;
    asyncDispatcher.start();
  }

  @Override
  protected void serviceStart() throws Exception {
    initNodeAttributeStore(getConfig());
    // init dispatcher only when service start, because recover will happen in
    // service init, we don't want to trigger any event handling at that time.
    initDispatcher(getConfig());

    if (null != dispatcher) {
      dispatcher.register(NodeAttributesStoreEventType.class,
          new ForwardingEventHandler());
    }

    startDispatcher();
    super.serviceStart();
  }

  protected void initNodeAttributeStore(Configuration conf) throws Exception {
    // TODO to generalize and make use of the FileSystemNodeLabelsStore
  }

  private void internalUpdateAttributesOnNodes(
      Map<String, Map<NodeAttribute, AttributeValue>> nodeAttributeMapping,
      AttributeMappingOperationType op,
      Map<NodeAttribute, RMNodeAttribute> newAttributesToBeAdded) {
    try {
      writeLock.lock();

      // shows node->attributes Mapped as part of this operation.
      StringBuilder logMsg = new StringBuilder(op.name());
      logMsg.append(" attributes on nodes:");
      // do update labels from nodes
      for (Entry<String, Map<NodeAttribute, AttributeValue>> entry : nodeAttributeMapping
          .entrySet()) {
        String nodeHost = entry.getKey();
        Map<NodeAttribute, AttributeValue> attributes = entry.getValue();

        Host node = nodeCollections.get(nodeHost);
        if (node == null) {
          node = new Host(nodeHost);
        }
        switch (op) {
        case REMOVE:
          removeNodeFromAttributes(nodeHost, attributes.keySet());
          node.removeAttributes(attributes);
          break;
        case ADD:
          clusterAttributes.putAll(newAttributesToBeAdded);
          addNodeToAttribute(nodeHost, attributes);
          node.addAttributes(attributes);
          break;
        case REPLACE:
          clusterAttributes.putAll(newAttributesToBeAdded);
          replaceNodeToAttribute(nodeHost, node.getAttributes(), attributes);
          node.replaceAttributes(attributes);
          break;
        default:
          break;
        }
        logMsg.append(" NM = ");
        logMsg.append(entry.getKey());
        logMsg.append(", attributes=[ ");
        logMsg.append(StringUtils.join(entry.getValue().entrySet(), ","));
        logMsg.append("] ,");
      }

      LOG.info(logMsg);

      if (null != dispatcher) {
        dispatcher.getEventHandler()
            .handle(new NodeAttributesStoreEvent(nodeAttributeMapping, op));
      }

    } finally {
      writeLock.unlock();
    }
  }

  private void removeNodeFromAttributes(String nodeHost,
      Set<NodeAttribute> attributeMappings) {
    for (NodeAttribute attribute : attributeMappings) {
      clusterAttributes.get(attribute).removeNode(nodeHost);
    }
  }

  private void addNodeToAttribute(String nodeHost,
      Map<NodeAttribute, AttributeValue> attributeMappings) {
    for (NodeAttribute attribute : attributeMappings.keySet()) {
      clusterAttributes.get(attribute).addNode(nodeHost);
    }
  }

  private void replaceNodeToAttribute(String nodeHost,
      Map<NodeAttribute, AttributeValue> oldAttributeMappings,
      Map<NodeAttribute, AttributeValue> newAttributeMappings) {
    if (oldAttributeMappings != null) {
      removeNodeFromAttributes(nodeHost, oldAttributeMappings.keySet());
    }
    addNodeToAttribute(nodeHost, newAttributeMappings);
  }

  /**
   * @param nodeAttributeMapping
   * @param newAttributesToBeAdded
   * @param isRemoveOperation : to indicate whether its a remove operation.
   * @return Map<String, Map<NodeAttribute, AttributeValue>>, node -> Map(
   *         NodeAttribute -> AttributeValue)
   * @throws IOException : on invalid mapping in the current request or against
   *           already existing NodeAttributes.
   */
  protected Map<String, Map<NodeAttribute, AttributeValue>> validate(
      Map<String, Set<NodeAttribute>> nodeAttributeMapping,
      Map<NodeAttribute, RMNodeAttribute> newAttributesToBeAdded,
      boolean isRemoveOperation) throws IOException {
    Map<String, Map<NodeAttribute, AttributeValue>> nodeToAttributesMap =
        new TreeMap<>();
    Map<NodeAttribute, AttributeValue> attributesValues;
    Set<Entry<String, Set<NodeAttribute>>> entrySet =
        nodeAttributeMapping.entrySet();
    for (Entry<String, Set<NodeAttribute>> nodeToAttrMappingEntry : entrySet) {
      attributesValues = new HashMap<>();
      String node = nodeToAttrMappingEntry.getKey().trim();
      if (nodeToAttrMappingEntry.getValue().isEmpty()) {
        // no attributes to map mostly remove operation
        continue;
      }

      // validate for attributes
      for (NodeAttribute attribute : nodeToAttrMappingEntry.getValue()) {
        String attributeName = attribute.getAttributeName().trim();
        NodeLabelUtil.checkAndThrowLabelName(attributeName);
        NodeLabelUtil
            .checkAndThrowAttributePrefix(attribute.getAttributePrefix());

        // ensure trimmed values are set back
        attribute.setAttributeName(attributeName);
        attribute.setAttributePrefix(attribute.getAttributePrefix().trim());

        // verify for type against prefix/attributeName
        if (validateForAttributeTypeMismatch(isRemoveOperation, attribute,
            newAttributesToBeAdded)) {
          newAttributesToBeAdded.put(attribute,
              new RMNodeAttribute(attribute));
        }
        // TODO type based value setting needs to be done using a factory
        StringAttributeValue value = new StringAttributeValue();
        value.validateAndInitializeValue(
            normalizeAttributeValue(attribute.getAttributeValue()));
        attributesValues.put(attribute, value);
      }
      nodeToAttributesMap.put(node, attributesValues);
    }
    return nodeToAttributesMap;
  }

  /**
   *
   * @param isRemoveOperation
   * @param attribute
   * @param newAttributes
   * @return Whether its a new Attribute added
   * @throws IOException
   */
  private boolean validateForAttributeTypeMismatch(boolean isRemoveOperation,
      NodeAttribute attribute,
      Map<NodeAttribute, RMNodeAttribute> newAttributes)
      throws IOException {
    if (isRemoveOperation && !clusterAttributes.containsKey(attribute)) {
      // no need to validate anything as its remove operation and attribute
      // doesn't exist.
      return false; // no need to add as its remove operation
    } else {
      // already existing or attribute is mapped to another Node in the
      // current command, then check whether the attribute type is matching
      NodeAttribute existingAttribute =
          (clusterAttributes.containsKey((attribute))
              ? clusterAttributes.get(attribute).getAttribute()
              : (newAttributes.containsKey(attribute)
                  ? newAttributes.get(attribute).getAttribute()
                  : null));
      if (existingAttribute == null) {
        return true;
      } else if (existingAttribute.getAttributeType() != attribute
          .getAttributeType()) {
        throw new IOException("Attribute name - type is not matching with "
            + "already configured mapping for the attribute "
            + attribute.getAttributeName() + " existing : "
            + existingAttribute.getAttributeType() + ", new :"
            + attribute.getAttributeType());
      }
      return false;
    }
  }

  protected String normalizeAttributeValue(String value) {
    if (value != null) {
      return value.trim();
    }
    return EMPTY_ATTRIBUTE_VALUE;
  }

  @Override
  public Set<NodeAttribute> getClusterNodeAttributes(Set<String> prefix) {
    Set<NodeAttribute> attributes = new HashSet<>();
    try {
      readLock.lock();
      attributes.addAll(clusterAttributes.keySet());
    } finally {
      readLock.unlock();
    }
    if (prefix != null && prefix.isEmpty()) {
      Iterator<NodeAttribute> iterator = attributes.iterator();
      while (iterator.hasNext()) {
        NodeAttribute attribute = iterator.next();
        if (!prefix.contains(attribute.getAttributePrefix())) {
          iterator.remove();
        }
      }
    }
    return attributes;
  }

  // TODO need to handle as part of REST patch.
  /*
   * @Override public Map<NodeAttribute, Set<String>> getAttributesToNodes(
   * Set<NodeAttribute> attributes) { try { readLock.lock(); boolean
   * fetchAllAttributes = (attributes == null || attributes.isEmpty());
   * Map<NodeAttribute, Set<String>> attributesToNodes = new HashMap<>(); for
   * (Entry<NodeAttribute, RMAttributeNodeLabel> attributeEntry :
   * attributeCollections .entrySet()) { if (fetchAllAttributes ||
   * attributes.contains(attributeEntry.getKey())) {
   * attributesToNodes.put(attributeEntry.getKey(),
   * attributeEntry.getValue().getAssociatedNodeIds()); } } return
   * attributesToNodes; } finally { readLock.unlock(); } }
   */

  public Resource getResourceByAttribute(NodeAttribute attribute) {
    try {
      readLock.lock();
      return clusterAttributes.containsKey(attribute)
          ? clusterAttributes.get(attribute).getResource()
          : Resource.newInstance(0, 0);
    } finally {
      readLock.unlock();
    }
  }

  @Override
  public Map<NodeAttribute, AttributeValue> getAttributesForNode(
      String hostName) {
    try {
      readLock.lock();
      return nodeCollections.containsKey(hostName)
          ? nodeCollections.get(hostName).getAttributes()
          : new HashMap<>();
    } finally {
      readLock.unlock();
    }
  }

  public void activateNode(NodeId nodeId, Resource resource) {
    try {
      writeLock.lock();
      String hostName = nodeId.getHost();
      Host host = nodeCollections.get(hostName);
      if (host == null) {
        host = new Host(hostName);
        nodeCollections.put(hostName, host);
      }
      host.activateNode(resource);
      for (NodeAttribute attribute : host.getAttributes().keySet()) {
        clusterAttributes.get(attribute).removeNode(resource);
      }
    } finally {
      writeLock.unlock();
    }
  }

  public void deactivateNode(NodeId nodeId) {
    try {
      writeLock.lock();
      Host host = nodeCollections.get(nodeId.getHost());
      for (NodeAttribute attribute : host.getAttributes().keySet()) {
        clusterAttributes.get(attribute).removeNode(host.getResource());
      }
      host.deactivateNode();
    } finally {
      writeLock.unlock();
    }
  }

  public void updateNodeResource(NodeId node, Resource newResource) {
    deactivateNode(node);
    activateNode(node, newResource);
  }

  /**
   * A <code>Host</code> can have multiple <code>Node</code>s.
   */
  public static class Host {
    private String hostName;
    private Map<NodeAttribute, AttributeValue> attributes;
    private Resource resource;
    private boolean isActive;

    private Map<NodeAttribute, AttributeValue> getAttributes() {
      return attributes;
    }

    public void setAttributes(Map<NodeAttribute, AttributeValue> attributes) {
      this.attributes = attributes;
    }

    public void removeAttributes(
        Map<NodeAttribute, AttributeValue> attributesMapping) {
      for (NodeAttribute attribute : attributesMapping.keySet()) {
        this.attributes.remove(attribute);
      }
    }

    public void replaceAttributes(
        Map<NodeAttribute, AttributeValue> attributesMapping) {
      this.attributes.clear();
      this.attributes.putAll(attributesMapping);
    }

    public void addAttributes(
        Map<NodeAttribute, AttributeValue> attributesMapping) {
      this.attributes.putAll(attributesMapping);
    }

    public Resource getResource() {
      return resource;
    }

    public void setResource(Resource resourceParam) {
      this.resource = resourceParam;
    }

    public boolean isActive() {
      return isActive;
    }

    public void deactivateNode() {
      this.isActive = false;
      this.resource = Resource.newInstance(0, 0);
    }

    public void activateNode(Resource r) {
      this.isActive = true;
      this.resource = r;
    }

    public String getHostName() {
      return hostName;
    }

    public void setHostName(String hostName) {
      this.hostName = hostName;
    }

    public Host(String hostName) {
      this(hostName, new HashMap<NodeAttribute, AttributeValue>());
    }

    public Host(String hostName,
        Map<NodeAttribute, AttributeValue> attributes) {
      this(hostName, attributes, Resource.newInstance(0, 0), false);
    }

    public Host(String hostName, Map<NodeAttribute, AttributeValue> attributes,
        Resource resource, boolean isActive) {
      super();
      this.attributes = attributes;
      this.resource = resource;
      this.isActive = isActive;
      this.hostName = hostName;
    }
  }

  private final class ForwardingEventHandler
      implements EventHandler<NodeAttributesStoreEvent> {

    @Override
    public void handle(NodeAttributesStoreEvent event) {
      handleStoreEvent(event);
    }
  }

  // Dispatcher related code
  protected void handleStoreEvent(NodeAttributesStoreEvent event) {
    // TODO Need to extend the File
  }

  @Override
  public void replaceNodeAttributes(
      Map<String, Set<NodeAttribute>> nodeAttributeMapping) throws IOException {
    processMapping(nodeAttributeMapping, AttributeMappingOperationType.REPLACE);
  }

  @Override
  public void addNodeAttributes(
      Map<String, Set<NodeAttribute>> nodeAttributeMapping) throws IOException {
    processMapping(nodeAttributeMapping, AttributeMappingOperationType.ADD);
  }

  @Override
  public void removeNodeAttributes(
      Map<String, Set<NodeAttribute>> nodeAttributeMapping) throws IOException {
    processMapping(nodeAttributeMapping, AttributeMappingOperationType.REMOVE);
  }

  private void processMapping(
      Map<String, Set<NodeAttribute>> nodeAttributeMapping,
      AttributeMappingOperationType mappingType) throws IOException {
    Map<NodeAttribute, RMNodeAttribute> newAttributesToBeAdded =
        new HashMap<>();
    Map<String, Map<NodeAttribute, AttributeValue>> validMapping =
        validate(nodeAttributeMapping, newAttributesToBeAdded, false);

    internalUpdateAttributesOnNodes(validMapping, mappingType,
        newAttributesToBeAdded);
  }
}
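Reviewer note: a minimal, assumption-laden sketch of wiring this implementation through its AbstractService lifecycle and exercising the REPLACE path; the hostname, port and attribute value are made up, and the backing store/dispatcher handler is still a TODO in this patch.

import java.util.Collections;

import org.apache.hadoop.yarn.api.records.NodeAttribute;
import org.apache.hadoop.yarn.api.records.NodeAttributeType;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NodeAttributesManagerImpl;

public class NodeAttributesManagerImplSketch {
  public static void main(String[] args) throws Exception {
    NodeAttributesManagerImpl mgr = new NodeAttributesManagerImpl();
    mgr.init(new YarnConfiguration());
    mgr.start(); // serviceStart() wires the async store dispatcher

    // Track the NM so the host entry exists before attributes are mapped.
    mgr.activateNode(NodeId.newInstance("host1", 1234),
        Resource.newInstance(8192, 8));

    // REPLACE semantics: host1 ends up with exactly this one attribute.
    NodeAttribute os = NodeAttribute.newInstance("os",
        NodeAttributeType.STRING, "centos7");
    mgr.replaceNodeAttributes(
        Collections.singletonMap("host1", Collections.singleton(os)));

    System.out.println(mgr.getAttributesForNode("host1").keySet());

    mgr.stop();
  }
}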
@@ -0,0 +1,51 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.resourcemanager.nodelabels;

import java.util.Map;

import org.apache.hadoop.yarn.api.records.NodeAttribute;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.nodelabels.AttributeValue;
import org.apache.hadoop.yarn.server.api.protocolrecords.AttributeMappingOperationType;

/**
 * Event capturing details to store the Node Attributes in the backend store.
 */
public class NodeAttributesStoreEvent
    extends AbstractEvent<NodeAttributesStoreEventType> {
  private Map<String, Map<NodeAttribute, AttributeValue>> nodeAttributeMapping;
  private AttributeMappingOperationType operation;

  public NodeAttributesStoreEvent(
      Map<String, Map<NodeAttribute, AttributeValue>> nodeAttributeMappingList,
      AttributeMappingOperationType operation) {
    super(NodeAttributesStoreEventType.STORE_ATTRIBUTES);
    this.nodeAttributeMapping = nodeAttributeMappingList;
    this.operation = operation;
  }

  public Map<String, Map<NodeAttribute, AttributeValue>> getNodeAttributeMappingList() {
    return nodeAttributeMapping;
  }

  public AttributeMappingOperationType getOperation() {
    return operation;
  }
}
@@ -0,0 +1,26 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.resourcemanager.nodelabels;

/**
 * Event type to store the NodeAttributes.
 */
public enum NodeAttributesStoreEventType {
  STORE_ATTRIBUTES
}