YARN-6855. [YARN-3409] CLI Proto Modifications to support Node Attributes. Contributed by Naganarasimha G R.

This commit is contained in:
Naganarasimha 2018-01-21 00:53:02 +08:00 committed by Sunil G
parent 9011567169
commit 1f42ce907a
22 changed files with 1044 additions and 15 deletions

View File

@ -0,0 +1,78 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api.records;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.util.Records;
/**
* <p>
* Node Attribute is a kind of label which represents one of the
* attributes/features of a Node. It is different from a node partition label
* because resource guarantees across the queues will not be maintained for
* these types of labels.
* </p>
* <p>
* A given Node can be mapped with any kind of attribute, few examples are
* HAS_SSD=true, JAVA_VERSION=JDK1.8, OS_TYPE=WINDOWS.
* </p>
* <p>
* It is not compulsory for an attribute to have a value; the empty string is
* the default value for <code>NodeAttributeType.STRING</code>.
* </p>
*
*/
@Public
@Unstable
public abstract class NodeAttribute {
public static NodeAttribute newInstance(String attributeName,
NodeAttributeType attributeType, String attributeValue) {
NodeAttribute nodeAttribute = Records.newRecord(NodeAttribute.class);
nodeAttribute.setAttributeName(attributeName);
nodeAttribute.setAttributeType(attributeType);
nodeAttribute.setAttributeValue(attributeValue);
return nodeAttribute;
}
@Public
@Unstable
public abstract String getAttributeName();
@Public
@Unstable
public abstract void setAttributeName(String attributeName);
@Public
@Unstable
public abstract String getAttributeValue();
@Public
@Unstable
public abstract void setAttributeValue(String attributeValue);
@Public
@Unstable
public abstract NodeAttributeType getAttributeType();
@Public
@Unstable
public abstract void setAttributeType(NodeAttributeType attributeType);
}

View File

@ -0,0 +1,35 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api.records;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/**
* <p>
* Type of a <code>node Attribute</code>.
* </p>
* Based on this type, attribute expressions and values will be evaluated.
*/
@Public
@Unstable
public enum NodeAttributeType {
/** string type node attribute. */
STRING
}

View File

@ -30,6 +30,8 @@
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest;
@ -37,6 +39,8 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse;
@ -52,8 +56,6 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesResponse;
@Private
public interface ResourceManagerAdministrationProtocol extends GetUserMappingsProtocol {
@ -144,4 +146,11 @@ public CheckForDecommissioningNodesResponse checkForDecommissioningNodes(
public RefreshClusterMaxPriorityResponse refreshClusterMaxPriority(
RefreshClusterMaxPriorityRequest request) throws YarnException,
IOException;
@Private
@Idempotent
public NodesToAttributesMappingResponse mapAttributesToNodes(
NodesToAttributesMappingRequest request) throws YarnException,
IOException;
}

View File

@ -0,0 +1,42 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/**
* <p>
* Type of node to attribute mapping operation.
* </p>
*
*/
@Public
@Unstable
public enum AttributeMappingOperationType {
/** Replaces the existing node to attribute mapping with new mapping.*/
REPLACE,
/** Add attribute(s) to a node and if it already exists will update the
* value.*/
ADD,
/** Removes attribute(s) mapped to a node. */
REMOVE
}

View File

@ -0,0 +1,59 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.NodeAttribute;
import org.apache.hadoop.yarn.util.Records;
/**
* Represents a mapping of Node id to list of attributes.
*/
@Public
@Unstable
public abstract class NodeToAttributes {
public static NodeToAttributes newInstance(String node,
List<NodeAttribute> attributes) {
NodeToAttributes nodeIdToAttributes =
Records.newRecord(NodeToAttributes.class);
nodeIdToAttributes.setNode(node);
nodeIdToAttributes.setNodeAttributes(attributes);
return nodeIdToAttributes;
}
@Public
@Unstable
public abstract String getNode();
@Public
@Unstable
public abstract void setNode(String node);
@Public
@Unstable
public abstract List<NodeAttribute> getNodeAttributes();
@Public
@Unstable
public abstract void setNodeAttributes(List<NodeAttribute> attributes);
}

View File

@ -0,0 +1,69 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.util.Records;
/**
* Request carrying a list of node-to-attributes mappings and the operation
* (replace/add/remove) to apply to them.
*/
@Public
@Unstable
public abstract class NodesToAttributesMappingRequest {
public static NodesToAttributesMappingRequest newInstance(
AttributeMappingOperationType operation,
List<NodeToAttributes> nodesToAttributes, boolean failOnUnknownNodes) {
NodesToAttributesMappingRequest request =
Records.newRecord(NodesToAttributesMappingRequest.class);
request.setNodesToAttributes(nodesToAttributes);
request.setFailOnUnknownNodes(failOnUnknownNodes);
request.setOperation(operation);
return request;
}
@Public
@Unstable
public abstract void setNodesToAttributes(
List<NodeToAttributes> nodesToAttributes);
@Public
@Unstable
public abstract List<NodeToAttributes> getNodesToAttributes();
@Public
@Unstable
public abstract void setFailOnUnknownNodes(boolean failOnUnknownNodes);
@Public
@Unstable
public abstract boolean getFailOnUnknownNodes();
@Public
@Unstable
public abstract void setOperation(AttributeMappingOperationType operation);
@Public
@Unstable
public abstract AttributeMappingOperationType getOperation();
}

View File

@ -0,0 +1,27 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.util.Records;
public class NodesToAttributesMappingResponse {
public static NodesToAttributesMappingResponse newInstance() {
return Records.newRecord(NodesToAttributesMappingResponse.class);
}
}

View File

@ -45,4 +45,5 @@ service ResourceManagerAdministrationProtocolService {
rpc replaceLabelsOnNodes(ReplaceLabelsOnNodeRequestProto) returns (ReplaceLabelsOnNodeResponseProto);
rpc checkForDecommissioningNodes(CheckForDecommissioningNodesRequestProto) returns (CheckForDecommissioningNodesResponseProto);
rpc refreshClusterMaxPriority(RefreshClusterMaxPriorityRequestProto) returns (RefreshClusterMaxPriorityResponseProto);
rpc mapAttributesToNodes(NodesToAttributesMappingRequestProto) returns (NodesToAttributesMappingResponseProto);
}

View File

@ -130,6 +130,27 @@ enum DecommissionTypeProto {
GRACEFUL = 2;
FORCEFUL = 3;
}
enum AttributeMappingOperationTypeProto {
REPLACE = 1;
ADD = 2;
REMOVE = 3;
}
message NodesToAttributesMappingRequestProto {
optional AttributeMappingOperationTypeProto operation = 1 [default = REPLACE];
repeated NodeToAttributesProto nodeToAttributes = 2;
optional bool failOnUnknownNodes = 3;
}
message NodeToAttributesProto {
optional string node = 1;
repeated NodeAttributeProto nodeAttributes = 2;
}
message NodesToAttributesMappingResponseProto {
}
//////////////////////////////////////////////////////////////////
///////////// RM Failover related records ////////////////////////
//////////////////////////////////////////////////////////////////

View File

@ -372,6 +372,17 @@ message NodeLabelProto {
optional bool isExclusive = 2 [default = true];
}
enum NodeAttributeTypeProto {
STRING = 1;
}
message NodeAttributeProto {
optional string attributeName = 1;
optional NodeAttributeTypeProto attributeType = 2;
optional string attributeValue = 3;
}
enum ContainerTypeProto {
APPLICATION_MASTER = 1;
TASK = 2;

View File

@ -0,0 +1,155 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api.records.impl.pb;
import org.apache.hadoop.yarn.api.records.NodeAttribute;
import org.apache.hadoop.yarn.api.records.NodeAttributeType;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeProtoOrBuilder;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeTypeProto;
public class NodeAttributePBImpl extends NodeAttribute {
private NodeAttributeProto proto = NodeAttributeProto.getDefaultInstance();
private NodeAttributeProto.Builder builder = null;
private boolean viaProto = false;
public NodeAttributePBImpl() {
builder = NodeAttributeProto.newBuilder();
}
public NodeAttributePBImpl(NodeAttributeProto proto) {
this.proto = proto;
viaProto = true;
}
public NodeAttributeProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = NodeAttributeProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public String getAttributeName() {
NodeAttributeProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasAttributeName()) {
return null;
}
return p.getAttributeName();
}
@Override
public void setAttributeName(String attributeName) {
maybeInitBuilder();
builder.setAttributeName(attributeName);
}
@Override
public String getAttributeValue() {
NodeAttributeProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasAttributeValue()) {
return null;
}
return p.getAttributeValue();
}
@Override
public void setAttributeValue(String attributeValue) {
maybeInitBuilder();
builder.setAttributeValue(attributeValue);
}
@Override
public NodeAttributeType getAttributeType() {
NodeAttributeProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasAttributeType()) {
return null;
}
return convertFromProtoFormat(p.getAttributeType());
}
@Override
public void setAttributeType(NodeAttributeType attributeType) {
maybeInitBuilder();
if (attributeType == null) {
builder.clearAttributeType();
return;
}
builder.setAttributeType(convertToProtoFormat(attributeType));
}
private NodeAttributeTypeProto convertToProtoFormat(
NodeAttributeType attributeType) {
return NodeAttributeTypeProto.valueOf(attributeType.name());
}
private NodeAttributeType convertFromProtoFormat(
NodeAttributeTypeProto containerState) {
return NodeAttributeType.valueOf(containerState.name());
}
@Override
public String toString() {
return " name-" + getAttributeName() + ":value-" + getAttributeValue()
+ ":type-" + getAttributeType();
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (obj instanceof NodeAttribute) {
NodeAttribute other = (NodeAttribute) obj;
if (!compare(getAttributeName(), other.getAttributeName())) {
return false;
}
if (!compare(getAttributeValue(), other.getAttributeValue())) {
return false;
}
if (!compare(getAttributeType(), other.getAttributeType())) {
return false;
}
return true;
}
return false;
}
private static boolean compare(Object left, Object right) {
if (left == null) {
return right == null;
} else {
return left.equals(right);
}
}
}

View File

@ -33,9 +33,11 @@
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodesToAttributesMappingRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshClusterMaxPriorityRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto;
@ -43,13 +45,14 @@
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB;
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest;
@ -76,11 +79,15 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodesToAttributesMappingRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodesToAttributesMappingResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshClusterMaxPriorityRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshClusterMaxPriorityResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl;
@ -96,8 +103,6 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesResponsePBImpl;
import com.google.protobuf.ServiceException;
@ -323,4 +328,19 @@ public RefreshClusterMaxPriorityResponse refreshClusterMaxPriority(
return null;
}
}
@Override
public NodesToAttributesMappingResponse mapAttributesToNodes(
NodesToAttributesMappingRequest request)
throws YarnException, IOException {
NodesToAttributesMappingRequestProto requestProto =
((NodesToAttributesMappingRequestPBImpl) request).getProto();
try {
return new NodesToAttributesMappingResponsePBImpl(
proxy.mapAttributesToNodes(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
}

View File

@ -28,11 +28,15 @@
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodesToAttributesMappingRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodesToAttributesMappingResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshClusterMaxPriorityRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshClusterMaxPriorityResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesResponseProto;
@ -48,13 +52,13 @@
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesResponseProto;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB;
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityResponse;
@ -71,11 +75,15 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodesToAttributesMappingRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodesToAttributesMappingResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshClusterMaxPriorityRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshClusterMaxPriorityResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl;
@ -91,8 +99,6 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesResponsePBImpl;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@ -336,4 +342,21 @@ public RefreshClusterMaxPriorityResponseProto refreshClusterMaxPriority(
throw new ServiceException(e);
}
}
@Override
public NodesToAttributesMappingResponseProto mapAttributesToNodes(
RpcController controller, NodesToAttributesMappingRequestProto proto)
throws ServiceException {
NodesToAttributesMappingRequest request =
new NodesToAttributesMappingRequestPBImpl(proto);
try {
NodesToAttributesMappingResponse response =
real.mapAttributesToNodes(request);
return ((NodesToAttributesMappingResponsePBImpl) response).getProto();
} catch (YarnException e) {
throw new ServiceException(e);
} catch (IOException e) {
throw new ServiceException(e);
}
}
}

View File

@ -0,0 +1,161 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.yarn.api.records.NodeAttribute;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeAttributePBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodeToAttributesProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodeToAttributesProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes;
public class NodeToAttributesPBImpl extends NodeToAttributes {

  private NodeToAttributesProto proto =
      NodeToAttributesProto.getDefaultInstance();
  private NodeToAttributesProto.Builder builder = null;
  private boolean viaProto = false;

  // Local, lazily-initialized view of the attribute list. When non-null it is
  // the source of truth and is merged back into the proto on getProto().
  private List<NodeAttribute> nodeAttributes = null;

  public NodeToAttributesPBImpl() {
    builder = NodeToAttributesProto.newBuilder();
  }

  public NodeToAttributesPBImpl(NodeToAttributesProto proto) {
    this.proto = proto;
    viaProto = true;
  }

  /**
   * Merges the locally cached attribute list into the builder and rebuilds
   * the proto. The builder's existing repeated entries are cleared first so
   * that calling {@link #getProto()} more than once does not duplicate them.
   */
  private void mergeLocalToProto() {
    if (viaProto) {
      maybeInitBuilder();
    }
    if (this.nodeAttributes != null) {
      // Reset before re-adding: maybeInitBuilder() seeds the builder from the
      // previous proto, which may already contain these entries.
      builder.clearNodeAttributes();
      for (NodeAttribute nodeAttribute : nodeAttributes) {
        builder.addNodeAttributes(
            ((NodeAttributePBImpl) nodeAttribute).getProto());
      }
    }
    proto = builder.build();
    viaProto = true;
  }

  /**
   * @return the protobuf message representing the current state of this
   *         record, including any local modifications.
   */
  public NodeToAttributesProto getProto() {
    mergeLocalToProto();
    return proto;
  }

  private void maybeInitBuilder() {
    if (viaProto || builder == null) {
      builder = NodeToAttributesProto.newBuilder(proto);
    }
    viaProto = false;
  }

  @Override
  public String getNode() {
    NodeToAttributesProtoOrBuilder p = viaProto ? proto : builder;
    if (!p.hasNode()) {
      return null;
    }
    return p.getNode();
  }

  @Override
  public void setNode(String node) {
    maybeInitBuilder();
    if (node == null) {
      // Protobuf setters reject null; clearing keeps getNode() returning null.
      builder.clearNode();
      return;
    }
    builder.setNode(node);
  }

  /**
   * Populates the local attribute list from the proto/builder on first use.
   */
  private void initNodeAttributes() {
    if (this.nodeAttributes != null) {
      return;
    }
    NodeToAttributesProtoOrBuilder p = viaProto ? proto : builder;
    List<NodeAttributeProto> nodeAttributesProtoList =
        p.getNodeAttributesList();
    List<NodeAttribute> attributes = new ArrayList<>();
    if (nodeAttributesProtoList == null
        || nodeAttributesProtoList.size() == 0) {
      this.nodeAttributes = attributes;
      return;
    }
    for (NodeAttributeProto nodeAttributeProto : nodeAttributesProtoList) {
      attributes.add(new NodeAttributePBImpl(nodeAttributeProto));
    }
    this.nodeAttributes = attributes;
  }

  @Override
  public List<NodeAttribute> getNodeAttributes() {
    initNodeAttributes();
    return this.nodeAttributes;
  }

  @Override
  public void setNodeAttributes(List<NodeAttribute> attributes) {
    // Fail fast on null, consistent with the request record's setter.
    if (attributes == null) {
      throw new IllegalArgumentException("attributes cannot be null");
    }
    if (nodeAttributes == null) {
      nodeAttributes = new ArrayList<>();
    }
    nodeAttributes.clear();
    nodeAttributes.addAll(attributes);
  }

  @Override
  public int hashCode() {
    return getProto().hashCode();
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj instanceof NodeToAttributes) {
      NodeToAttributes other = (NodeToAttributes) obj;
      List<NodeAttribute> attributes = getNodeAttributes();
      List<NodeAttribute> otherAttributes = other.getNodeAttributes();
      if (attributes == null) {
        if (otherAttributes != null) {
          return false;
        }
      } else if (otherAttributes == null
          || attributes.size() != otherAttributes.size()
          || !attributes.containsAll(otherAttributes)) {
        // Size check plus containsAll gives an order-insensitive comparison
        // and avoids the asymmetry (and NPE) of a one-sided containsAll.
        return false;
      }
      if (getNode() == null) {
        if (other.getNode() != null) {
          return false;
        }
      } else if (!getNode().equals(other.getNode())) {
        return false;
      }
      return true;
    }
    return false;
  }
}

View File

@ -0,0 +1,194 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AttributeMappingOperationTypeProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodeToAttributesProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodesToAttributesMappingRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodesToAttributesMappingRequestProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.protocolrecords.AttributeMappingOperationType;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
public class NodesToAttributesMappingRequestPBImpl
extends NodesToAttributesMappingRequest {
private NodesToAttributesMappingRequestProto proto =
NodesToAttributesMappingRequestProto.getDefaultInstance();
private NodesToAttributesMappingRequestProto.Builder builder = null;
private boolean viaProto = false;
private List<NodeToAttributes> nodeAttributesMapping = null;
public NodesToAttributesMappingRequestPBImpl() {
builder = NodesToAttributesMappingRequestProto.newBuilder();
}
public NodesToAttributesMappingRequestPBImpl(
NodesToAttributesMappingRequestProto proto) {
this.proto = proto;
viaProto = true;
}
private void mergeLocalToProto() {
if (viaProto) {
maybeInitBuilder();
}
if (this.nodeAttributesMapping != null) {
for (NodeToAttributes nodeAttributes : nodeAttributesMapping) {
builder.addNodeToAttributes(
((NodeToAttributesPBImpl) nodeAttributes).getProto());
}
}
proto = builder.build();
viaProto = true;
}
public NodesToAttributesMappingRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = NodesToAttributesMappingRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public void setNodesToAttributes(List<NodeToAttributes> nodesToAttributes) {
if (nodeAttributesMapping == null) {
nodeAttributesMapping = new ArrayList<>();
}
if(nodesToAttributes == null) {
throw new IllegalArgumentException("nodesToAttributes cannot be null");
}
nodeAttributesMapping.clear();
nodeAttributesMapping.addAll(nodesToAttributes);
}
private void initNodeAttributesMapping() {
if (this.nodeAttributesMapping != null) {
return;
}
NodesToAttributesMappingRequestProtoOrBuilder p =
viaProto ? proto : builder;
List<NodeToAttributesProto> nodeAttributesProtoList =
p.getNodeToAttributesList();
List<NodeToAttributes> attributes = new ArrayList<>();
if (nodeAttributesProtoList == null
|| nodeAttributesProtoList.size() == 0) {
this.nodeAttributesMapping = attributes;
return;
}
for (NodeToAttributesProto nodeAttributeProto : nodeAttributesProtoList) {
attributes.add(new NodeToAttributesPBImpl(nodeAttributeProto));
}
this.nodeAttributesMapping = attributes;
}
@Override
public List<NodeToAttributes> getNodesToAttributes() {
initNodeAttributesMapping();
return this.nodeAttributesMapping;
}
@Override
public void setFailOnUnknownNodes(boolean failOnUnknownNodes) {
maybeInitBuilder();
builder.setFailOnUnknownNodes(failOnUnknownNodes);
}
@Override
public boolean getFailOnUnknownNodes() {
NodesToAttributesMappingRequestProtoOrBuilder p =
viaProto ? proto : builder;
return p.getFailOnUnknownNodes();
}
@Override
public void setOperation(AttributeMappingOperationType operation) {
maybeInitBuilder();
builder.setOperation(convertToProtoFormat(operation));
}
private AttributeMappingOperationTypeProto convertToProtoFormat(
AttributeMappingOperationType operation) {
return AttributeMappingOperationTypeProto.valueOf(operation.name());
}
private AttributeMappingOperationType convertFromProtoFormat(
AttributeMappingOperationTypeProto operationTypeProto) {
return AttributeMappingOperationType.valueOf(operationTypeProto.name());
}
@Override
public AttributeMappingOperationType getOperation() {
NodesToAttributesMappingRequestProtoOrBuilder p =
viaProto ? proto : builder;
if (!p.hasOperation()) {
return null;
}
return convertFromProtoFormat(p.getOperation());
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (obj instanceof NodesToAttributesMappingRequest) {
NodesToAttributesMappingRequest other =
(NodesToAttributesMappingRequest) obj;
if (getNodesToAttributes() == null) {
if (other.getNodesToAttributes() != null) {
return false;
}
} else if (!getNodesToAttributes()
.containsAll(other.getNodesToAttributes())) {
return false;
}
if (getOperation() == null) {
if (other.getOperation() != null) {
return false;
}
} else if (!getOperation().equals(other.getOperation())) {
return false;
}
return getFailOnUnknownNodes() == other.getFailOnUnknownNodes();
}
return false;
}
}

View File

@ -0,0 +1,47 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodesToAttributesMappingResponseProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingResponse;
public class NodesToAttributesMappingResponsePBImpl
    extends NodesToAttributesMappingResponse {

  private NodesToAttributesMappingResponseProto proto =
      NodesToAttributesMappingResponseProto.getDefaultInstance();
  private NodesToAttributesMappingResponseProto.Builder builder = null;
  private boolean viaProto = false;

  public NodesToAttributesMappingResponsePBImpl() {
    builder = NodesToAttributesMappingResponseProto.newBuilder();
  }

  public NodesToAttributesMappingResponsePBImpl(
      NodesToAttributesMappingResponseProto proto) {
    this.proto = proto;
    viaProto = true;
  }

  /**
   * Returns the protobuf message backing this response, building it from the
   * builder on first access when the record was created locally.
   */
  public NodesToAttributesMappingResponseProto getProto() {
    if (!viaProto) {
      proto = builder.build();
      viaProto = true;
    }
    return proto;
  }
}

View File

@ -124,6 +124,7 @@
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LogAggregationContext;
import org.apache.hadoop.yarn.api.records.NMToken;
import org.apache.hadoop.yarn.api.records.NodeAttribute;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.NodeReport;
@ -176,6 +177,7 @@
import org.apache.hadoop.yarn.api.records.impl.pb.ExecutionTypeRequestPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NMTokenPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeAttributePBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeReportPBImpl;
@ -214,6 +216,7 @@
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ExecutionTypeRequestProto;
import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto;
@ -238,6 +241,8 @@
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodeToAttributesProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodesToAttributesMappingRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
@ -320,10 +325,14 @@
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceProfilesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetResourceProfileRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetResourceProfileResponseProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeToAttributesPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodesToAttributesMappingRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
@ -434,7 +443,12 @@ public static void setup() throws Exception {
generateByNewInstance(ResourceSizing.class);
generateByNewInstance(SchedulingRequest.class);
generateByNewInstance(RejectedSchedulingRequest.class);
}
//for Node attribute support
generateByNewInstance(NodeAttribute.class);
generateByNewInstance(NodeToAttributes.class);
generateByNewInstance(NodesToAttributesMappingRequest.class);
}
@Test
public void testAllocateRequestPBImpl() throws Exception {
@ -1228,4 +1242,22 @@ public void testGetAllResourceTypesInfoResponsePBImpl() throws Exception {
validatePBImplRecord(GetAllResourceTypeInfoResponsePBImpl.class,
YarnServiceProtos.GetAllResourceTypeInfoResponseProto.class);
}
// Checks NodeAttributePBImpl against its proto type via validatePBImplRecord.
@Test
public void testNodeAttributePBImpl() throws Exception {
validatePBImplRecord(NodeAttributePBImpl.class,
NodeAttributeProto.class);
}
// Checks NodeToAttributesPBImpl against its proto type via
// validatePBImplRecord.
@Test
public void testNodeToAttributesPBImpl() throws Exception {
validatePBImplRecord(NodeToAttributesPBImpl.class,
NodeToAttributesProto.class);
}
// Checks NodesToAttributesMappingRequestPBImpl against its proto type via
// validatePBImplRecord.
@Test
public void testNodesToAttributesMappingRequestPBImpl() throws Exception {
validatePBImplRecord(NodesToAttributesMappingRequestPBImpl.class,
NodesToAttributesMappingRequestProto.class);
}
}

View File

@ -164,6 +164,8 @@
import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingResponse;
import com.google.common.base.Strings;
@ -181,15 +183,15 @@ public class MockResourceManagerFacade implements ApplicationClientProtocol,
private HashSet<ApplicationId> applicationMap = new HashSet<>();
private HashSet<ApplicationId> keepContainerOnUams = new HashSet<>();
private HashMap<ApplicationAttemptId, List<ContainerId>>
applicationContainerIdMap = new HashMap<>();
private HashMap<ApplicationAttemptId, List<ContainerId>> applicationContainerIdMap =
new HashMap<ApplicationAttemptId, List<ContainerId>>();
private AtomicInteger containerIndex = new AtomicInteger(0);
private Configuration conf;
private int subClusterId;
final private AtomicInteger applicationCounter = new AtomicInteger(0);
// True if the Mock RM is running, false otherwise.
// This property allows us to write tests for specific scenario as YARN RM
// This property allows us to write tests for specific scenario as Yarn RM
// down e.g. network issue, failover.
private boolean isRunning;
@ -487,7 +489,7 @@ public SubmitApplicationResponse submitApplication(
if (request.getApplicationSubmissionContext().getUnmanagedAM()
|| request.getApplicationSubmissionContext()
.getKeepContainersAcrossApplicationAttempts()) {
keepContainerOnUams.add(appId);
keepContainerOnUams.add(appId);
}
return SubmitApplicationResponse.newInstance();
}
@ -505,7 +507,6 @@ public KillApplicationResponse forceKillApplication(
throw new ApplicationNotFoundException(
"Trying to kill an absent application: " + appId);
}
keepContainerOnUams.remove(appId);
}
LOG.info("Force killing application: " + appId);
return KillApplicationResponse.newInstance(true);
@ -893,4 +894,10 @@ public GetAllResourceTypeInfoResponse getResourceTypeInfo(
GetAllResourceTypeInfoRequest request) throws YarnException, IOException {
return null;
}
@Override
public NodesToAttributesMappingResponse mapAttributesToNodes(
    NodesToAttributesMappingRequest request) throws YarnException,
    IOException {
  // Not simulated by this mock; returns null like the other unimplemented
  // admin operations in this class.
  return null;
}
}

View File

@ -66,6 +66,8 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest;
@ -961,4 +963,12 @@ private void refreshClusterMaxPriority() throws IOException, YarnException {
rm.getRMContext().getScheduler().setClusterMaxPriority(conf);
}
@Override
// NOTE(review): only returns a fresh, empty response record; no mapping
// logic is applied here — confirm this is the intended placeholder.
public NodesToAttributesMappingResponse mapAttributesToNodes(
NodesToAttributesMappingRequest request)
throws YarnException, IOException {
return recordFactory
.newRecordInstance(NodesToAttributesMappingResponse.class);
}
}

View File

@ -32,6 +32,8 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest;
@ -212,4 +214,11 @@ public RefreshClusterMaxPriorityResponse refreshClusterMaxPriority(
public String[] getGroupsForUser(String userName) throws IOException {
return rmAdminProxy.getGroupsForUser(userName);
}
@Override
// Pass-through: forwards the call to the underlying RM admin proxy.
public NodesToAttributesMappingResponse mapAttributesToNodes(
NodesToAttributesMappingRequest request)
throws YarnException, IOException {
return rmAdminProxy.mapAttributesToNodes(request);
}
}

View File

@ -43,6 +43,8 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest;
@ -419,4 +421,12 @@ public RefreshClusterMaxPriorityResponse refreshClusterMaxPriority(
RequestInterceptorChainWrapper pipeline = getInterceptorChain();
return pipeline.getRootInterceptor().refreshClusterMaxPriority(request);
}
@Override
// Delegates to the root of the request interceptor chain, matching the
// other admin operations in this class.
public NodesToAttributesMappingResponse mapAttributesToNodes(
NodesToAttributesMappingRequest request)
throws YarnException, IOException {
RequestInterceptorChainWrapper pipeline = getInterceptorChain();
return pipeline.getRootInterceptor().mapAttributesToNodes(request);
}
}

View File

@ -26,6 +26,8 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest;
@ -145,4 +147,11 @@ public String[] getGroupsForUser(String user) throws IOException {
return getNextInterceptor().getGroupsForUser(user);
}
@Override
// Default pass-through: hands the request to the next interceptor in the
// chain.
public NodesToAttributesMappingResponse mapAttributesToNodes(
NodesToAttributesMappingRequest request)
throws YarnException, IOException {
return getNextInterceptor().mapAttributesToNodes(request);
}
}