YARN-3345. Add non-exclusive node label API. Contributed by Wangda Tan
(cherry picked from commit e1feb4ea1a)
Parent: 9aedb3d688
Commit: 9c494cedaf
@@ -6,6 +6,8 @@ Release 2.8.0 - UNRELEASED
|
|||
|
||||
NEW FEATURES
|
||||
|
||||
YARN-3345. Add non-exclusive node label API. (Wangda Tan via jianhe)
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
YARN-3243. CapacityScheduler should pass headroom from parent to children
|
||||
|
|
|
@@ -0,0 +1,55 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.yarn.api.records;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Public;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Stable;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
||||
import org.apache.hadoop.yarn.util.Records;
|
||||
|
||||
@Public
|
||||
@Unstable
|
||||
public abstract class NodeLabel {
|
||||
@Public
|
||||
@Unstable
|
||||
public static NodeLabel newInstance(String nodeLabel,
|
||||
boolean isExclusive) {
|
||||
NodeLabel request =
|
||||
Records.newRecord(NodeLabel.class);
|
||||
request.setNodeLabel(nodeLabel);
|
||||
request.setIsExclusive(isExclusive);
|
||||
return request;
|
||||
}
|
||||
|
||||
@Public
|
||||
@Stable
|
||||
public abstract String getNodeLabel();
|
||||
|
||||
@Public
|
||||
@Unstable
|
||||
public abstract void setNodeLabel(String nodeLabel);
|
||||
|
||||
@Public
|
||||
@Stable
|
||||
public abstract boolean getIsExclusive();
|
||||
|
||||
@Public
|
||||
@Unstable
|
||||
public abstract void setIsExclusive(boolean isExclusive);
|
||||
}
|
|
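Reviewer note (not part of this patch): a minimal usage sketch of the new record; the "gpu" label name is only an illustration. NodeLabel.newInstance goes through Records.newRecord, so the PB implementation from hadoop-yarn-common must be on the classpath for this to run.

import org.apache.hadoop.yarn.api.records.NodeLabel;

public class NodeLabelUsageSketch {
  public static void main(String[] args) {
    // isExclusive = false marks the label partition as non-exclusive (shareable),
    // which is the new piece of state this API adds alongside the label name.
    NodeLabel gpu = NodeLabel.newInstance("gpu", false);
    System.out.println(gpu.getNodeLabel() + " exclusive=" + gpu.getIsExclusive());
  }
}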
@@ -48,6 +48,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLa
|
|||
import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeLabelsRequest;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeLabelsResponse;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
|
||||
|
||||
|
@@ -120,8 +122,8 @@ public interface ResourceManagerAdministrationProtocol extends GetUserMappingsPr
|
|||
@Public
|
||||
@Evolving
|
||||
@Idempotent
|
||||
public AddToClusterNodeLabelsResponse addToClusterNodeLabels(AddToClusterNodeLabelsRequest request)
|
||||
throws YarnException, IOException;
|
||||
public AddToClusterNodeLabelsResponse addToClusterNodeLabels(
|
||||
AddToClusterNodeLabelsRequest request) throws YarnException, IOException;
|
||||
|
||||
@Public
|
||||
@Evolving
|
||||
|
@@ -134,4 +136,10 @@ public interface ResourceManagerAdministrationProtocol extends GetUserMappingsPr
|
|||
@Idempotent
|
||||
public ReplaceLabelsOnNodeResponse replaceLabelsOnNode(
|
||||
ReplaceLabelsOnNodeRequest request) throws YarnException, IOException;
|
||||
|
||||
@Public
|
||||
@Evolving
|
||||
@Idempotent
|
||||
public UpdateNodeLabelsResponse updateNodeLabels(
|
||||
UpdateNodeLabelsRequest request) throws YarnException, IOException;
|
||||
}
|
||||
|
|
|
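Reviewer note (not part of this patch): a hedged sketch of how an admin-side caller would drive the new updateNodeLabels RPC added to the protocol above; the proxy parameter and the "gpu" label are assumptions for illustration only.

import java.util.Arrays;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeLabelsRequest;

public class UpdateNodeLabelsSketch {
  // "admin" is assumed to be an already-built RPC proxy to the RM admin service.
  public static void markGpuShareable(ResourceManagerAdministrationProtocol admin)
      throws Exception {
    UpdateNodeLabelsRequest request = UpdateNodeLabelsRequest.newInstance(
        Arrays.asList(NodeLabel.newInstance("gpu", false)));
    // The method is annotated @Idempotent, so retrying across RM failover is safe.
    admin.updateNodeLabels(request);
  }
}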
@@ -0,0 +1,49 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.yarn.server.api.protocolrecords;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Public;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
||||
import org.apache.hadoop.yarn.api.records.NodeLabel;
|
||||
import org.apache.hadoop.yarn.util.Records;
|
||||
|
||||
@Public
|
||||
@Unstable
|
||||
public abstract class UpdateNodeLabelsRequest {
|
||||
@Public
|
||||
@Unstable
|
||||
public static UpdateNodeLabelsRequest newInstance(
|
||||
List<NodeLabel> NodeLabels) {
|
||||
UpdateNodeLabelsRequest request =
|
||||
Records.newRecord(UpdateNodeLabelsRequest.class);
|
||||
request.setNodeLabels(NodeLabels);
|
||||
return request;
|
||||
}
|
||||
|
||||
@Public
|
||||
@Unstable
|
||||
public abstract void setNodeLabels(
|
||||
List<NodeLabel> NodeLabels);
|
||||
|
||||
@Public
|
||||
@Unstable
|
||||
public abstract List<NodeLabel> getNodeLabels();
|
||||
}
|
|
@@ -0,0 +1,37 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.yarn.server.api.protocolrecords;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Private;
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Public;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
||||
import org.apache.hadoop.yarn.util.Records;
|
||||
|
||||
@Public
|
||||
@Unstable
|
||||
public class UpdateNodeLabelsResponse {
|
||||
|
||||
@Private
|
||||
@Unstable
|
||||
public static UpdateNodeLabelsResponse newInstance() {
|
||||
UpdateNodeLabelsResponse response =
|
||||
Records.newRecord(UpdateNodeLabelsResponse.class);
|
||||
return response;
|
||||
}
|
||||
}
|
|
@@ -42,4 +42,5 @@ service ResourceManagerAdministrationProtocolService {
|
|||
rpc addToClusterNodeLabels(AddToClusterNodeLabelsRequestProto) returns (AddToClusterNodeLabelsResponseProto);
|
||||
rpc removeFromClusterNodeLabels(RemoveFromClusterNodeLabelsRequestProto) returns (RemoveFromClusterNodeLabelsResponseProto);
|
||||
rpc replaceLabelsOnNodes(ReplaceLabelsOnNodeRequestProto) returns (ReplaceLabelsOnNodeResponseProto);
|
||||
rpc updateNodeLabels(UpdateNodeLabelsRequestProto) returns (UpdateNodeLabelsResponseProto);
|
||||
}
|
||||
|
|
|
@@ -97,6 +97,14 @@ message ReplaceLabelsOnNodeResponseProto {
|
|||
|
||||
}
|
||||
|
||||
message UpdateNodeLabelsRequestProto {
|
||||
repeated NodeLabelProto nodeLabels = 1;
|
||||
}
|
||||
|
||||
|
||||
message UpdateNodeLabelsResponseProto {
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////
|
||||
///////////// RM Failover related records ////////////////////////
|
||||
//////////////////////////////////////////////////////////////////
|
||||
|
|
|
@@ -244,6 +244,11 @@ message LabelsToNodeIdsProto {
|
|||
repeated NodeIdProto nodeId = 2;
|
||||
}
|
||||
|
||||
message NodeLabelProto {
|
||||
optional string nodeLabel = 1;
|
||||
optional bool isExclusive = 2 [default = true];
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
////// From AM_RM_Protocol /////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
|
|
|
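Reviewer note (not part of this patch): the [default = true] on isExclusive means a NodeLabelProto written without the field still reads back as exclusive, so previously stored labels keep their old behavior. A small illustration using the PB impl introduced below:

import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;

public class NodeLabelDefaultCheck {
  public static void main(String[] args) {
    // A proto with no isExclusive field set: the declared default applies.
    NodeLabelProto bare = NodeLabelProto.newBuilder().setNodeLabel("x").build();
    NodeLabelPBImpl label = new NodeLabelPBImpl(bare);
    // Prints true, i.e. labels created before this change stay exclusive.
    System.out.println(label.getIsExclusive());
  }
}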
@@ -0,0 +1,106 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.yarn.api.records.impl.pb;
|
||||
|
||||
import org.apache.hadoop.yarn.api.records.NodeLabel;
|
||||
import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;
|
||||
import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProtoOrBuilder;
|
||||
|
||||
public class NodeLabelPBImpl extends NodeLabel {
|
||||
NodeLabelProto proto =
|
||||
NodeLabelProto.getDefaultInstance();
|
||||
NodeLabelProto.Builder builder = null;
|
||||
boolean viaProto = false;
|
||||
|
||||
public NodeLabelPBImpl() {
|
||||
builder = NodeLabelProto.newBuilder();
|
||||
}
|
||||
|
||||
public NodeLabelPBImpl(NodeLabelProto proto) {
|
||||
this.proto = proto;
|
||||
viaProto = true;
|
||||
}
|
||||
|
||||
public NodeLabelProto getProto() {
|
||||
mergeLocalToProto();
|
||||
proto = viaProto ? proto : builder.build();
|
||||
viaProto = true;
|
||||
return proto;
|
||||
}
|
||||
|
||||
private void mergeLocalToProto() {
|
||||
if (viaProto)
|
||||
maybeInitBuilder();
|
||||
proto = builder.build();
|
||||
viaProto = true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object other) {
|
||||
if (other == null)
|
||||
return false;
|
||||
if (other.getClass().isAssignableFrom(this.getClass())) {
|
||||
return this.getProto().equals(this.getClass().cast(other).getProto());
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private void maybeInitBuilder() {
|
||||
if (viaProto || builder == null) {
|
||||
builder = NodeLabelProto.newBuilder(proto);
|
||||
}
|
||||
viaProto = false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return getProto().hashCode();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getNodeLabel() {
|
||||
NodeLabelProtoOrBuilder p = viaProto ? proto : builder;
|
||||
if (!p.hasNodeLabel()) {
|
||||
return null;
|
||||
}
|
||||
return (p.getNodeLabel());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setNodeLabel(String nodeLabel) {
|
||||
maybeInitBuilder();
|
||||
if (nodeLabel == null) {
|
||||
builder.clearNodeLabel();
|
||||
return;
|
||||
}
|
||||
builder.setNodeLabel(nodeLabel);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean getIsExclusive() {
|
||||
NodeLabelProtoOrBuilder p = viaProto ? proto : builder;
|
||||
return p.getIsExclusive();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setIsExclusive(boolean isExclusive) {
|
||||
maybeInitBuilder();
|
||||
builder.setIsExclusive(isExclusive);
|
||||
}
|
||||
|
||||
}
|
|
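Reviewer note (not part of this patch): a quick round-trip sketch for the new PB impl. Both equals() and hashCode() delegate to the built proto, so two impls carrying the same fields compare equal.

import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;

public class NodeLabelPBRoundTrip {
  public static void main(String[] args) {
    NodeLabelPBImpl original = new NodeLabelPBImpl();
    original.setNodeLabel("gpu");
    original.setIsExclusive(false);

    // Serialize to the wire form and rebuild from it.
    NodeLabelProto proto = original.getProto();
    NodeLabelPBImpl copy = new NodeLabelPBImpl(proto);

    System.out.println(copy.getNodeLabel() + " exclusive=" + copy.getIsExclusive());
    System.out.println(original.equals(copy));   // true: compares the underlying protos
  }
}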
@@ -24,6 +24,7 @@ import java.util.Collections;
|
|||
import java.util.EnumSet;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.Set;
|
||||
|
@@ -40,16 +41,19 @@ import org.apache.commons.logging.LogFactory;
|
|||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.service.AbstractService;
|
||||
import org.apache.hadoop.yarn.api.records.NodeId;
|
||||
import org.apache.hadoop.yarn.api.records.NodeLabel;
|
||||
import org.apache.hadoop.yarn.api.records.Resource;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.event.AsyncDispatcher;
|
||||
import org.apache.hadoop.yarn.event.Dispatcher;
|
||||
import org.apache.hadoop.yarn.event.EventHandler;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnException;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
|
||||
import org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent;
|
||||
import org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEventType;
|
||||
import org.apache.hadoop.yarn.nodelabels.event.RemoveClusterNodeLabels;
|
||||
import org.apache.hadoop.yarn.nodelabels.event.StoreNewClusterNodeLabels;
|
||||
import org.apache.hadoop.yarn.nodelabels.event.StoreUpdateNodeLabelsEvent;
|
||||
import org.apache.hadoop.yarn.nodelabels.event.UpdateNodeToLabelsMappingsEvent;
|
||||
import org.apache.hadoop.yarn.util.resource.Resources;
|
||||
|
||||
|
@@ -83,8 +87,8 @@ public class CommonNodeLabelsManager extends AbstractService {
|
|||
|
||||
protected Dispatcher dispatcher;
|
||||
|
||||
protected ConcurrentMap<String, NodeLabel> labelCollections =
|
||||
new ConcurrentHashMap<String, NodeLabel>();
|
||||
protected ConcurrentMap<String, RMNodeLabel> labelCollections =
|
||||
new ConcurrentHashMap<String, RMNodeLabel>();
|
||||
protected ConcurrentMap<String, Host> nodeCollections =
|
||||
new ConcurrentHashMap<String, Host>();
|
||||
|
||||
|
@@ -181,6 +185,13 @@ public class CommonNodeLabelsManager extends AbstractService {
|
|||
store.updateNodeToLabelsMappings(updateNodeToLabelsMappingsEvent
|
||||
.getNodeToLabels());
|
||||
break;
|
||||
case UPDATE_NODE_LABELS:
|
||||
StoreUpdateNodeLabelsEvent
|
||||
storeSetNodeLabelsEventEvent =
|
||||
(StoreUpdateNodeLabelsEvent) event;
|
||||
store.updateNodeLabels(storeSetNodeLabelsEventEvent
|
||||
.getUpdatedNodeLabels());
|
||||
break;
|
||||
}
|
||||
} catch (IOException e) {
|
||||
LOG.error("Failed to store label modification to storage");
|
||||
|
@@ -214,7 +225,7 @@ public class CommonNodeLabelsManager extends AbstractService {
|
|||
initNodeLabelStore(conf);
|
||||
}
|
||||
|
||||
labelCollections.put(NO_LABEL, new NodeLabel(NO_LABEL));
|
||||
labelCollections.put(NO_LABEL, new RMNodeLabel(NO_LABEL));
|
||||
}
|
||||
|
||||
protected void initNodeLabelStore(Configuration conf) throws Exception {
|
||||
|
@@ -288,7 +299,7 @@ public class CommonNodeLabelsManager extends AbstractService {
|
|||
for (String label : labels) {
|
||||
// shouldn't overwrite it to avoid changing the Label.resource
|
||||
if (this.labelCollections.get(label) == null) {
|
||||
this.labelCollections.put(label, new NodeLabel(label));
|
||||
this.labelCollections.put(label, new RMNodeLabel(label));
|
||||
newLabels.add(label);
|
||||
}
|
||||
}
|
||||
|
@@ -746,7 +757,7 @@ public class CommonNodeLabelsManager extends AbstractService {
|
|||
if(label.equals(NO_LABEL)) {
|
||||
continue;
|
||||
}
|
||||
NodeLabel nodeLabelInfo = labelCollections.get(label);
|
||||
RMNodeLabel nodeLabelInfo = labelCollections.get(label);
|
||||
if(nodeLabelInfo != null) {
|
||||
Set<NodeId> nodeIds = nodeLabelInfo.getAssociatedNodeIds();
|
||||
if (!nodeIds.isEmpty()) {
|
||||
|
@@ -778,6 +789,60 @@ public class CommonNodeLabelsManager extends AbstractService {
|
|||
}
|
||||
}
|
||||
|
||||
private void checkUpdateNodeLabels(
|
||||
List<NodeLabel> updatedNodeLabels) throws YarnException {
|
||||
// pre-check
|
||||
for (NodeLabel label : updatedNodeLabels) {
|
||||
if (!labelCollections.containsKey(label.getNodeLabel())) {
|
||||
String message =
|
||||
String.format(
|
||||
"Trying to update a non-existing node-label=%s",
|
||||
label.getNodeLabel());
|
||||
LOG.error(message);
|
||||
throw new YarnException(message);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public void updateNodeLabels(
|
||||
List<NodeLabel> updatedNodeLabels) throws YarnException {
|
||||
try {
|
||||
writeLock.lock();
|
||||
checkUpdateNodeLabels(updatedNodeLabels);
|
||||
|
||||
for (NodeLabel label : updatedNodeLabels) {
|
||||
RMNodeLabel rmLabel = labelCollections.get(label.getNodeLabel());
|
||||
rmLabel.setIsExclusive(label.getIsExclusive());
|
||||
}
|
||||
|
||||
if (null != dispatcher && !updatedNodeLabels.isEmpty()) {
|
||||
dispatcher.getEventHandler().handle(
|
||||
new StoreUpdateNodeLabelsEvent(updatedNodeLabels));
|
||||
}
|
||||
} finally {
|
||||
writeLock.unlock();
|
||||
}
|
||||
}
|
||||
|
||||
public boolean isExclusiveNodeLabel(String nodeLabel) throws IOException {
|
||||
try {
|
||||
readLock.lock();
|
||||
RMNodeLabel label = labelCollections.get(nodeLabel);
|
||||
if (label == null) {
|
||||
String message =
|
||||
"Getting is-exclusive-node-label, node-label = " + nodeLabel
|
||||
+ ", is not existed.";
|
||||
LOG.error(message);
|
||||
throw new IOException(message);
|
||||
}
|
||||
return label.getIsExclusive();
|
||||
} finally {
|
||||
readLock.unlock();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private void checkAndThrowLabelName(String label) throws IOException {
|
||||
if (label == null || label.isEmpty() || label.length() > MAX_LABEL_LENGTH) {
|
||||
throw new IOException("label added is empty or exceeds "
|
||||
|
|
|
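Reviewer note (not part of this patch): a hedged sketch of the contract added above. updateNodeLabels only flips exclusivity for labels already present in labelCollections; an unknown label fails checkUpdateNodeLabels with a YarnException before anything reaches the store. The mgr parameter and the "gpu" label are assumptions for illustration.

import java.util.Arrays;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;

public class UpdateNodeLabelsContractSketch {
  // mgr is assumed to be a started manager that already knows the "gpu" label.
  public static void flipExclusivity(CommonNodeLabelsManager mgr) throws Exception {
    mgr.updateNodeLabels(Arrays.asList(NodeLabel.newInstance("gpu", false)));
    System.out.println(mgr.isExclusiveNodeLabel("gpu"));   // now false

    try {
      mgr.updateNodeLabels(Arrays.asList(NodeLabel.newInstance("no-such-label", true)));
    } catch (YarnException expected) {
      // The pre-check rejects updates that mention labels never added to the cluster.
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}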
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.nodelabels;
|
|||
import java.io.EOFException;
|
||||
import java.io.IOException;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
|
@@ -34,16 +35,21 @@ import org.apache.hadoop.fs.LocalFileSystem;
|
|||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.yarn.api.records.NodeId;
|
||||
import org.apache.hadoop.yarn.api.records.NodeLabel;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnException;
|
||||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto;
|
||||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto;
|
||||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
|
||||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeLabelsRequestProto;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeLabelsRequest;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeLabelsRequestPBImpl;
|
||||
|
||||
import com.google.common.collect.Sets;
|
||||
|
||||
|
@@ -60,7 +66,7 @@ public class FileSystemNodeLabelsStore extends NodeLabelsStore {
|
|||
protected static final String EDITLOG_FILENAME = "nodelabel.editlog";
|
||||
|
||||
protected enum SerializedLogType {
|
||||
ADD_LABELS, NODE_TO_LABELS, REMOVE_LABELS
|
||||
ADD_LABELS, NODE_TO_LABELS, REMOVE_LABELS, UPDATE_NODE_LABELS
|
||||
}
|
||||
|
||||
Path fsWorkingPath;
|
||||
|
@@ -152,7 +158,17 @@ public class FileSystemNodeLabelsStore extends NodeLabelsStore {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void recover() throws IOException {
|
||||
public void updateNodeLabels(List<NodeLabel> updatedNodeLabels)
|
||||
throws IOException {
|
||||
ensureAppendEditlogFile();
|
||||
editlogOs.writeInt(SerializedLogType.UPDATE_NODE_LABELS.ordinal());
|
||||
((UpdateNodeLabelsRequestPBImpl) UpdateNodeLabelsRequest
|
||||
.newInstance(updatedNodeLabels)).getProto().writeDelimitedTo(editlogOs);
|
||||
ensureCloseEditlogFile();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void recover() throws YarnException, IOException {
|
||||
/*
|
||||
* Steps of recover
|
||||
* 1) Read from last mirror (from mirror or mirror.old)
|
||||
|
@@ -221,6 +237,14 @@ public class FileSystemNodeLabelsStore extends NodeLabelsStore {
|
|||
mgr.replaceLabelsOnNode(map);
|
||||
break;
|
||||
}
|
||||
case UPDATE_NODE_LABELS: {
|
||||
List<NodeLabel> attributes =
|
||||
new UpdateNodeLabelsRequestPBImpl(
|
||||
UpdateNodeLabelsRequestProto.parseDelimitedFrom(is))
|
||||
.getNodeLabels();
|
||||
mgr.updateNodeLabels(attributes);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} catch (EOFException e) {
|
||||
// EOF hit, break
|
||||
|
|
|
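Reviewer note (not part of this patch): each edit-log record written above is the SerializedLogType ordinal followed by one length-delimited proto, and recover() replays records until EOF. A standalone sketch of that framing using plain java.io plus protobuf's writeDelimitedTo/parseDelimitedFrom; the file name is an assumption, not the real store layout.

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.util.Arrays;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeLabelsRequestProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeLabelsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeLabelsRequestPBImpl;

public class EditlogFramingSketch {
  public static void main(String[] args) throws Exception {
    File log = new File("nodelabel.editlog.sketch");   // illustrative path only

    try (DataOutputStream os = new DataOutputStream(new FileOutputStream(log))) {
      os.writeInt(3);   // UPDATE_NODE_LABELS is the fourth SerializedLogType constant
      ((UpdateNodeLabelsRequestPBImpl) UpdateNodeLabelsRequest.newInstance(
          Arrays.asList(NodeLabel.newInstance("gpu", false))))
          .getProto().writeDelimitedTo(os);
    }

    try (DataInputStream is = new DataInputStream(new FileInputStream(log))) {
      int type = is.readInt();   // 3 -> UPDATE_NODE_LABELS
      UpdateNodeLabelsRequestProto p = UpdateNodeLabelsRequestProto.parseDelimitedFrom(is);
      System.out.println(type + " -> "
          + new UpdateNodeLabelsRequestPBImpl(p).getNodeLabels().size() + " label(s)");
    }
  }
}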
@@ -21,11 +21,14 @@ package org.apache.hadoop.yarn.nodelabels;
|
|||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.yarn.api.records.NodeId;
|
||||
import org.apache.hadoop.yarn.api.records.NodeLabel;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnException;
|
||||
|
||||
public abstract class NodeLabelsStore implements Closeable {
|
||||
protected final CommonNodeLabelsManager mgr;
|
||||
|
@@ -52,10 +55,16 @@ public abstract class NodeLabelsStore implements Closeable {
|
|||
public abstract void removeClusterNodeLabels(Collection<String> labels)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
* Update node labels
|
||||
*/
|
||||
public abstract void updateNodeLabels(
|
||||
List<NodeLabel> updatedNodeLabels) throws IOException;
|
||||
|
||||
/**
|
||||
* Recover labels and node to labels mappings from store
|
||||
*/
|
||||
public abstract void recover() throws IOException;
|
||||
public abstract void recover() throws IOException, YarnException;
|
||||
|
||||
public void init(Configuration conf) throws Exception {}
|
||||
|
||||
|
|
|
@@ -25,17 +25,18 @@ import org.apache.hadoop.yarn.api.records.NodeId;
|
|||
import org.apache.hadoop.yarn.api.records.Resource;
|
||||
import org.apache.hadoop.yarn.util.resource.Resources;
|
||||
|
||||
public class NodeLabel implements Comparable<NodeLabel> {
|
||||
public class RMNodeLabel implements Comparable<RMNodeLabel> {
|
||||
private Resource resource;
|
||||
private int numActiveNMs;
|
||||
private String labelName;
|
||||
private Set<NodeId> nodeIds;
|
||||
private boolean exclusive = true;
|
||||
|
||||
public NodeLabel(String labelName) {
|
||||
public RMNodeLabel(String labelName) {
|
||||
this(labelName, Resource.newInstance(0, 0), 0);
|
||||
}
|
||||
|
||||
protected NodeLabel(String labelName, Resource res, int activeNMs) {
|
||||
protected RMNodeLabel(String labelName, Resource res, int activeNMs) {
|
||||
this.labelName = labelName;
|
||||
this.resource = res;
|
||||
this.numActiveNMs = activeNMs;
|
||||
|
@@ -76,12 +77,20 @@ public class NodeLabel implements Comparable<NodeLabel> {
|
|||
return labelName;
|
||||
}
|
||||
|
||||
public NodeLabel getCopy() {
|
||||
return new NodeLabel(labelName, resource, numActiveNMs);
|
||||
public void setIsExclusive(boolean exclusive) {
|
||||
this.exclusive = exclusive;
|
||||
}
|
||||
|
||||
public boolean getIsExclusive() {
|
||||
return this.exclusive;
|
||||
}
|
||||
|
||||
public RMNodeLabel getCopy() {
|
||||
return new RMNodeLabel(labelName, resource, numActiveNMs);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compareTo(NodeLabel o) {
|
||||
public int compareTo(RMNodeLabel o) {
|
||||
// We should always put empty label entry first after sorting
|
||||
if (labelName.isEmpty() != o.getLabelName().isEmpty()) {
|
||||
if (labelName.isEmpty()) {
|
||||
|
@@ -95,8 +104,8 @@ public class NodeLabel implements Comparable<NodeLabel> {
|
|||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj instanceof NodeLabel) {
|
||||
NodeLabel other = (NodeLabel) obj;
|
||||
if (obj instanceof RMNodeLabel) {
|
||||
RMNodeLabel other = (RMNodeLabel) obj;
|
||||
return Resources.equals(resource, other.getResource())
|
||||
&& StringUtils.equals(labelName, other.getLabelName())
|
||||
&& (other.getNumActiveNMs() == numActiveNMs);
|
|
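Reviewer note (not part of this patch): after this rename there are two similarly named types — the public record org.apache.hadoop.yarn.api.records.NodeLabel (name plus exclusivity, sent over the wire) and the RM-internal RMNodeLabel (resource and active-NM bookkeeping, now mirroring the exclusivity flag). A small sketch of the split, assuming RMNodeLabel stays in the org.apache.hadoop.yarn.nodelabels package alongside CommonNodeLabelsManager:

import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.nodelabels.RMNodeLabel;

public class LabelTypesSketch {
  public static void main(String[] args) {
    // Wire-level record exchanged through the admin protocol.
    NodeLabel wire = NodeLabel.newInstance("gpu", false);

    // RM-internal view keyed by the same label name.
    RMNodeLabel internal = new RMNodeLabel("gpu");
    internal.setIsExclusive(wire.getIsExclusive());
    System.out.println(internal.getIsExclusive());   // false
  }
}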
@@ -21,5 +21,6 @@ package org.apache.hadoop.yarn.nodelabels.event;
|
|||
public enum NodeLabelsStoreEventType {
|
||||
REMOVE_LABELS,
|
||||
ADD_LABELS,
|
||||
STORE_NODE_TO_LABELS
|
||||
STORE_NODE_TO_LABELS,
|
||||
UPDATE_NODE_LABELS
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,36 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.yarn.nodelabels.event;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.hadoop.yarn.api.records.NodeLabel;
|
||||
|
||||
public class StoreUpdateNodeLabelsEvent extends NodeLabelsStoreEvent {
|
||||
private List<NodeLabel> updatedNodeLabels;
|
||||
|
||||
public StoreUpdateNodeLabelsEvent(List<NodeLabel> updateNodeLabels) {
|
||||
super(NodeLabelsStoreEventType.UPDATE_NODE_LABELS);
|
||||
this.updatedNodeLabels = updateNodeLabels;
|
||||
}
|
||||
|
||||
public List<NodeLabel> getUpdatedNodeLabels() {
|
||||
return updatedNodeLabels;
|
||||
}
|
||||
}
|
|
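Reviewer note (not part of this patch): persistence stays asynchronous — the manager hands a StoreUpdateNodeLabelsEvent to its dispatcher, and the UPDATE_NODE_LABELS branch added in CommonNodeLabelsManager forwards it to store.updateNodeLabels(...). A hedged sketch of the producer side; the dispatcher parameter is an assumption for illustration.

import java.util.Arrays;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.nodelabels.event.StoreUpdateNodeLabelsEvent;

public class StoreUpdateNodeLabelsSketch {
  @SuppressWarnings("unchecked")
  public static void persistAsync(Dispatcher dispatcher) {
    // Mirrors the call made inside CommonNodeLabelsManager.updateNodeLabels().
    dispatcher.getEventHandler().handle(
        new StoreUpdateNodeLabelsEvent(
            Arrays.asList(NodeLabel.newInstance("gpu", false))));
  }
}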
@@ -40,6 +40,7 @@ import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.Refre
|
|||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto;
|
||||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto;
|
||||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
|
||||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeLabelsRequestProto;
|
||||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
|
||||
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
|
||||
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB;
|
||||
|
@@ -61,6 +62,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLa
|
|||
import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeLabelsRequest;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeLabelsResponse;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;
|
||||
|
@@ -81,6 +84,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClust
|
|||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeLabelsRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeLabelsResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl;
|
||||
|
||||
|
@@ -263,4 +268,18 @@ public class ResourceManagerAdministrationProtocolPBClientImpl implements Resour
|
|||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public UpdateNodeLabelsResponse updateNodeLabels(
|
||||
UpdateNodeLabelsRequest request) throws YarnException, IOException {
|
||||
UpdateNodeLabelsRequestProto requestProto =
|
||||
((UpdateNodeLabelsRequestPBImpl) request).getProto();
|
||||
try {
|
||||
return new UpdateNodeLabelsResponsePBImpl(
|
||||
proxy.updateNodeLabels(null, requestProto));
|
||||
} catch (ServiceException e) {
|
||||
RPCUtil.unwrapAndThrowException(e);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
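Reviewer note (not part of this patch): end to end, an admin tool reaches this new client stub through the usual RM admin proxy; the stub converts the record to its proto, calls the generated service with a null RpcController, and wraps the returned proto back into a record, unwrapping ServiceExceptions on the way out. A hedged sketch of the caller side — ClientRMProxy is existing client plumbing assumed here, not something this patch adds.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeLabelsRequest;

public class UpdateNodeLabelsClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new YarnConfiguration();
    ResourceManagerAdministrationProtocol admin =
        ClientRMProxy.createRMProxy(conf, ResourceManagerAdministrationProtocol.class);
    admin.updateNodeLabels(UpdateNodeLabelsRequest.newInstance(
        Arrays.asList(NodeLabel.newInstance("gpu", false))));
  }
}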
@@ -42,6 +42,8 @@ import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.Remov
|
|||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsResponseProto;
|
||||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
|
||||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeResponseProto;
|
||||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeLabelsRequestProto;
|
||||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeLabelsResponseProto;
|
||||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
|
||||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceResponseProto;
|
||||
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
|
||||
|
@@ -55,6 +57,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsC
|
|||
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeLabelsRequest;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeLabelsResponse;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl;
|
||||
|
@@ -74,6 +78,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClust
|
|||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeLabelsRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeLabelsResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl;
|
||||
|
||||
|
@@ -268,4 +274,21 @@ public class ResourceManagerAdministrationProtocolPBServiceImpl implements Resou
|
|||
throw new ServiceException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public UpdateNodeLabelsResponseProto updateNodeLabels(
|
||||
RpcController controller, UpdateNodeLabelsRequestProto proto)
|
||||
throws ServiceException {
|
||||
UpdateNodeLabelsRequest request =
|
||||
new UpdateNodeLabelsRequestPBImpl(proto);
|
||||
try {
|
||||
UpdateNodeLabelsResponse response =
|
||||
real.updateNodeLabels(request);
|
||||
return ((UpdateNodeLabelsResponsePBImpl) response).getProto();
|
||||
} catch (YarnException e) {
|
||||
throw new ServiceException(e);
|
||||
} catch (IOException e) {
|
||||
throw new ServiceException(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,145 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.hadoop.yarn.api.records.NodeLabel;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl;
|
||||
import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;
|
||||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeLabelsRequestProto;
|
||||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeLabelsRequestProtoOrBuilder;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeLabelsRequest;
|
||||
|
||||
public class UpdateNodeLabelsRequestPBImpl extends
|
||||
UpdateNodeLabelsRequest {
|
||||
UpdateNodeLabelsRequestProto proto =
|
||||
UpdateNodeLabelsRequestProto.getDefaultInstance();
|
||||
UpdateNodeLabelsRequestProto.Builder builder = null;
|
||||
private List<NodeLabel> updatedNodeLabels;
|
||||
boolean viaProto = false;
|
||||
|
||||
public UpdateNodeLabelsRequestPBImpl() {
|
||||
builder = UpdateNodeLabelsRequestProto.newBuilder();
|
||||
}
|
||||
|
||||
public UpdateNodeLabelsRequestPBImpl(
|
||||
UpdateNodeLabelsRequestProto proto) {
|
||||
this.proto = proto;
|
||||
viaProto = true;
|
||||
}
|
||||
|
||||
public UpdateNodeLabelsRequestProto getProto() {
|
||||
mergeLocalToProto();
|
||||
proto = viaProto ? proto : builder.build();
|
||||
viaProto = true;
|
||||
return proto;
|
||||
}
|
||||
|
||||
private void mergeLocalToProto() {
|
||||
if (viaProto)
|
||||
maybeInitBuilder();
|
||||
mergeLocalToBuilder();
|
||||
proto = builder.build();
|
||||
viaProto = true;
|
||||
}
|
||||
|
||||
private void mergeLocalToBuilder() {
|
||||
if (this.updatedNodeLabels != null) {
|
||||
addNodeLabelsToProto();
|
||||
}
|
||||
}
|
||||
|
||||
private void addNodeLabelsToProto() {
|
||||
maybeInitBuilder();
|
||||
builder.clearNodeLabels();
|
||||
List<NodeLabelProto> protoList =
|
||||
new ArrayList<NodeLabelProto>();
|
||||
for (NodeLabel r : this.updatedNodeLabels) {
|
||||
protoList.add(convertToProtoFormat(r));
|
||||
}
|
||||
builder.addAllNodeLabels(protoList);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object other) {
|
||||
if (other == null)
|
||||
return false;
|
||||
if (other.getClass().isAssignableFrom(this.getClass())) {
|
||||
return this.getProto().equals(this.getClass().cast(other).getProto());
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
assert false : "hashCode not designed";
|
||||
return 0;
|
||||
}
|
||||
|
||||
private void maybeInitBuilder() {
|
||||
if (viaProto || builder == null) {
|
||||
builder = UpdateNodeLabelsRequestProto.newBuilder(proto);
|
||||
}
|
||||
viaProto = false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setNodeLabels(List<NodeLabel> updatedNodeLabels) {
|
||||
maybeInitBuilder();
|
||||
if (updatedNodeLabels == null) {
|
||||
builder.clearNodeLabels();
|
||||
}
|
||||
this.updatedNodeLabels = updatedNodeLabels;
|
||||
}
|
||||
|
||||
private void initLocalNodeLabels() {
|
||||
UpdateNodeLabelsRequestProtoOrBuilder p = viaProto ? proto : builder;
|
||||
List<NodeLabelProto> attributesProtoList =
|
||||
p.getNodeLabelsList();
|
||||
this.updatedNodeLabels = new ArrayList<NodeLabel>();
|
||||
for (NodeLabelProto r : attributesProtoList) {
|
||||
this.updatedNodeLabels.add(convertFromProtoFormat(r));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<NodeLabel> getNodeLabels() {
|
||||
if (this.updatedNodeLabels != null) {
|
||||
return this.updatedNodeLabels;
|
||||
}
|
||||
initLocalNodeLabels();
|
||||
return this.updatedNodeLabels;
|
||||
}
|
||||
|
||||
private NodeLabel
|
||||
convertFromProtoFormat(NodeLabelProto p) {
|
||||
return new NodeLabelPBImpl(p);
|
||||
}
|
||||
|
||||
private NodeLabelProto convertToProtoFormat(NodeLabel t) {
|
||||
return ((NodeLabelPBImpl) t).getProto();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return getProto().toString();
|
||||
}
|
||||
}
|
|
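Reviewer note (not part of this patch): the request impl keeps a local List&lt;NodeLabel&gt; and only folds it into the proto when getProto() runs (mergeLocalToProto -> addNodeLabelsToProto); reading from a proto-backed instance goes the other way through initLocalNodeLabels. A small round-trip sketch, for illustration only:

import java.util.Arrays;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeLabelsRequestProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeLabelsRequestPBImpl;

public class UpdateNodeLabelsRequestRoundTrip {
  public static void main(String[] args) {
    UpdateNodeLabelsRequestPBImpl request = new UpdateNodeLabelsRequestPBImpl();
    request.setNodeLabels(Arrays.asList(NodeLabel.newInstance("gpu", false)));

    // getProto() merges the local list into the builder and freezes the result.
    UpdateNodeLabelsRequestProto proto = request.getProto();

    // A proto-backed copy lazily materializes the list on first read.
    UpdateNodeLabelsRequestPBImpl copy = new UpdateNodeLabelsRequestPBImpl(proto);
    System.out.println(copy.getNodeLabels().size());   // 1
  }
}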
@@ -0,0 +1,67 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
|
||||
|
||||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeLabelsResponseProto;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeLabelsResponse;
|
||||
|
||||
public class UpdateNodeLabelsResponsePBImpl extends
|
||||
UpdateNodeLabelsResponse {
|
||||
UpdateNodeLabelsResponseProto proto =
|
||||
UpdateNodeLabelsResponseProto.getDefaultInstance();
|
||||
UpdateNodeLabelsResponseProto.Builder builder = null;
|
||||
boolean viaProto = false;
|
||||
|
||||
public UpdateNodeLabelsResponsePBImpl() {
|
||||
builder = UpdateNodeLabelsResponseProto.newBuilder();
|
||||
}
|
||||
|
||||
public UpdateNodeLabelsResponsePBImpl(
|
||||
UpdateNodeLabelsResponseProto proto) {
|
||||
this.proto = proto;
|
||||
viaProto = true;
|
||||
}
|
||||
|
||||
public UpdateNodeLabelsResponseProto getProto() {
|
||||
proto = viaProto ? proto : builder.build();
|
||||
viaProto = true;
|
||||
return proto;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return getProto().hashCode();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object other) {
|
||||
if (other == null)
|
||||
return false;
|
||||
if (other.getClass().isAssignableFrom(this.getClass())) {
|
||||
return this.getProto().equals(this.getClass().cast(other).getProto());
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return getProto().toString().replaceAll("\\n", ", ")
|
||||
.replaceAll("\\s+", " ");
|
||||
}
|
||||
}
|
|
@@ -36,15 +36,280 @@ import java.util.Set;
|
|||
import org.apache.commons.lang.math.LongRange;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.security.proto.SecurityProtos.*;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.*;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.*;
|
||||
import org.apache.hadoop.yarn.api.records.*;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.*;
|
||||
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.*;
|
||||
import org.apache.hadoop.yarn.proto.YarnProtos.*;
|
||||
import org.apache.hadoop.yarn.proto.YarnServiceProtos.*;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.*;
|
||||
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
|
||||
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
|
||||
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
|
||||
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto;
|
||||
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
|
||||
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto;
|
||||
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptsRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptsResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeLabelsRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeLabelsResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerReportRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerReportResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerStatusesRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerStatusesResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainersRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainersResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetDelegationTokenRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetDelegationTokenResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetLabelsToNodesRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetLabelsToNodesResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToLabelsRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToLabelsResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.MoveApplicationAcrossQueuesRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.MoveApplicationAcrossQueuesResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationDeleteRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationDeleteResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationSubmissionRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationSubmissionResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationUpdateRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationUpdateResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainerRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainersRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainersResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainersRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainersResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationId;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationReport;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
|
||||
import org.apache.hadoop.yarn.api.records.Container;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerId;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerReport;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerResourceDecrease;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerResourceIncrease;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerResourceIncreaseRequest;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerStatus;
|
||||
import org.apache.hadoop.yarn.api.records.LocalResource;
|
||||
import org.apache.hadoop.yarn.api.records.LogAggregationContext;
|
||||
import org.apache.hadoop.yarn.api.records.NMToken;
|
||||
import org.apache.hadoop.yarn.api.records.NodeId;
|
||||
import org.apache.hadoop.yarn.api.records.NodeLabel;
|
||||
import org.apache.hadoop.yarn.api.records.NodeReport;
|
||||
import org.apache.hadoop.yarn.api.records.PreemptionContainer;
|
||||
import org.apache.hadoop.yarn.api.records.PreemptionContract;
|
||||
import org.apache.hadoop.yarn.api.records.PreemptionMessage;
|
||||
import org.apache.hadoop.yarn.api.records.PreemptionResourceRequest;
|
||||
import org.apache.hadoop.yarn.api.records.Priority;
|
||||
import org.apache.hadoop.yarn.api.records.QueueInfo;
|
||||
import org.apache.hadoop.yarn.api.records.QueueState;
|
||||
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
|
||||
import org.apache.hadoop.yarn.api.records.ReservationDefinition;
|
||||
import org.apache.hadoop.yarn.api.records.ReservationId;
|
||||
import org.apache.hadoop.yarn.api.records.ReservationRequest;
|
||||
import org.apache.hadoop.yarn.api.records.ReservationRequests;
|
||||
import org.apache.hadoop.yarn.api.records.Resource;
|
||||
import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
|
||||
import org.apache.hadoop.yarn.api.records.ResourceOption;
|
||||
import org.apache.hadoop.yarn.api.records.ResourceRequest;
|
||||
import org.apache.hadoop.yarn.api.records.SerializedException;
|
||||
import org.apache.hadoop.yarn.api.records.StrictPreemptionContract;
|
||||
import org.apache.hadoop.yarn.api.records.Token;
|
||||
import org.apache.hadoop.yarn.api.records.URL;
|
||||
import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptReportPBImpl;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationReportPBImpl;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationResourceUsageReportPBImpl;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerLaunchContextPBImpl;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerReportPBImpl;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerResourceDecreasePBImpl;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerResourceIncreasePBImpl;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerResourceIncreaseRequestPBImpl;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerStatusPBImpl;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.NMTokenPBImpl;
|
||||
import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeReportPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionContainerPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionContractPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionMessagePBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionResourceRequestPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.QueueInfoPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.QueueUserACLInfoPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ResourceBlacklistRequestPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ResourceOptionPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ResourceRequestPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.SerializedExceptionPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.StrictPreemptionContractPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.URLPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.YarnClusterMetricsPBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptReportProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerLaunchContextProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerReportProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceDecreaseProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceIncreaseProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceIncreaseRequestProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto;
import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContainerProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContractProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionMessageProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionResourceRequestProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
import org.apache.hadoop.yarn.proto.YarnProtos.QueueInfoProto;
import org.apache.hadoop.yarn.proto.YarnProtos.QueueUserACLInfoProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceBlacklistRequestProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceOptionProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto;
import org.apache.hadoop.yarn.proto.YarnProtos.SerializedExceptionProto;
import org.apache.hadoop.yarn.proto.YarnProtos.StrictPreemptionContractProto;
import org.apache.hadoop.yarn.proto.YarnProtos.URLProto;
import org.apache.hadoop.yarn.proto.YarnProtos.YarnClusterMetricsProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeLabelsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodeLabelsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerReportRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerReportResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainersRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainersResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetLabelsToNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetLabelsToNodesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNodesToLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNodesToLabelsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.MoveApplicationAcrossQueuesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.MoveApplicationAcrossQueuesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.NMTokenProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationDeleteRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationDeleteResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationSubmissionRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationSubmissionResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationUpdateRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationUpdateResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainersRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainersResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationResponseProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshServiceAclsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshServiceAclsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.Assert;
import org.junit.BeforeClass;
@ -212,6 +477,7 @@ public class TestPBImplRecords {
generateByNewInstance(StrictPreemptionContract.class);
generateByNewInstance(PreemptionMessage.class);
generateByNewInstance(StartContainerRequest.class);
generateByNewInstance(NodeLabel.class);
// genByNewInstance does not apply to QueueInfo, cause
// it is recursive(has sub queues)
typeValueCache.put(QueueInfo.class, QueueInfo.newInstance("root", 1.0f,
@ -1015,4 +1281,22 @@ public class TestPBImplRecords {
validatePBImplRecord(GetLabelsToNodesResponsePBImpl.class,
GetLabelsToNodesResponseProto.class);
}

@Test
public void testNodeLabelAttributesPBImpl() throws Exception {
validatePBImplRecord(NodeLabelPBImpl.class,
NodeLabelProto.class);
}

@Test
public void testUpdateNodeLabelsRequestPBImpl() throws Exception {
validatePBImplRecord(UpdateNodeLabelsRequestPBImpl.class,
UpdateNodeLabelsRequestProto.class);
}

@Test
public void testUpdateNodeLabelsResponsePBImpl() throws Exception {
validatePBImplRecord(UpdateNodeLabelsResponsePBImpl.class,
UpdateNodeLabelsResponseProto.class);
}
}
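Note for readers skimming the diff: the three new tests above exercise the protobuf round-trip for the new NodeLabel record and the UpdateNodeLabels request/response pair. The sketch below is a hand-written illustration of the same idea, not part of the patch; it assumes the default record factory returns the PB implementation and that NodeLabelPBImpl exposes getProto() and a proto-based constructor, as the other *PBImpl records conventionally do.

@Test
public void nodeLabelRoundTripSketch() {
  // Build the record through the public API, then rebuild it from its proto form.
  NodeLabel original = NodeLabel.newInstance("gpu", false);  // "gpu" is a made-up label name
  NodeLabelProto proto = ((NodeLabelPBImpl) original).getProto();
  NodeLabel restored = new NodeLabelPBImpl(proto);
  Assert.assertEquals("gpu", restored.getNodeLabel());
  Assert.assertFalse(restored.getIsExclusive());
}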
@ -20,17 +20,20 @@ package org.apache.hadoop.yarn.nodelabels;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.event.InlineDispatcher;

public class DummyCommonNodeLabelsManager extends CommonNodeLabelsManager {
Map<NodeId, Set<String>> lastNodeToLabels = null;
Collection<String> lastAddedlabels = null;
Collection<String> lastRemovedlabels = null;
List<NodeLabel> lastUpdatedNodeLabels = null;

@Override
public void initNodeLabelStore(Configuration conf) {
@ -57,6 +60,12 @@ public class DummyCommonNodeLabelsManager extends CommonNodeLabelsManager {
lastAddedlabels = label;
}

@Override
public void updateNodeLabels(List<NodeLabel> updatedNodeLabels)
throws IOException {
lastUpdatedNodeLabels = updatedNodeLabels;
}

@Override
public void close() throws IOException {
// do nothing
@ -29,7 +29,9 @@ import java.util.Set;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@ -536,4 +538,30 @@ public class TestCommonNodeLabelsManager extends NodeLabelTestBase {
Assert.assertTrue("Should failed when #labels > 1 on a host after add",
failed);
}

@Test (timeout = 5000)
public void testUpdateNodeLabels() throws Exception {
boolean failed = false;

// should fail: label doesn't exist
try {
mgr.updateNodeLabels(Arrays.asList(NodeLabel.newInstance(
"p1", false)));
} catch (YarnException e) {
failed = true;
}
Assert.assertTrue("Should fail since the node label doesn't exist", failed);

mgr.addToCluserNodeLabels(toSet("p1", "p2", "p3"));

mgr.updateNodeLabels(Arrays.asList(
NodeLabel.newInstance("p1", false), NodeLabel.newInstance("p2", true)));
Assert.assertEquals("p1", mgr.lastUpdatedNodeLabels.get(0).getNodeLabel());
Assert.assertFalse(mgr.lastUpdatedNodeLabels.get(0).getIsExclusive());
Assert.assertTrue(mgr.lastUpdatedNodeLabels.get(1).getIsExclusive());

// Check exclusive for p1/p2
Assert.assertFalse(mgr.isExclusiveNodeLabel("p1"));
Assert.assertTrue(mgr.isExclusiveNodeLabel("p2"));
}
}
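Condensing what testUpdateNodeLabels above demonstrates: a label has to be registered with the cluster before its exclusivity can be changed, and updateNodeLabels accepts the replacement NodeLabel records wholesale. A hedged sketch, not taken from the patch ("mgr" stands for any CommonNodeLabelsManager, the label names are invented, and toSet is the NodeLabelTestBase helper used above):

mgr.addToCluserNodeLabels(toSet("gpu", "fpga"));       // labels must exist first
mgr.updateNodeLabels(Arrays.asList(
    NodeLabel.newInstance("gpu", false),               // gpu becomes non-exclusive (shareable)
    NodeLabel.newInstance("fpga", true)));             // fpga stays exclusive
boolean gpuShared = !mgr.isExclusiveNodeLabel("gpu");  // true after the update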
@ -24,6 +24,7 @@ import java.util.Arrays;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.InlineDispatcher;
import org.junit.After;
@ -188,7 +189,7 @@ public class TestFileSystemNodeLabelsStore extends NodeLabelTestBase {
}

@SuppressWarnings({ "unchecked", "rawtypes" })
@Test//(timeout = 10000)
@Test (timeout = 10000)
public void testSerilizationAfterRecovery() throws Exception {
mgr.addToCluserNodeLabels(toSet("p1", "p2", "p3"));
mgr.addToCluserNodeLabels(toSet("p4"));
@ -218,6 +219,14 @@ public class TestFileSystemNodeLabelsStore extends NodeLabelTestBase {
* p4: n4
* p6: n6, n7
*/

mgr.updateNodeLabels(Arrays.asList(NodeLabel.newInstance("p2", false)));
mgr.updateNodeLabels(Arrays.asList(NodeLabel.newInstance("p6", false)));

/*
* Set p2/p6 to be non-exclusive
*/

// shutdown mgr and start a new mgr
mgr.stop();
@ -239,6 +248,10 @@ public class TestFileSystemNodeLabelsStore extends NodeLabelTestBase {
"p4", toSet(toNodeId("n4")),
"p2", toSet(toNodeId("n2"))));

Assert.assertFalse(mgr.isExclusiveNodeLabel("p2"));
Assert.assertTrue(mgr.isExclusiveNodeLabel("p4"));
Assert.assertFalse(mgr.isExclusiveNodeLabel("p6"));

/*
* Add label p7,p8 then shutdown
*/
@ -77,6 +77,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLa
import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeLabelsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeLabelsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
@ -664,6 +666,28 @@ public class AdminService extends CompositeService implements
}
}

@Override
public UpdateNodeLabelsResponse updateNodeLabels(
UpdateNodeLabelsRequest request) throws YarnException, IOException {
String argName = "updateNodeLabels";
final String msg = "update node labels";
UserGroupInformation user = checkAcls(argName);

checkRMStatus(user.getShortUserName(), argName, msg);

UpdateNodeLabelsResponse response = UpdateNodeLabelsResponse.newInstance();

try {
rmContext.getNodeLabelManager().updateNodeLabels(
request.getNodeLabels());
RMAuditLogger
.logSuccess(user.getShortUserName(), argName, "AdminService");
return response;
} catch (YarnException ioe) {
throw logAndWrapException(ioe, user.getShortUserName(), argName, msg);
}
}

private void checkRMStatus(String user, String argName, String msg)
throws StandbyException {
if (!isRMActive()) {
@ -673,11 +697,11 @@ public class AdminService extends CompositeService implements
}
}

private YarnException logAndWrapException(IOException ioe, String user,
private YarnException logAndWrapException(Exception exception, String user,
String argName, String msg) throws YarnException {
LOG.info("Exception " + msg, ioe);
LOG.info("Exception " + msg, exception);
RMAuditLogger.logFailure(user, argName, "",
"AdminService", "Exception " + msg);
return RPCUtil.getRemoteException(ioe);
return RPCUtil.getRemoteException(exception);
}
}
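The updateNodeLabels method above is the server half of the new admin RPC. For orientation, here is a hedged sketch of what a client-side call could look like; it is not part of this commit, ClientRMProxy is used the same way existing rmadmin calls obtain the admin protocol, and UpdateNodeLabelsRequest.newInstance(List<NodeLabel>) is assumed to follow the same newInstance pattern as the other admin-protocol records.

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeLabelsRequest;

public class MarkLabelNonExclusive {
  public static void main(String[] args) throws IOException, YarnException {
    // Obtain the admin protocol proxy from the RM address configured in yarn-site.xml.
    ResourceManagerAdministrationProtocol admin = ClientRMProxy.createRMProxy(
        new YarnConfiguration(), ResourceManagerAdministrationProtocol.class);
    // "gpu" is a hypothetical label; marking it non-exclusive lets applications
    // outside the partition use its idle capacity.
    admin.updateNodeLabels(UpdateNodeLabelsRequest.newInstance(
        Arrays.asList(NodeLabel.newInstance("gpu", false))));
  }
}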
@ -36,7 +36,7 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.nodelabels.NodeLabel;
import org.apache.hadoop.yarn.nodelabels.RMNodeLabel;
import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeLabelsUpdateSchedulerEvent;
@ -45,7 +45,6 @@ import org.apache.hadoop.yarn.util.resource.Resources;
import com.google.common.collect.ImmutableSet;

public class RMNodeLabelsManager extends CommonNodeLabelsManager {

protected static class Queue {
protected Set<String> acccessibleNodeLabels;
protected Resource resource;
@ -172,7 +171,6 @@ public class RMNodeLabelsManager extends CommonNodeLabelsManager {
}
}


/*
* Following methods are used for setting if a node is up and running, and it
* will update running nodes resource
@ -201,7 +199,7 @@ public class RMNodeLabelsManager extends CommonNodeLabelsManager {
Set<String> labelsForNode = getLabelsByNode(nodeId);
if (labelsForNode != null) {
for (String label : labelsForNode) {
NodeLabel labelInfo = labelCollections.get(label);
RMNodeLabel labelInfo = labelCollections.get(label);
if(labelInfo != null) {
labelInfo.addNodeId(nodeId);
}
@ -383,7 +381,7 @@ public class RMNodeLabelsManager extends CommonNodeLabelsManager {
// no label in the past
if (oldLabels.isEmpty()) {
// update labels
NodeLabel label = labelCollections.get(NO_LABEL);
RMNodeLabel label = labelCollections.get(NO_LABEL);
label.removeNode(oldNM.resource);

// update queues, all queue can access this node
@ -393,7 +391,7 @@ public class RMNodeLabelsManager extends CommonNodeLabelsManager {
} else {
// update labels
for (String labelName : oldLabels) {
NodeLabel label = labelCollections.get(labelName);
RMNodeLabel label = labelCollections.get(labelName);
if (null == label) {
continue;
}
@ -418,7 +416,7 @@ public class RMNodeLabelsManager extends CommonNodeLabelsManager {
// no label in the past
if (newLabels.isEmpty()) {
// update labels
NodeLabel label = labelCollections.get(NO_LABEL);
RMNodeLabel label = labelCollections.get(NO_LABEL);
label.addNode(newNM.resource);

// update queues, all queue can access this node
@ -428,7 +426,7 @@ public class RMNodeLabelsManager extends CommonNodeLabelsManager {
} else {
// update labels
for (String labelName : newLabels) {
NodeLabel label = labelCollections.get(labelName);
RMNodeLabel label = labelCollections.get(labelName);
label.addNode(newNM.resource);
}
@ -499,13 +497,13 @@ public class RMNodeLabelsManager extends CommonNodeLabelsManager {
this.rmContext = rmContext;
}

public List<NodeLabel> pullRMNodeLabelsInfo() {
public List<RMNodeLabel> pullRMNodeLabelsInfo() {
try {
readLock.lock();
List<NodeLabel> infos = new ArrayList<NodeLabel>();
List<RMNodeLabel> infos = new ArrayList<RMNodeLabel>();

for (Entry<String, NodeLabel> entry : labelCollections.entrySet()) {
NodeLabel label = entry.getValue();
for (Entry<String, RMNodeLabel> entry : labelCollections.entrySet()) {
RMNodeLabel label = entry.getValue();
infos.add(label.getCopy());
}
@ -20,7 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;

import org.apache.hadoop.yarn.nodelabels.NodeLabel;
import org.apache.hadoop.yarn.nodelabels.RMNodeLabel;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.webapp.SubView;
@ -55,7 +55,7 @@ public class NodeLabelsPage extends RmView {
tbody();

RMNodeLabelsManager nlm = rm.getRMContext().getNodeLabelManager();
for (NodeLabel info : nlm.pullRMNodeLabelsInfo()) {
for (RMNodeLabel info : nlm.pullRMNodeLabelsInfo()) {
TR<TBODY<TABLE<Hamlet>>> row =
tbody.tr().td(
info.getLabelName().isEmpty() ? "<NO_LABEL>" : info
@ -20,11 +20,13 @@ package org.apache.hadoop.yarn.server.resourcemanager.nodelabels;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.nodelabels.NodeLabelsStore;
@ -63,6 +65,12 @@ public class NullRMNodeLabelsManager extends RMNodeLabelsManager {
public void close() throws IOException {
// do nothing
}

@Override
public void updateNodeLabels(List<NodeLabel> updatedNodeLabels)
throws IOException {
// do nothing
}
};
}
@ -29,7 +29,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.nodelabels.NodeLabel;
import org.apache.hadoop.yarn.nodelabels.RMNodeLabel;
import org.apache.hadoop.yarn.nodelabels.NodeLabelTestBase;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.After;
@ -476,8 +476,8 @@ public class TestRMNodeLabelsManager extends NodeLabelTestBase {
}

private void checkNodeLabelInfo(List<NodeLabel> infos, String labelName, int activeNMs, int memory) {
for (NodeLabel info : infos) {
private void checkNodeLabelInfo(List<RMNodeLabel> infos, String labelName, int activeNMs, int memory) {
for (RMNodeLabel info : infos) {
if (info.getLabelName().equals(labelName)) {
Assert.assertEquals(activeNMs, info.getNumActiveNMs());
Assert.assertEquals(memory, info.getResource().getMemory());
@ -499,7 +499,7 @@ public class TestRMNodeLabelsManager extends NodeLabelTestBase {
toNodeId("n2"), toSet("x"), toNodeId("n3"), toSet("y")));

// x, y, z and ""
List<NodeLabel> infos = mgr.pullRMNodeLabelsInfo();
List<RMNodeLabel> infos = mgr.pullRMNodeLabelsInfo();
Assert.assertEquals(4, infos.size());
checkNodeLabelInfo(infos, RMNodeLabelsManager.NO_LABEL, 2, 20);
checkNodeLabelInfo(infos, "x", 2, 20);