Merge trunk into HA branch.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1207490 13f79535-47bb-0310-9956-ffa450edef68
commit 73b3de6204
@@ -4,6 +4,11 @@ Trunk (unreleased changes)

  INCOMPATIBLE CHANGES

  NEW FEATURES

    HADOOP-7777 Implement a base class for DNSToSwitchMapping implementations
      that can offer extra topology information. (stevel)

  IMPROVEMENTS

    HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm)
@@ -118,6 +123,9 @@ Release 0.23.1 - Unreleased
    HADOOP-7802. Hadoop scripts unconditionally source
      "$bin"/../libexec/hadoop-config.sh. (Bruno Mahé via tomwhite)

    HADOOP-7858. Drop some info logging to DEBUG level in IPC,
      metrics, and HTTP. (todd via eli)

  OPTIMIZATIONS

  BUG FIXES
@@ -131,6 +139,8 @@ Release 0.23.1 - Unreleased
    HADOOP-6614. RunJar should provide more diags when it can't create
      a temp file. (Jonathan Hsieh via eli)

    HADOOP-7859. TestViewFsHdfs.testgetFileLinkStatus is failing an assert. (eli)

Release 0.23.0 - 2011-11-01

  INCOMPATIBLE CHANGES
@@ -1096,6 +1106,12 @@ Release 0.22.0 - Unreleased

    HADOOP-7786. Remove HDFS-specific config keys defined in FsConfig. (eli)

    HADOOP-7358. Improve log levels when exceptions caught in RPC handler
      (Todd Lipcon via shv)

    HADOOP-7861. changes2html.pl generates links to HADOOP, HDFS, and MAPREDUCE
      jiras. (shv)

  OPTIMIZATIONS

    HADOOP-6884. Add LOG.isDebugEnabled() guard for each LOG.debug(..).
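Several of the entries above (HADOOP-6884, HADOOP-7858) are about keeping chatty per-request messages at DEBUG. A minimal sketch of the isDebugEnabled() guard pattern those entries refer to, using the same commons-logging API as the patched files; the class and message below are hypothetical and not taken from this commit:

```java
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

class GuardedLoggingExample {
  private static final Log LOG = LogFactory.getLog(GuardedLoggingExample.class);

  void reportProgress(String name, long bytes) {
    // Guard the call so the string concatenation is skipped entirely
    // when DEBUG is disabled -- the point of HADOOP-6884.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Source " + name + " processed " + bytes + " bytes");
    }
  }
}
```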
@@ -242,7 +242,11 @@ for my $rel (@releases) {

      $item =~ s:\s*(\([^)"]+?\))\s*$:<br />$1:;          # Separate attribution
      $item =~ s:\n{2,}:\n<p/>\n:g;                       # Keep paragraph breaks
      $item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)}     # Link to JIRA
      $item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)}     # Link to JIRA Common
                {<a href="${jira_url_prefix}$1">$1</a>}g;
      $item =~ s{(?:${jira_url_prefix})?(HDFS-\d+)}       # Link to JIRA Hdfs
                {<a href="${jira_url_prefix}$1">$1</a>}g;
      $item =~ s{(?:${jira_url_prefix})?(MAPREDUCE-\d+)}  # Link to JIRA MR
                {<a href="${jira_url_prefix}$1">$1</a>}g;
      print " <li>$item</li>\n";
    }
@@ -1099,18 +1099,10 @@ public final class FileContext {
   */
  private Path qualifySymlinkTarget(final AbstractFileSystem pathFS,
      Path pathWithLink, Path target) {
    /* NB: makeQualified uses the target's scheme and authority, if
     * specified, and the scheme and authority of pathFS, if not. If
     * the path does have a scheme and authority we assert they match
     * those of pathFS since resolve updates the file system of a path
     * that contains links each time a link is encountered.
     */
    // NB: makeQualified uses the target's scheme and authority, if
    // specified, and the scheme and authority of pathFS, if not.
    final String scheme = target.toUri().getScheme();
    final String auth = target.toUri().getAuthority();
    if (scheme != null && auth != null) {
      assert scheme.equals(pathFS.getUri().getScheme());
      assert auth.equals(pathFS.getUri().getAuthority());
    }
    return (scheme == null && auth == null)
      ? target.makeQualified(pathFS.getUri(), pathWithLink.getParent())
      : target;
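The hunk above trims the comment around the qualification rule: a symlink target is only qualified when it carries neither a scheme nor an authority, in which case it inherits them from the file system that holds the link and is resolved against the link's parent directory; a fully qualified target is returned unchanged. A rough stand-alone sketch of that rule, with hypothetical URIs and paths (not code from this commit), using the same Path.makeQualified(URI, Path) call as the method shown:

```java
import java.net.URI;
import org.apache.hadoop.fs.Path;

class SymlinkQualifyExample {
  public static void main(String[] args) {
    URI fsUri = URI.create("hdfs://nn1:8020");          // URI of the FS holding the link
    Path link = new Path("hdfs://nn1:8020/dir/link");   // the symlink itself

    // Relative target: no scheme/authority, so it picks them up from fsUri
    // and is resolved against the link's parent directory (/dir).
    Path relative = new Path("target.txt");
    System.out.println(relative.makeQualified(fsUri, link.getParent()));
    // expected: hdfs://nn1:8020/dir/target.txt

    // Fully qualified target: left untouched by the rule in the hunk above.
    Path qualified = new Path("hdfs://other:8020/elsewhere/target.txt");
    System.out.println(qualified);
  }
}
```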
@@ -644,12 +644,12 @@ public class HttpServer implements FilterContainer {
    while (true) {
      try {
        port = webServer.getConnectors()[0].getLocalPort();
        LOG.info("Port returned by webServer.getConnectors()[0]." +
        LOG.debug("Port returned by webServer.getConnectors()[0]." +
          "getLocalPort() before open() is "+ port +
          ". Opening the listener on " + oriPort);
        listener.open();
        port = listener.getLocalPort();
        LOG.info("listener.getLocalPort() returned " + listener.getLocalPort() +
        LOG.debug("listener.getLocalPort() returned " + listener.getLocalPort() +
          " webServer.getConnectors()[0].getLocalPort() returned " +
          webServer.getConnectors()[0].getLocalPort());
        //Workaround to handle the problem reported in HADOOP-4744
@@ -1498,7 +1498,7 @@ public abstract class Server {
    @Override
    public void run() {
      LOG.info(getName() + ": starting");
      LOG.debug(getName() + ": starting");
      SERVER.set(Server.this);
      ByteArrayOutputStream buf =
        new ByteArrayOutputStream(INITIAL_RESP_BUF_SIZE);
@@ -1536,7 +1536,16 @@ public abstract class Server {
            );
          }
        } catch (Throwable e) {
          LOG.info(getName() + ", call: " + call + ", error: ", e);
          String logMsg = getName() + ", call " + call + ": error: " + e;
          if (e instanceof RuntimeException || e instanceof Error) {
            // These exception types indicate something is probably wrong
            // on the server side, as opposed to just a normal exceptional
            // result.
            LOG.warn(logMsg, e);
          } else {
            LOG.info(logMsg, e);
          }

          errorClass = e.getClass().getName();
          error = StringUtils.stringifyException(e);
          // Remove redundant error class name from the beginning of the stack trace
@@ -1571,7 +1580,7 @@ public abstract class Server {
          LOG.info(getName() + " caught an exception", e);
        }
      }
      LOG.info(getName() + ": exiting");
      LOG.debug(getName() + ": exiting");
    }

  }
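The handler change above is the HADOOP-7358 improvement from the CHANGES entries: unexpected server-side failures (RuntimeException, Error) are promoted to WARN, while ordinary exceptions that are simply returned to the client stay at INFO. A distilled sketch of that classification as a stand-alone helper, with a hypothetical class and handler name rather than the actual Server internals:

```java
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

class RpcCallLogging {
  private static final Log LOG = LogFactory.getLog(RpcCallLogging.class);

  static void logCallFailure(String handlerName, Object call, Throwable e) {
    String logMsg = handlerName + ", call " + call + ": error: " + e;
    if (e instanceof RuntimeException || e instanceof Error) {
      // Likely a server-side bug or resource problem: make it visible.
      LOG.warn(logMsg, e);
    } else {
      // A "normal" exceptional result that is propagated to the client.
      LOG.info(logMsg, e);
    }
  }
}
```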
@@ -389,7 +389,7 @@ public class WritableRpcEngine implements RpcEngine {
      }
      protocolImplMap.put(new ProtoNameVer(protocolName, version),
          new ProtoClassProtoImpl(protocolClass, protocolImpl));
      LOG.info("Protocol Name = " + protocolName + " version=" + version +
      LOG.debug("Protocol Name = " + protocolName + " version=" + version +
          " ProtocolImpl=" + protocolImpl.getClass().getName() +
          " protocolClass=" + protocolClass.getName());
    }
@@ -241,7 +241,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
        injectedTags, period, config.subset(SOURCE_KEY));
    sources.put(name, sa);
    sa.start();
    LOG.info("Registered source "+ name);
    LOG.debug("Registered source "+ name);
  }

  @Override public synchronized <T extends MetricsSink>
@@ -405,8 +405,8 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
  private synchronized void stopSources() {
    for (Entry<String, MetricsSourceAdapter> entry : sources.entrySet()) {
      MetricsSourceAdapter sa = entry.getValue();
      LOG.info("Stopping metrics source "+ entry.getKey());
      LOG.debug(sa.source().getClass());
      LOG.debug("Stopping metrics source "+ entry.getKey() +
          ": class=" + sa.source().getClass());
      sa.stop();
    }
    sysSource.stop();
@@ -416,8 +416,8 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
  private synchronized void stopSinks() {
    for (Entry<String, MetricsSinkAdapter> entry : sinks.entrySet()) {
      MetricsSinkAdapter sa = entry.getValue();
      LOG.info("Stopping metrics sink "+ entry.getKey());
      LOG.debug(sa.sink().getClass());
      LOG.debug("Stopping metrics sink "+ entry.getKey() +
          ": class=" + sa.sink().getClass());
      sa.stop();
    }
    sinks.clear();
@@ -0,0 +1,107 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.net;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;

/**
 * This is a base class for DNS to Switch mappings. <p/> It is not mandatory to
 * derive {@link DNSToSwitchMapping} implementations from it, but it is strongly
 * recommended, as it makes it easy for the Hadoop developers to add new methods
 * to this base class that are automatically picked up by all implementations.
 * <p/>
 *
 * This class does not extend the <code>Configured</code>
 * base class, and should not be changed to do so, as it causes problems
 * for subclasses. The constructor of the <code>Configured</code> calls
 * the {@link #setConf(Configuration)} method, which will call into the
 * subclasses before they have been fully constructed.
 *
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class AbstractDNSToSwitchMapping
    implements DNSToSwitchMapping, Configurable {

  private Configuration conf;

  /**
   * Create an unconfigured instance
   */
  protected AbstractDNSToSwitchMapping() {
  }

  /**
   * Create an instance, caching the configuration file.
   * This constructor does not call {@link #setConf(Configuration)}; if
   * a subclass extracts information in that method, it must call it explicitly.
   * @param conf the configuration
   */
  protected AbstractDNSToSwitchMapping(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public Configuration getConf() {
    return conf;
  }

  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }

  /**
   * Predicate that indicates that the switch mapping is known to be
   * single-switch. The base class returns false: it assumes all mappings are
   * multi-rack. Subclasses may override this with methods that are more aware
   * of their topologies.
   *
   * <p/>
   *
   * This method is used when parts of Hadoop need know whether to apply
   * single rack vs multi-rack policies, such as during block placement.
   * Such algorithms behave differently if they are on multi-switch systems.
   * </p>
   *
   * @return true if the mapping thinks that it is on a single switch
   */
  public boolean isSingleSwitch() {
    return false;
  }

  /**
   * Query for a {@link DNSToSwitchMapping} instance being on a single
   * switch.
   * <p/>
   * This predicate simply assumes that all mappings not derived from
   * this class are multi-switch.
   * @param mapping the mapping to query
   * @return true if the base class says it is single switch, or the mapping
   * is not derived from this class.
   */
  public static boolean isMappingSingleSwitch(DNSToSwitchMapping mapping) {
    return mapping instanceof AbstractDNSToSwitchMapping
        && ((AbstractDNSToSwitchMapping) mapping).isSingleSwitch();
  }

}
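The new base class above (HADOOP-7777) lets topology plugins advertise whether they sit on a single switch, either directly via isSingleSwitch() or, for callers that only hold a DNSToSwitchMapping reference, via the static isMappingSingleSwitch() helper, which assumes anything not derived from the base class is multi-switch. A hedged sketch of a custom mapping built on it; the class and its rack rule are invented for illustration and are not part of this commit:

```java
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.net.AbstractDNSToSwitchMapping;
import org.apache.hadoop.net.NetworkTopology;

// Hypothetical plugin: maps every host to the default rack, so it is
// honest about being single-switch.
public class SingleRackMapping extends AbstractDNSToSwitchMapping {

  @Override
  public List<String> resolve(List<String> names) {
    List<String> racks = new ArrayList<String>(names.size());
    for (String name : names) {
      // Every name resolves to the default rack in this toy mapping.
      racks.add(NetworkTopology.DEFAULT_RACK);
    }
    return racks;
  }

  @Override
  public boolean isSingleSwitch() {
    // Tell block placement and similar policies that there is only one switch.
    return true;
  }
}
```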
@@ -34,9 +34,13 @@ import org.apache.hadoop.classification.InterfaceStability;
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class CachedDNSToSwitchMapping implements DNSToSwitchMapping {
public class CachedDNSToSwitchMapping extends AbstractDNSToSwitchMapping {
  private Map<String, String> cache = new ConcurrentHashMap<String, String>();
  protected DNSToSwitchMapping rawMapping;

  /**
   * The uncached mapping
   */
  protected final DNSToSwitchMapping rawMapping;

  /**
   * cache a raw DNS mapping
@@ -118,4 +122,14 @@ public class CachedDNSToSwitchMapping implements DNSToSwitchMapping {
    return getCachedHosts(names);

  }

  /**
   * Delegate the switch topology query to the raw mapping, via
   * {@link AbstractDNSToSwitchMapping#isMappingSingleSwitch(DNSToSwitchMapping)}
   * @return true iff the raw mapper is considered single-switch.
   */
  @Override
  public boolean isSingleSwitch() {
    return isMappingSingleSwitch(rawMapping);
  }
}
@@ -40,6 +40,12 @@ public interface DNSToSwitchMapping {
   * Note the hostname/ip-address is not part of the returned path.
   * The network topology of the cluster would determine the number of
   * components in the network path.
   * <p/>
   *
   * If a name cannot be resolved to a rack, the implementation
   * should return {@link NetworkTopology#DEFAULT_RACK}. This
   * is what the bundled implementations do, though it is not a formal requirement
   *
   * @param names the list of hosts to resolve (can be empty)
   * @return list of resolved network paths.
   * If <i>names</i> is empty, the returned list is also empty
@ -32,16 +32,21 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
|
|||
|
||||
/**
|
||||
* This class implements the {@link DNSToSwitchMapping} interface using a
|
||||
* script configured via the {@link CommonConfigurationKeys#NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY}
|
||||
* script configured via the
|
||||
* {@link CommonConfigurationKeys#NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY} option.
|
||||
* <p/>
|
||||
* It contains a static class <code>RawScriptBasedMapping</code> that performs
|
||||
* the work: reading the configuration parameters, executing any defined
|
||||
* script, handling errors and such like. The outer
|
||||
* class extends {@link CachedDNSToSwitchMapping} to cache the delegated
|
||||
* queries.
|
||||
* <p/>
|
||||
* This DNS mapper's {@link #isSingleSwitch()} predicate returns
|
||||
* true if and only if a script is defined.
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
public final class ScriptBasedMapping extends CachedDNSToSwitchMapping
|
||||
implements Configurable
|
||||
{
|
||||
public ScriptBasedMapping() {
|
||||
super(new RawScriptBasedMapping());
|
||||
}
|
||||
public final class ScriptBasedMapping extends CachedDNSToSwitchMapping {
|
||||
|
||||
/**
|
||||
* Minimum number of arguments: {@value}
|
||||
|
@ -65,6 +70,18 @@ implements Configurable
|
|||
static final String SCRIPT_ARG_COUNT_KEY =
|
||||
CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY ;
|
||||
|
||||
/**
|
||||
* Create an instance with the default configuration.
|
||||
* </p>
|
||||
* Calling {@link #setConf(Configuration)} will trigger a
|
||||
* re-evaluation of the configuration settings and so be used to
|
||||
* set up the mapping script.
|
||||
*
|
||||
*/
|
||||
public ScriptBasedMapping() {
|
||||
super(new RawScriptBasedMapping());
|
||||
}
|
||||
|
||||
/**
|
||||
* Create an instance from the given configuration
|
||||
* @param conf configuration
|
||||
|
@ -74,14 +91,31 @@ implements Configurable
|
|||
setConf(conf);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Configuration getConf() {
|
||||
return ((RawScriptBasedMapping)rawMapping).getConf();
|
||||
/**
|
||||
* Get the cached mapping and convert it to its real type
|
||||
* @return the inner raw script mapping.
|
||||
*/
|
||||
private RawScriptBasedMapping getRawMapping() {
|
||||
return (RawScriptBasedMapping)rawMapping;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Configuration getConf() {
|
||||
return getRawMapping().getConf();
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
* <p/>
|
||||
* This will get called in the superclass constructor, so a check is needed
|
||||
* to ensure that the raw mapping is defined before trying to relaying a null
|
||||
* configuration.
|
||||
* @param conf
|
||||
*/
|
||||
@Override
|
||||
public void setConf(Configuration conf) {
|
||||
((RawScriptBasedMapping)rawMapping).setConf(conf);
|
||||
super.setConf(conf);
|
||||
getRawMapping().setConf(conf);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -89,29 +123,26 @@ implements Configurable
|
|||
* by the superclass {@link CachedDNSToSwitchMapping}
|
||||
*/
|
||||
private static final class RawScriptBasedMapping
|
||||
implements DNSToSwitchMapping {
|
||||
extends AbstractDNSToSwitchMapping {
|
||||
private String scriptName;
|
||||
private Configuration conf;
|
||||
private int maxArgs; //max hostnames per call of the script
|
||||
private static Log LOG =
|
||||
private static final Log LOG =
|
||||
LogFactory.getLog(ScriptBasedMapping.class);
|
||||
|
||||
/**
|
||||
* Set the configuration and
|
||||
* @param conf extract the configuration parameters of interest
|
||||
* Set the configuration and extract the configuration parameters of interest
|
||||
* @param conf the new configuration
|
||||
*/
|
||||
@Override
|
||||
public void setConf (Configuration conf) {
|
||||
this.scriptName = conf.get(SCRIPT_FILENAME_KEY);
|
||||
this.maxArgs = conf.getInt(SCRIPT_ARG_COUNT_KEY, DEFAULT_ARG_COUNT);
|
||||
this.conf = conf;
|
||||
super.setConf(conf);
|
||||
if (conf != null) {
|
||||
scriptName = conf.get(SCRIPT_FILENAME_KEY);
|
||||
maxArgs = conf.getInt(SCRIPT_ARG_COUNT_KEY, DEFAULT_ARG_COUNT);
|
||||
} else {
|
||||
scriptName = null;
|
||||
maxArgs = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the configuration
|
||||
* @return the configuration
|
||||
*/
|
||||
public Configuration getConf () {
|
||||
return conf;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -122,14 +153,14 @@ implements Configurable
|
|||
|
||||
@Override
|
||||
public List<String> resolve(List<String> names) {
|
||||
List <String> m = new ArrayList<String>(names.size());
|
||||
List<String> m = new ArrayList<String>(names.size());
|
||||
|
||||
if (names.isEmpty()) {
|
||||
return m;
|
||||
}
|
||||
|
||||
if (scriptName == null) {
|
||||
for (int i = 0; i < names.size(); i++) {
|
||||
for (String name : names) {
|
||||
m.add(NetworkTopology.DEFAULT_RACK);
|
||||
}
|
||||
return m;
|
||||
|
@ -195,10 +226,10 @@ implements Configurable
|
|||
dir = new File(userDir);
|
||||
}
|
||||
ShellCommandExecutor s = new ShellCommandExecutor(
|
||||
cmdList.toArray(new String[0]), dir);
|
||||
cmdList.toArray(new String[cmdList.size()]), dir);
|
||||
try {
|
||||
s.execute();
|
||||
allOutput.append(s.getOutput() + " ");
|
||||
allOutput.append(s.getOutput()).append(" ");
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Exception: ", e);
|
||||
return null;
|
||||
|
@ -207,5 +238,15 @@ implements Configurable
|
|||
}
|
||||
return allOutput.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* Declare that the mapper is single-switched if a script was not named
|
||||
* in the configuration.
|
||||
* @return true iff there is no script
|
||||
*/
|
||||
@Override
|
||||
public boolean isSingleSwitch() {
|
||||
return scriptName == null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,34 +17,80 @@
|
|||
*/
|
||||
package org.apache.hadoop.net;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.conf.Configured;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Implements the {@link DNSToSwitchMapping} via static mappings. Used
|
||||
* in testcases that simulate racks.
|
||||
* in testcases that simulate racks, and in the
|
||||
* {@link org.apache.hadoop.hdfs.MiniDFSCluster}
|
||||
*
|
||||
* A shared, static mapping is used; to reset it call {@link #resetMap()}.
|
||||
*
|
||||
* When an instance of the class has its {@link #setConf(Configuration)}
|
||||
* method called, nodes listed in the configuration will be added to the map.
|
||||
* These do not get removed when the instance is garbage collected.
|
||||
*/
|
||||
public class StaticMapping extends Configured implements DNSToSwitchMapping {
|
||||
public void setconf(Configuration conf) {
|
||||
String[] mappings = conf.getStrings("hadoop.configured.node.mapping");
|
||||
public class StaticMapping extends AbstractDNSToSwitchMapping {
|
||||
|
||||
/**
|
||||
* key to define the node mapping as a comma-delimited list of host=rack
|
||||
* mappings, e.g. <code>host1=r1,host2=r1,host3=r2</code>.
|
||||
* </p>
|
||||
* <b>Important: </b>spaces not trimmed and are considered significant.
|
||||
*/
|
||||
public static final String KEY_HADOOP_CONFIGURED_NODE_MAPPING =
|
||||
"hadoop.configured.node.mapping";
|
||||
|
||||
/**
|
||||
* Configure the mapping by extracting any mappings defined in the
|
||||
* {@link #KEY_HADOOP_CONFIGURED_NODE_MAPPING} field
|
||||
* @param conf new configuration
|
||||
*/
|
||||
@Override
|
||||
public void setConf(Configuration conf) {
|
||||
super.setConf(conf);
|
||||
if (conf != null) {
|
||||
String[] mappings = conf.getStrings(KEY_HADOOP_CONFIGURED_NODE_MAPPING);
|
||||
if (mappings != null) {
|
||||
for (int i = 0; i < mappings.length; i++) {
|
||||
String str = mappings[i];
|
||||
for (String str : mappings) {
|
||||
String host = str.substring(0, str.indexOf('='));
|
||||
String rack = str.substring(str.indexOf('=') + 1);
|
||||
addNodeToRack(host, rack);
|
||||
}
|
||||
}
|
||||
}
|
||||
/* Only one instance per JVM */
|
||||
private static Map<String, String> nameToRackMap = new HashMap<String, String>();
|
||||
}
|
||||
|
||||
static synchronized public void addNodeToRack(String name, String rackId) {
|
||||
/**
|
||||
* retained lower case setter for compatibility reasons; relays to
|
||||
* {@link #setConf(Configuration)}
|
||||
* @param conf new configuration
|
||||
*/
|
||||
public void setconf(Configuration conf) {
|
||||
setConf(conf);
|
||||
}
|
||||
|
||||
/* Only one instance per JVM */
|
||||
private static final Map<String, String> nameToRackMap = new HashMap<String, String>();
|
||||
|
||||
/**
|
||||
* Add a node to the static map. The moment any entry is added to the map,
|
||||
* the map goes multi-rack.
|
||||
* @param name node name
|
||||
* @param rackId rack ID
|
||||
*/
|
||||
public static void addNodeToRack(String name, String rackId) {
|
||||
synchronized (nameToRackMap) {
|
||||
nameToRackMap.put(name, rackId);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<String> resolve(List<String> names) {
|
||||
List<String> m = new ArrayList<String>();
|
||||
synchronized (nameToRackMap) {
|
||||
|
@ -59,4 +105,24 @@ public class StaticMapping extends Configured implements DNSToSwitchMapping {
|
|||
return m;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* This mapping is only single switch if the map is empty
|
||||
* @return the current switching status
|
||||
*/
|
||||
@Override
|
||||
public boolean isSingleSwitch() {
|
||||
synchronized (nameToRackMap) {
|
||||
return nameToRackMap.isEmpty();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear the map and revert to being a single switch
|
||||
*/
|
||||
public static void resetMap() {
|
||||
synchronized (nameToRackMap) {
|
||||
nameToRackMap.clear();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -23,30 +23,59 @@ import java.util.List;
|
|||
import org.apache.hadoop.conf.Configuration;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
import org.junit.Test;
|
||||
|
||||
public class TestScriptBasedMapping extends TestCase {
|
||||
|
||||
private ScriptBasedMapping mapping;
|
||||
private Configuration conf;
|
||||
private List<String> names;
|
||||
|
||||
|
||||
public TestScriptBasedMapping() {
|
||||
mapping = new ScriptBasedMapping();
|
||||
|
||||
conf = new Configuration();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testNoArgsMeansNoResult() {
|
||||
Configuration conf = new Configuration();
|
||||
conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY,
|
||||
ScriptBasedMapping.MIN_ALLOWABLE_ARGS - 1);
|
||||
conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "any-filename");
|
||||
|
||||
mapping.setConf(conf);
|
||||
}
|
||||
|
||||
public void testNoArgsMeansNoResult() {
|
||||
names = new ArrayList<String>();
|
||||
conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "any-filename");
|
||||
ScriptBasedMapping mapping = createMapping(conf);
|
||||
List<String> names = new ArrayList<String>();
|
||||
names.add("some.machine.name");
|
||||
names.add("other.machine.name");
|
||||
List<String> result = mapping.resolve(names);
|
||||
assertNull(result);
|
||||
assertNull("Expected an empty list", result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testNoFilenameMeansSingleSwitch() throws Throwable {
|
||||
Configuration conf = new Configuration();
|
||||
ScriptBasedMapping mapping = createMapping(conf);
|
||||
assertTrue("Expected to be single switch", mapping.isSingleSwitch());
|
||||
assertTrue("Expected to be single switch",
|
||||
AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFilenameMeansMultiSwitch() throws Throwable {
|
||||
Configuration conf = new Configuration();
|
||||
conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "any-filename");
|
||||
ScriptBasedMapping mapping = createMapping(conf);
|
||||
assertFalse("Expected to be multi switch", mapping.isSingleSwitch());
|
||||
mapping.setConf(new Configuration());
|
||||
assertTrue("Expected to be single switch", mapping.isSingleSwitch());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testNullConfig() throws Throwable {
|
||||
ScriptBasedMapping mapping = createMapping(null);
|
||||
assertTrue("Expected to be single switch", mapping.isSingleSwitch());
|
||||
|
||||
}
|
||||
private ScriptBasedMapping createMapping(Configuration conf) {
|
||||
ScriptBasedMapping mapping = new ScriptBasedMapping();
|
||||
mapping.setConf(conf);
|
||||
return mapping;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,103 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.net;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Test the static mapping class.
|
||||
* Because the map is actually static, this map needs to be reset for every test
|
||||
*/
|
||||
public class TestStaticMapping extends Assert {
|
||||
|
||||
/**
|
||||
* Reset the map then create a new instance of the {@link StaticMapping}
|
||||
* class
|
||||
* @return a new instance
|
||||
*/
|
||||
private StaticMapping newInstance() {
|
||||
StaticMapping.resetMap();
|
||||
return new StaticMapping();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testStaticIsSingleSwitch() throws Throwable {
|
||||
StaticMapping mapping = newInstance();
|
||||
assertTrue("Empty maps are not single switch", mapping.isSingleSwitch());
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testCachingRelaysQueries() throws Throwable {
|
||||
StaticMapping staticMapping = newInstance();
|
||||
CachedDNSToSwitchMapping mapping =
|
||||
new CachedDNSToSwitchMapping(staticMapping);
|
||||
assertTrue("Expected single switch", mapping.isSingleSwitch());
|
||||
StaticMapping.addNodeToRack("n1", "r1");
|
||||
assertFalse("Expected to be multi switch",
|
||||
mapping.isSingleSwitch());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAddResolveNodes() throws Throwable {
|
||||
StaticMapping mapping = newInstance();
|
||||
StaticMapping.addNodeToRack("n1", "r1");
|
||||
List<String> l1 = new ArrayList<String>(2);
|
||||
l1.add("n1");
|
||||
l1.add("unknown");
|
||||
List<String> mappings = mapping.resolve(l1);
|
||||
assertEquals(2, mappings.size());
|
||||
assertEquals("r1", mappings.get(0));
|
||||
assertEquals(NetworkTopology.DEFAULT_RACK, mappings.get(1));
|
||||
assertFalse("Mapping is still single switch", mapping.isSingleSwitch());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testReadNodesFromConfig() throws Throwable {
|
||||
StaticMapping mapping = newInstance();
|
||||
Configuration conf = new Configuration();
|
||||
conf.set(StaticMapping.KEY_HADOOP_CONFIGURED_NODE_MAPPING, "n1=r1,n2=r2");
|
||||
mapping.setConf(conf);
|
||||
List<String> l1 = new ArrayList<String>(3);
|
||||
l1.add("n1");
|
||||
l1.add("unknown");
|
||||
l1.add("n2");
|
||||
List<String> mappings = mapping.resolve(l1);
|
||||
assertEquals(3, mappings.size());
|
||||
assertEquals("r1", mappings.get(0));
|
||||
assertEquals(NetworkTopology.DEFAULT_RACK, mappings.get(1));
|
||||
assertEquals("r2", mappings.get(2));
|
||||
assertFalse("Expected to be multi switch",
|
||||
AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testNullConfiguration() throws Throwable {
|
||||
StaticMapping mapping = newInstance();
|
||||
mapping.setConf(null);
|
||||
assertTrue("Null maps is not single switch", mapping.isSingleSwitch());
|
||||
assertTrue("Expected to be single switch",
|
||||
AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
|
||||
}
|
||||
}
|
|
@ -0,0 +1,53 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.net;
|
||||
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Test some other details of the switch mapping
|
||||
*/
|
||||
public class TestSwitchMapping extends Assert {
|
||||
|
||||
@Test
|
||||
public void testStandaloneClassesAssumedMultiswitch() throws Throwable {
|
||||
DNSToSwitchMapping mapping = new StandaloneSwitchMapping();
|
||||
assertFalse("Expected to be multi switch",
|
||||
AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testCachingRelays() throws Throwable {
|
||||
CachedDNSToSwitchMapping mapping =
|
||||
new CachedDNSToSwitchMapping(new StandaloneSwitchMapping());
|
||||
assertFalse("Expected to be multi switch",
|
||||
mapping.isSingleSwitch());
|
||||
}
|
||||
|
||||
private static class StandaloneSwitchMapping implements DNSToSwitchMapping {
|
||||
@Override
|
||||
public List<String> resolve(List<String> names) {
|
||||
return names;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -11,6 +11,8 @@ Trunk (unreleased changes)
|
|||
|
||||
HDFS-2520. Add protobuf service for InterDatanodeProtocol. (suresh)
|
||||
|
||||
HDFS-2519. Add protobuf service for DatanodeProtocol. (suresh)
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
HADOOP-7524 Change RPC to allow multiple protocols including multuple
|
||||
|
@ -122,6 +124,9 @@ Release 0.23.1 - UNRELEASED
|
|||
|
||||
NEW FEATURES
|
||||
|
||||
HDFS-2316. [umbrella] webhdfs: a complete FileSystem implementation for
|
||||
accessing HDFS over HTTP (szetszwo)
|
||||
|
||||
IMPROVEMENTS
|
||||
HDFS-2560. Refactor BPOfferService to be a static inner class (todd)
|
||||
|
||||
|
@ -151,6 +156,10 @@ Release 0.23.1 - UNRELEASED
|
|||
|
||||
HDFS-2566. Move BPOfferService to be a non-inner class. (todd)
|
||||
|
||||
HDFS-2552. Add Forrest doc for WebHDFS REST API. (szetszwo)
|
||||
|
||||
HDFS-2587. Add apt doc for WebHDFS REST API. (szetszwo)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
HDFS-2130. Switch default checksum to CRC32C. (todd)
|
||||
|
@ -178,6 +187,10 @@ Release 0.23.1 - UNRELEASED
|
|||
|
||||
HDFS-2575. DFSTestUtil may create empty files (todd)
|
||||
|
||||
HDFS-2588. hdfs jsp pages missing DOCTYPE. (Dave Vronay via mattf)
|
||||
|
||||
HDFS-2590. Fix the missing links in the WebHDFS forrest doc. (szetszwo)
|
||||
|
||||
Release 0.23.0 - 2011-11-01
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
|
|
@ -242,7 +242,11 @@ for my $rel (@releases) {
|
|||
|
||||
$item =~ s:\s*(\([^)"]+?\))\s*$:<br />$1:; # Separate attribution
|
||||
$item =~ s:\n{2,}:\n<p/>\n:g; # Keep paragraph breaks
|
||||
$item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)} # Link to JIRA
|
||||
$item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)} # Link to JIRA Common
|
||||
{<a href="${jira_url_prefix}$1">$1</a>}g;
|
||||
$item =~ s{(?:${jira_url_prefix})?(HDFS-\d+)} # Link to JIRA Hdfs
|
||||
{<a href="${jira_url_prefix}$1">$1</a>}g;
|
||||
$item =~ s{(?:${jira_url_prefix})?(MAPREDUCE-\d+)} # Link to JIRA MR
|
||||
{<a href="${jira_url_prefix}$1">$1</a>}g;
|
||||
print " <li>$item</li>\n";
|
||||
}
|
||||
|
|
|
@ -43,6 +43,7 @@ See http://forrest.apache.org/docs/linking.html for more info.
|
|||
<hdfs_SLG label="Synthetic Load Generator" href="SLG_user_guide.html" />
|
||||
<hdfs_imageviewer label="Offline Image Viewer" href="hdfs_imageviewer.html" />
|
||||
<hdfs_editsviewer label="Offline Edits Viewer" href="hdfs_editsviewer.html" />
|
||||
<webhdfs label="WebHDFS REST API" href="webhdfs.html" />
|
||||
<hftp label="HFTP" href="hftp.html"/>
|
||||
<faultinject_framework label="Fault Injection" href="faultinject_framework.html" />
|
||||
<hdfs_libhdfs label="C API libhdfs" href="libhdfs.html" />
|
||||
|
@ -119,8 +120,33 @@ See http://forrest.apache.org/docs/linking.html for more info.
|
|||
</distributedcache>
|
||||
</filecache>
|
||||
<fs href="fs/">
|
||||
<filesystem href="FileSystem.html" />
|
||||
<FileStatus href="FileStatus.html" />
|
||||
<Path href="Path.html" />
|
||||
|
||||
<filesystem href="FileSystem.html">
|
||||
<open href="#open(org.apache.hadoop.fs.Path,%20int)" />
|
||||
<getFileStatus href="#getFileStatus(org.apache.hadoop.fs.Path)" />
|
||||
<listStatus href="#listStatus(org.apache.hadoop.fs.Path)" />
|
||||
<getContentSummary href="#getContentSummary(org.apache.hadoop.fs.Path)" />
|
||||
<getFileChecksum href="#getFileChecksum(org.apache.hadoop.fs.Path)" />
|
||||
<getHomeDirectory href="#getHomeDirectory()" />
|
||||
<getDelegationToken href="#getDelegationToken(org.apache.hadoop.io.Text)" />
|
||||
|
||||
<create href="#create(org.apache.hadoop.fs.Path,%20org.apache.hadoop.fs.permission.FsPermission,%20boolean,%20int,%20short,%20long,%20org.apache.hadoop.util.Progressable)" />
|
||||
<mkdirs href="#mkdirs(org.apache.hadoop.fs.Path,%20org.apache.hadoop.fs.permission.FsPermission)" />
|
||||
<rename href="#rename(org.apache.hadoop.fs.Path,%20org.apache.hadoop.fs.Path,%20org.apache.hadoop.fs.Options.Rename...)" />
|
||||
<setReplication href="#setReplication(org.apache.hadoop.fs.Path,%20short)" />
|
||||
<setOwner href="#setOwner(org.apache.hadoop.fs.Path,%20java.lang.String,%20java.lang.String)" />
|
||||
<setPermission href="#setPermission(org.apache.hadoop.fs.Path,%20org.apache.hadoop.fs.permission.FsPermission)" />
|
||||
<setTimes href="#setTimes(org.apache.hadoop.fs.Path,%20long,%20long)" />
|
||||
|
||||
<append href="#append(org.apache.hadoop.fs.Path,%20int,%20org.apache.hadoop.util.Progressable)" />
|
||||
<delete href="#delete(org.apache.hadoop.fs.Path,%20boolean)" />
|
||||
</filesystem>
|
||||
</fs>
|
||||
|
||||
|
||||
|
||||
<io href="io/">
|
||||
<closeable href="Closeable.html">
|
||||
<close href="#close()" />
|
||||
|
|
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -27,7 +27,7 @@
|
|||
//for java.io.Serializable
|
||||
private static final long serialVersionUID = 1L;
|
||||
%>
|
||||
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<%JspHelper.createTitle(out, request, request.getParameter("filename")); %>
|
||||
|
|
|
@ -29,7 +29,7 @@
|
|||
//for java.io.Serializable
|
||||
private static final long serialVersionUID = 1L;
|
||||
%>
|
||||
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<style type=text/css>
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
//for java.io.Serializable
|
||||
private static final long serialVersionUID = 1L;
|
||||
%>
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<%JspHelper.createTitle(out, request, request.getParameter("filename")); %>
|
||||
|
|
|
@ -38,6 +38,7 @@
|
|||
int corruptFileCount = corruptFileBlocks.size();
|
||||
%>
|
||||
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<link rel="stylesheet" type="text/css" href="/static/hadoop.css">
|
||||
<title>Hadoop <%=namenodeRole%> <%=namenodeLabel%></title>
|
||||
|
|
|
@ -33,6 +33,7 @@
|
|||
String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
|
||||
%>
|
||||
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
|
||||
<link rel="stylesheet" type="text/css" href="/static/hadoop.css">
|
||||
|
|
|
@ -33,6 +33,7 @@ FSNamesystem fsn = nn.getNamesystem();
|
|||
String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
|
||||
%>
|
||||
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
|
||||
<link rel="stylesheet" type="text/css" href="/static/hadoop.css">
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
//for java.io.Serializable
|
||||
private static final long serialVersionUID = 1L;
|
||||
%>
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
|
||||
<title></title>
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
private static final long serialVersionUID = 1L;
|
||||
%>
|
||||
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<link rel="stylesheet" type="text/css" href="/static/hadoop.css">
|
||||
<title>Hadoop SecondaryNameNode</title>
|
||||
|
|
|
@ -0,0 +1,344 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// This file contains protocol buffers that are used throughout HDFS -- i.e.
|
||||
// by the client, server, and data transfer protocols.
|
||||
|
||||
option java_package = "org.apache.hadoop.hdfs.protocol.proto";
|
||||
option java_outer_classname = "DatanodeProtocolProtos";
|
||||
option java_generic_services = true;
|
||||
option java_generate_equals_and_hash = true;
|
||||
|
||||
import "hdfs.proto";
|
||||
|
||||
/**
|
||||
* Information to identify a datanode to a namenode
|
||||
*/
|
||||
message DatanodeRegistrationProto {
|
||||
required DatanodeIDProto datanodeID = 1; // Datanode information
|
||||
required StorageInfoProto storateInfo = 2; // Node information
|
||||
required ExportedBlockKeysProto keys = 3; // Block keys
|
||||
}
|
||||
|
||||
/**
|
||||
* Commands sent from namenode to the datanodes
|
||||
*/
|
||||
message DatanodeCommandProto {
|
||||
enum Type {
|
||||
BalancerBandwidthCommand = 0;
|
||||
BlockCommand = 1;
|
||||
BlockRecoveryCommand = 2;
|
||||
FinalizeCommand = 3;
|
||||
KeyUpdateCommand = 4;
|
||||
RegisterCommand = 5;
|
||||
UpgradeCommand = 6;
|
||||
}
|
||||
|
||||
required Type cmdType = 1; // Type of the command
|
||||
|
||||
// One of the following command is available when the corresponding
|
||||
// cmdType is set
|
||||
optional BalancerBandwidthCommandProto balancerCmd = 2;
|
||||
optional BlockCommandProto blkCmd = 3;
|
||||
optional BlockRecoveryCommndProto recoveryCmd = 4;
|
||||
optional FinalizeCommandProto finalizeCmd = 5;
|
||||
optional KeyUpdateCommandProto keyUpdateCmd = 6;
|
||||
optional RegisterCommandProto registerCmd = 7;
|
||||
optional UpgradeCommandProto upgradeCmd = 8;
|
||||
}
|
||||
|
||||
/**
|
||||
* Command sent from namenode to datanode to set the
|
||||
* maximum bandwidth to be used for balancing.
|
||||
*/
|
||||
message BalancerBandwidthCommandProto {
|
||||
|
||||
// Maximum bandwidth to be used by datanode for balancing
|
||||
required uint64 bandwidth = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Command to instruct datanodes to perform certain action
|
||||
* on the given set of blocks.
|
||||
*/
|
||||
message BlockCommandProto {
|
||||
enum Action {
|
||||
UNKNOWN = 0; // Unknown action
|
||||
TRANSFER = 1; // Transfer blocks to another datanode
|
||||
INVALIDATE = 2; // Invalidate blocks
|
||||
SHUTDOWN = 3; // Shutdown node
|
||||
}
|
||||
required uint32 action = 1;
|
||||
required string blockPoolId = 2;
|
||||
repeated BlockProto blocks = 3;
|
||||
repeated DatanodeIDsProto targets = 4;
|
||||
}
|
||||
|
||||
/**
|
||||
* List of blocks to be recovered by the datanode
|
||||
*/
|
||||
message BlockRecoveryCommndProto {
|
||||
repeated RecoveringBlockProto blocks = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Finalize the upgrade at the datanode
|
||||
*/
|
||||
message FinalizeCommandProto {
|
||||
required string blockPoolId = 1; // Block pool to be finalized
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the block keys at the datanode
|
||||
*/
|
||||
message KeyUpdateCommandProto {
|
||||
required ExportedBlockKeysProto keys = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Instruct datanode to register with the namenode
|
||||
*/
|
||||
message RegisterCommandProto {
|
||||
// void
|
||||
}
|
||||
|
||||
/**
|
||||
* Generic distributed upgrade Command
|
||||
*/
|
||||
message UpgradeCommandProto {
|
||||
enum Action {
|
||||
UNKNOWN = 0; // Unknown action
|
||||
REPORT_STATUS = 100; // Report upgrade status
|
||||
START_UPGRADE = 101; // Start upgrade
|
||||
}
|
||||
required uint32 action = 1; // Upgrade action
|
||||
required uint32 version = 2; // Version of the upgrade
|
||||
required uint32 upgradeStatus = 3; // % completed in range 0 & 100
|
||||
}
|
||||
|
||||
/**
|
||||
* registration - Information of the datanode registering with the namenode
|
||||
*/
|
||||
message RegisterDatanodeRequestProto {
|
||||
required DatanodeRegistrationProto registration = 1; // Datanode info
|
||||
}
|
||||
|
||||
/**
|
||||
* registration - Update registration of the datanode that successfully
|
||||
* registered. StorageInfo will be updated to include new
|
||||
* storage ID if the datanode did not have one in the request.
|
||||
*/
|
||||
message RegisterDatanodeResponseProto {
|
||||
required DatanodeRegistrationProto registration = 1; // Datanode info
|
||||
}
|
||||
|
||||
/**
|
||||
* registration - datanode registration information
|
||||
* capacity - total storage capacity available at the datanode
|
||||
* dfsUsed - storage used by HDFS
|
||||
* remaining - remaining storage available for HDFS
|
||||
* blockPoolUsed - storage used by the block pool
|
||||
* xmitsInProgress - number of transfers from this datanode to others
|
||||
* xceiverCount - number of active transceiver threads
|
||||
* failedVolumes - number of failed volumes
|
||||
*/
|
||||
message HeartbeatRequestProto {
|
||||
required DatanodeRegistrationProto registration = 1; // Datanode info
|
||||
required uint64 capacity = 2;
|
||||
required uint64 dfsUsed = 3;
|
||||
required uint64 remaining = 4;
|
||||
required uint64 blockPoolUsed = 5;
|
||||
required uint32 xmitsInProgress = 6;
|
||||
required uint32 xceiverCount = 7;
|
||||
required uint32 failedVolumes = 8;
|
||||
}
|
||||
|
||||
/**
|
||||
* cmds - Commands from namenode to datanode.
|
||||
*/
|
||||
message HeartbeatResponseProto {
|
||||
repeated DatanodeCommandProto cmds = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* registration - datanode registration information
|
||||
* blockPoolID - block pool ID of the reported blocks
|
||||
* blocks - each block is represented as two longs in the array.
|
||||
* first long represents block ID
|
||||
* second long represents length
|
||||
*/
|
||||
message BlockReportRequestProto {
|
||||
required DatanodeRegistrationProto registration = 1;
|
||||
required string blockPoolId = 2;
|
||||
repeated uint64 blocks = 3 [packed=true];
|
||||
}
|
||||
|
||||
/**
|
||||
* cmd - Command from namenode to the datanode
|
||||
*/
|
||||
message BlockReportResponseProto {
|
||||
required DatanodeCommandProto cmd = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Data structure to send received or deleted block information
|
||||
* from datanode to namenode.
|
||||
*
|
||||
* deleteHint set to "-" indicates block deletion.
|
||||
* other deleteHint indicates block addition.
|
||||
*/
|
||||
message ReceivedDeletedBlockInfoProto {
|
||||
required BlockProto block = 1;
|
||||
optional string deleteHint = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* registration - datanode registration information
|
||||
* blockPoolID - block pool ID of the reported blocks
|
||||
* blocks - Received/deleted block list
|
||||
*/
|
||||
message BlockReceivedAndDeletedRequestProto {
|
||||
required DatanodeRegistrationProto registration = 1;
|
||||
required string blockPoolId = 2;
|
||||
repeated ReceivedDeletedBlockInfoProto blocks = 3;
|
||||
}
|
||||
|
||||
/**
|
||||
* void response
|
||||
*/
|
||||
message BlockReceivedAndDeletedResponseProto {
|
||||
}
|
||||
|
||||
/**
|
||||
* registartion - Datanode reporting the error
|
||||
* errorCode - error code indicating the error
|
||||
* msg - Free text description of the error
|
||||
*/
|
||||
message ErrorReportRequestProto {
|
||||
enum ErrorCode {
|
||||
NOTIFY = 0; // Error report to be logged at the namenode
|
||||
DISK_ERROR = 1; // DN has disk errors but still has valid volumes
|
||||
INVALID_BLOCK = 2; // Command from namenode has invalid block ID
|
||||
FATAL_DISK_ERROR = 3; // No valid volumes left on datanode
|
||||
}
|
||||
required DatanodeRegistrationProto registartion = 1; // Registartion info
|
||||
required uint32 errorCode = 2; // Error code
|
||||
required string msg = 3; // Error message
|
||||
}
|
||||
|
||||
/**
|
||||
* void response
|
||||
*/
|
||||
message ErrorReportResponseProto {
|
||||
}
|
||||
|
||||
/**
|
||||
* cmd - Upgrade command sent from datanode to namenode
|
||||
*/
|
||||
message ProcessUpgradeRequestProto {
|
||||
optional UpgradeCommandProto cmd = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* cmd - Upgrade command sent from namenode to datanode
|
||||
*/
|
||||
message ProcessUpgradeResponseProto {
|
||||
optional UpgradeCommandProto cmd = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* blocks - list of blocks that are reported as corrupt
|
||||
*/
|
||||
message ReportBadBlocksRequestProto {
|
||||
repeated LocatedBlockProto blocks = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* void response
|
||||
*/
|
||||
message ReportBadBlocksResponseProto {
|
||||
}
|
||||
|
||||
/**
|
||||
* Commit block synchronization request during lease recovery
|
||||
*/
|
||||
message CommitBlockSynchronizationRequestProto {
|
||||
required ExtendedBlockProto block = 1;
|
||||
required uint64 newGenStamp = 2;
|
||||
required uint64 newLength = 3;
|
||||
required bool closeFile = 4;
|
||||
required bool deleteBlock = 5;
|
||||
repeated DatanodeIDProto newTaragets = 6;
|
||||
}
|
||||
|
||||
/**
|
||||
* void response
|
||||
*/
|
||||
message CommitBlockSynchronizationResponseProto {
|
||||
}
|
||||
|
||||
/**
|
||||
* Protocol used from datanode to the namenode
|
||||
* See the request and response for details of rpc call.
|
||||
*/
|
||||
service DatanodeProtocolService {
|
||||
/**
|
||||
* Register a datanode at a namenode
|
||||
*/
|
||||
rpc registerDatanode(RegisterDatanodeRequestProto)
|
||||
returns(RegisterDatanodeResponseProto);
|
||||
|
||||
/**
|
||||
* Send heartbeat from datanode to namenode
|
||||
*/
|
||||
rpc sendHeartbeat(HeartbeatRequestProto) returns(HeartbeatResponseProto);
|
||||
|
||||
/**
|
||||
* Report blocks at a given datanode to the namenode
|
||||
*/
|
||||
rpc blockReport(BlockReportRequestProto) returns(BlockReportResponseProto);
|
||||
|
||||
/**
|
||||
* Report from datanode about recently received or deleted block
|
||||
*/
|
||||
rpc blockReceivedAndDeleted(BlockReceivedAndDeletedRequestProto)
|
||||
returns(BlockReceivedAndDeletedResponseProto);
|
||||
|
||||
/**
|
||||
* Report from a datanode of an error to the active namenode.
|
||||
* Used for debugging.
|
||||
*/
|
||||
rpc errorReport(ErrorReportRequestProto) returns(ErrorReportResponseProto);
|
||||
|
||||
/**
|
||||
* Generic way to send commands from datanode to namenode during
|
||||
* distributed upgrade process.
|
||||
*/
|
||||
rpc processUpgrade(ProcessUpgradeRequestProto) returns(ProcessUpgradeResponseProto);
|
||||
|
||||
/**
|
||||
* Report corrupt blocks at the specified location
|
||||
*/
|
||||
rpc reportBadBlocks(ReportBadBlocksRequestProto) returns(ReportBadBlocksResponseProto);
|
||||
|
||||
/**
|
||||
* Commit block synchronization during lease recovery.
|
||||
*/
|
||||
rpc commitBlockSynchronization(CommitBlockSynchronizationRequestProto)
|
||||
returns(CommitBlockSynchronizationResponseProto);
|
||||
}
|
|
@ -41,6 +41,8 @@ Trunk (unreleased changes)
|
|||
(tucu)
|
||||
|
||||
BUG FIXES
|
||||
MAPREDUCE-3412. Fix 'ant docs'. (amarrk)
|
||||
|
||||
MAPREDUCE-3346. [Rumen] LoggedTaskAttempt#getHostName() returns null.
|
||||
(amarrk)
|
||||
|
||||
|
@ -106,6 +108,17 @@ Release 0.23.1 - Unreleased
|
|||
MAPREDUCE-3372. HADOOP_PREFIX cannot be overridden.
|
||||
(Bruno Mahé via tomwhite)
|
||||
|
||||
MAPREDUCE-3411. Performance Upgrade for jQuery (Jonathan Eagles via
|
||||
mahadev)
|
||||
|
||||
MAPREDUCE-3371. Review and improve the yarn-api javadocs. (Ravi Prakash
|
||||
via mahadev)
|
||||
|
||||
MAPREDUCE-3238. Small cleanup in SchedulerApp. (Todd Lipcon via mahadev)
|
||||
|
||||
MAPREDUCE-3413. RM web ui applications not sorted in any order by default.
|
||||
(Jonathan Eagles via mahadev)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
BUG FIXES
|
||||
|
@ -159,6 +172,20 @@ Release 0.23.1 - Unreleased
|
|||
|
||||
MAPREDUCE-3444. trunk/0.23 builds broken (Hitesh Shah via mahadev)
|
||||
|
||||
MAPREDUCE-3454. [Gridmix] TestDistCacheEmulation is broken (Hitesh Shah
|
||||
via mahadev)
|
||||
|
||||
MAPREDUCE-3408. yarn-daemon.sh unconditionnaly sets yarn.root.logger
|
||||
(Bruno Mahe via mahadev)
|
||||
|
||||
MAPREDUCE-3329. Fixed CapacityScheduler to ensure maximum-capacity cannot
|
||||
be lesser than capacity for any queue. (acmurthy)
|
||||
|
||||
MAPREDUCE-3464. mapreduce jsp pages missing DOCTYPE. (Dave Vronay via mattf)
|
||||
|
||||
MAPREDUCE-3265. Removed debug logs during job submission to LOG.debug to
|
||||
cut down noise. (acmurthy)
|
||||
|
||||
Release 0.23.0 - 2011-11-01
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
|
|
@ -932,10 +932,6 @@
|
|||
<style basedir="${mapred.src.dir}" destdir="${build.docs}"
|
||||
includes="mapred-default.xml" style="conf/configuration.xsl"/>
|
||||
<antcall target="changes-to-html"/>
|
||||
<subant target="docs">
|
||||
<property name="build.docs" value="${build.docs}"/>
|
||||
<fileset file="${contrib.dir}/build.xml"/>
|
||||
</subant>
|
||||
</target>
|
||||
|
||||
<target name="javadoc-dev" depends="compile, ivy-retrieve-javadoc" description="Generate javadoc for hadoop developers">
|
||||
|
|
|
@ -51,6 +51,8 @@ public class AppView extends TwoColumnLayout {
|
|||
|
||||
private String jobsTableInit() {
|
||||
return tableInit().
|
||||
// Sort by id upon page load
|
||||
append(", aaSorting: [[0, 'asc']]").
|
||||
append(",aoColumns:[{sType:'title-numeric'},").
|
||||
append("null,null,{sType:'title-numeric', bSearchable:false},null,").
|
||||
append("null,{sType:'title-numeric',bSearchable:false}, null, null]}").
|
||||
|
|
|
@ -119,6 +119,9 @@ public class TaskPage extends AppView {
|
|||
}
|
||||
|
||||
private String attemptsTableInit() {
|
||||
return tableInit().append("}").toString();
|
||||
return tableInit().
|
||||
// Sort by id upon page load
|
||||
append(", aaSorting: [[0, 'asc']]").
|
||||
append("}").toString();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -38,6 +38,8 @@ public class TasksPage extends AppView {
|
|||
|
||||
private String tasksTableInit() {
|
||||
return tableInit().
|
||||
// Sort by id upon page load
|
||||
append(", aaSorting: [[0, 'asc']]").
|
||||
append(",aoColumns:[{sType:'title-numeric'},{sType:'title-numeric',").
|
||||
append("bSearchable:false},null,{sType:'title-numeric'},").
|
||||
append("{sType:'title-numeric'},{sType:'title-numeric'}]}").toString();
|
||||
|
|
|
@ -72,7 +72,7 @@ public class TaskLog {
|
|||
if (!LOG_DIR.exists()) {
|
||||
boolean b = LOG_DIR.mkdirs();
|
||||
if (!b) {
|
||||
LOG.warn("mkdirs failed. Ignoring.");
|
||||
LOG.debug("mkdirs failed. Ignoring.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -108,7 +108,7 @@ public class Cluster {
|
|||
break;
|
||||
}
|
||||
else {
|
||||
LOG.info("Cannot pick " + provider.getClass().getName()
|
||||
LOG.debug("Cannot pick " + provider.getClass().getName()
|
||||
+ " as the ClientProtocolProvider - returned null protocol");
|
||||
}
|
||||
}
|
||||
|
|
|
@ -296,8 +296,12 @@ public class HsTaskPage extends HsView {
|
|||
} else { //MAP
|
||||
b.append(", 5");
|
||||
}
|
||||
b.append(" ] }");
|
||||
b.append("]}");
|
||||
b.append(" ] }]");
|
||||
|
||||
// Sort by id upon page load
|
||||
b.append(", aaSorting: [[0, 'asc']]");
|
||||
|
||||
b.append("}");
|
||||
return b.toString();
|
||||
}
|
||||
|
||||
|
|
|
@ -74,8 +74,12 @@ public class HsTasksPage extends HsView {
|
|||
} else { //MAP
|
||||
b.append(", 7");
|
||||
}
|
||||
b.append(" ] }");
|
||||
b.append("]}");
|
||||
b.append(" ] }]");
|
||||
|
||||
// Sort by id upon page load
|
||||
b.append(", aaSorting: [[0, 'asc']]");
|
||||
|
||||
b.append("}");
|
||||
return b.toString();
|
||||
}
|
||||
|
||||
|
|
|
@ -84,11 +84,17 @@ public class HsView extends TwoColumnLayout {
|
|||
*/
|
||||
private String jobsTableInit() {
|
||||
return tableInit().
|
||||
append(",aoColumnDefs:[").
|
||||
append("{'sType':'numeric', 'bSearchable': false, 'aTargets': [ 6 ] }").
|
||||
append(",{'sType':'numeric', 'bSearchable': false, 'aTargets': [ 7 ] }").
|
||||
// Sort by id upon page load
|
||||
append(", aaSorting: [[2, 'asc']]").
|
||||
append(", aoColumnDefs:[").
|
||||
// Maps Total
|
||||
append("{'sType':'numeric', 'bSearchable': false, 'aTargets': [ 7 ] }").
|
||||
// Maps Completed
|
||||
append(",{'sType':'numeric', 'bSearchable': false, 'aTargets': [ 8 ] }").
|
||||
// Reduces Total
|
||||
append(",{'sType':'numeric', 'bSearchable': false, 'aTargets': [ 9 ] }").
|
||||
// Reduces Completed
|
||||
append(",{'sType':'numeric', 'bSearchable': false, 'aTargets': [ 10 ] }").
|
||||
append("]}").
|
||||
toString();
|
||||
}
|
||||
|
|
|
@ -30,12 +30,9 @@ import org.apache.hadoop.conf.Configuration;
|
|||
import org.apache.hadoop.mapreduce.JobID;
|
||||
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
|
||||
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
|
||||
import org.apache.hadoop.mapreduce.v2.security.client.ClientHSSecurityInfo;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.security.SecurityInfo;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.yarn.YarnException;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.ipc.YarnRPC;
|
||||
|
||||
public class ClientCache {
|
||||
|
@ -79,9 +76,9 @@ public class ClientCache {
|
|||
if (StringUtils.isEmpty(serviceAddr)) {
|
||||
return null;
|
||||
}
|
||||
LOG.info("Connecting to HistoryServer at: " + serviceAddr);
|
||||
LOG.debug("Connecting to HistoryServer at: " + serviceAddr);
|
||||
final YarnRPC rpc = YarnRPC.create(conf);
|
||||
LOG.info("Connected to HistoryServer at: " + serviceAddr);
|
||||
LOG.debug("Connected to HistoryServer at: " + serviceAddr);
|
||||
UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
|
||||
return currentUser.doAs(new PrivilegedAction<MRClientProtocol>() {
|
||||
@Override
|
||||
|
|
|
@ -143,7 +143,7 @@ public class ClientServiceDelegate {
|
|||
|| YarnApplicationState.RUNNING == application
|
||||
.getYarnApplicationState()) {
|
||||
if (application == null) {
|
||||
LOG.info("Could not get Job info from RM for job " + jobId
|
||||
LOG.debug("Could not get Job info from RM for job " + jobId
|
||||
+ ". Redirecting to job history server.");
|
||||
return checkAndGetHSProxy(null, JobState.NEW);
|
||||
}
|
||||
|
@ -169,8 +169,8 @@ public class ClientServiceDelegate {
|
|||
+ ":" + addr.getPort()));
|
||||
UserGroupInformation.getCurrentUser().addToken(clientToken);
|
||||
}
|
||||
LOG.info("Tracking Url of JOB is " + application.getTrackingUrl());
|
||||
LOG.info("Connecting to " + serviceAddr);
|
||||
LOG.info("The url to track the job: " + application.getTrackingUrl());
|
||||
LOG.debug("Connecting to " + serviceAddr);
|
||||
realProxy = instantiateAMProxy(serviceAddr);
|
||||
return realProxy;
|
||||
} catch (IOException e) {
|
||||
|
@ -187,7 +187,7 @@ public class ClientServiceDelegate {
|
|||
}
|
||||
application = rm.getApplicationReport(appId);
|
||||
if (application == null) {
|
||||
LOG.info("Could not get Job info from RM for job " + jobId
|
||||
LOG.debug("Could not get Job info from RM for job " + jobId
|
||||
+ ". Redirecting to job history server.");
|
||||
return checkAndGetHSProxy(null, JobState.RUNNING);
|
||||
}
|
||||
|
@ -281,16 +281,13 @@ public class ClientServiceDelegate {
|
|||
LOG.debug("Tracing remote error ", e.getTargetException());
|
||||
throw (YarnRemoteException) e.getTargetException();
|
||||
}
|
||||
LOG.info("Failed to contact AM/History for job " + jobId +
|
||||
" retrying..");
|
||||
LOG.debug("Failed exception on AM/History contact",
|
||||
e.getTargetException());
|
||||
LOG.debug("Failed to contact AM/History for job " + jobId +
|
||||
" retrying..", e.getTargetException());
|
||||
// Force reconnection by setting the proxy to null.
|
||||
realProxy = null;
|
||||
} catch (Exception e) {
|
||||
LOG.info("Failed to contact AM/History for job " + jobId
|
||||
+ " Will retry..");
|
||||
LOG.debug("Failing to contact application master", e);
|
||||
LOG.debug("Failed to contact AM/History for job " + jobId
|
||||
+ " Will retry..", e);
|
||||
// Force reconnection by setting the proxy to null.
|
||||
realProxy = null;
|
||||
}
|
||||
|
|
|
@ -25,7 +25,6 @@ import java.util.List;
|
|||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.io.Text;
|
||||
|
@ -40,11 +39,9 @@ import org.apache.hadoop.mapreduce.TypeConverter;
|
|||
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
|
||||
import org.apache.hadoop.mapreduce.v2.util.MRApps;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.security.SecurityInfo;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.hadoop.yarn.api.ClientRMProtocol;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
|
||||
|
@ -56,6 +53,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
|
|||
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationId;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationReport;
|
||||
|
@ -67,13 +65,13 @@ import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
|||
import org.apache.hadoop.yarn.factories.RecordFactory;
|
||||
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
|
||||
import org.apache.hadoop.yarn.ipc.YarnRPC;
|
||||
import org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo;
|
||||
|
||||
|
||||
// TODO: This should be part of something like yarn-client.
|
||||
public class ResourceMgrDelegate {
|
||||
private static final Log LOG = LogFactory.getLog(ResourceMgrDelegate.class);
|
||||
|
||||
private final String rmAddress;
|
||||
private YarnConfiguration conf;
|
||||
ClientRMProtocol applicationsManager;
|
||||
private ApplicationId applicationId;
|
||||
|
@ -92,21 +90,25 @@ public class ResourceMgrDelegate {
|
|||
YarnConfiguration.DEFAULT_RM_ADDRESS),
|
||||
YarnConfiguration.DEFAULT_RM_PORT,
|
||||
YarnConfiguration.RM_ADDRESS);
|
||||
LOG.info("Connecting to ResourceManager at " + rmAddress);
|
||||
this.rmAddress = rmAddress.toString();
|
||||
LOG.debug("Connecting to ResourceManager at " + rmAddress);
|
||||
applicationsManager =
|
||||
(ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class,
|
||||
rmAddress, this.conf);
|
||||
LOG.info("Connected to ResourceManager at " + rmAddress);
|
||||
LOG.debug("Connected to ResourceManager at " + rmAddress);
|
||||
}
|
||||
|
||||
/**
|
||||
* Used for injecting applicationsManager, mostly for testing.
|
||||
* @param conf the configuration object
|
||||
* @param applicationsManager the handle to talk the resource managers {@link ClientRMProtocol}.
|
||||
* @param applicationsManager the handle to talk the resource managers
|
||||
* {@link ClientRMProtocol}.
|
||||
*/
|
||||
public ResourceMgrDelegate(YarnConfiguration conf, ClientRMProtocol applicationsManager) {
|
||||
public ResourceMgrDelegate(YarnConfiguration conf,
|
||||
ClientRMProtocol applicationsManager) {
|
||||
this.conf = conf;
|
||||
this.applicationsManager = applicationsManager;
|
||||
this.rmAddress = applicationsManager.toString();
|
||||
}
|
||||
|
||||
public void cancelDelegationToken(Token<DelegationTokenIdentifier> arg0)
|
||||
|
@ -295,18 +297,22 @@ public class ResourceMgrDelegate {
|
|||
}
|
||||
|
||||
|
||||
public ApplicationId submitApplication(ApplicationSubmissionContext appContext)
|
||||
public ApplicationId submitApplication(
|
||||
ApplicationSubmissionContext appContext)
|
||||
throws IOException {
|
||||
appContext.setApplicationId(applicationId);
|
||||
SubmitApplicationRequest request = recordFactory.newRecordInstance(SubmitApplicationRequest.class);
|
||||
SubmitApplicationRequest request =
|
||||
recordFactory.newRecordInstance(SubmitApplicationRequest.class);
|
||||
request.setApplicationSubmissionContext(appContext);
|
||||
applicationsManager.submitApplication(request);
|
||||
LOG.info("Submitted application " + applicationId + " to ResourceManager");
|
||||
LOG.info("Submitted application " + applicationId + " to ResourceManager" +
|
||||
" at " + rmAddress);
|
||||
return applicationId;
|
||||
}
|
||||
|
||||
public void killApplication(ApplicationId applicationId) throws IOException {
|
||||
KillApplicationRequest request = recordFactory.newRecordInstance(KillApplicationRequest.class);
|
||||
KillApplicationRequest request =
|
||||
recordFactory.newRecordInstance(KillApplicationRequest.class);
|
||||
request.setApplicationId(applicationId);
|
||||
applicationsManager.forceKillApplication(request);
|
||||
LOG.info("Killing application " + applicationId);
|
||||
|
|
|
@ -276,7 +276,7 @@ public class YARNRunner implements ClientProtocol {
|
|||
Resource capability = recordFactory.newRecordInstance(Resource.class);
|
||||
capability.setMemory(conf.getInt(MRJobConfig.MR_AM_VMEM_MB,
|
||||
MRJobConfig.DEFAULT_MR_AM_VMEM_MB));
|
||||
LOG.info("AppMaster capability = " + capability);
|
||||
LOG.debug("AppMaster capability = " + capability);
|
||||
|
||||
// Setup LocalResources
|
||||
Map<String, LocalResource> localResources =
|
||||
|
@ -352,7 +352,7 @@ public class YARNRunner implements ClientProtocol {
|
|||
}
|
||||
vargsFinal.add(mergedCommand.toString());
|
||||
|
||||
LOG.info("Command to launch container for ApplicationMaster is : "
|
||||
LOG.debug("Command to launch container for ApplicationMaster is : "
|
||||
+ mergedCommand);
|
||||
|
||||
// Setup the CLASSPATH in environment
|
||||
|
|
|
@ -87,8 +87,8 @@ fi
|
|||
|
||||
# some variables
|
||||
export YARN_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log
|
||||
export YARN_ROOT_LOGGER="INFO,DRFA"
|
||||
export YARN_JHS_LOGGER="INFO,JSA"
|
||||
export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,DRFA}
|
||||
export YARN_JHS_LOGGER=${YARN_JHS_LOGGER:-INFO,JSA}
|
||||
log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out
|
||||
pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid
|
||||
|
||||
|
|
|
@@ -91,8 +91,8 @@ public interface AMRMProtocol {
*
* <p>This also doubles up as a <em>heartbeat</em> to let the
* <code>ResourceManager</code> know that the <code>ApplicationMaster</code>
* is alive. Thus, applications should use periodically make this call to
* be kept alive.</p>
* is alive. Thus, applications should periodically make this call to be kept
* alive. The frequency depends on ??</p>
*
* <p>The <code>ResourceManager</code> responds with list of allocated
* {@link Container}, status of completed containers and headroom information
|
||||
|
|
|
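The javadoc above describes AMRMProtocol#allocate doubling as the ApplicationMaster's heartbeat. A minimal sketch of one heartbeat round trip, assuming the setter names implied by the getters in this patch, AllocateResponse#getAMResponse, and the org.apache.hadoop.yarn.util.Records factory; the helper class and its parameters are illustrative, not part of the patch:

    import org.apache.hadoop.yarn.api.AMRMProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
    import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.util.Records;

    /** Hypothetical helper: one allocate call, even with nothing to ask for, keeps the AM alive. */
    public class AmHeartbeatSketch {
      public static int heartbeatOnce(AMRMProtocol scheduler, ApplicationAttemptId attemptId,
          int lastResponseId, float progress) throws Exception {
        AllocateRequest request = Records.newRecord(AllocateRequest.class);
        request.setApplicationAttemptId(attemptId);
        request.setResponseId(lastResponseId);   // lets the RM detect duplicate responses
        request.setProgress(progress);
        AllocateResponse response = scheduler.allocate(request);  // doubles as the heartbeat
        return response.getAMResponse().getResponseId();          // echo back on the next call
      }
    }

Calling this periodically, even with no new ResourceRequests or released containers, is what keeps the ApplicationMaster registered as alive.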
@@ -68,7 +68,8 @@ public interface ClientRMProtocol {
* {@link GetNewApplicationResponse}.</p>
*
* @param request request to get a new <code>ApplicationId</code>
* @return new <code>ApplicationId</code> to be used to submit an application
* @return response containing the new <code>ApplicationId</code> to be used
* to submit an application
* @throws YarnRemoteException
* @see #submitApplication(SubmitApplicationRequest)
*/
|
||||
|
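The reworded @return above makes clear that the new ApplicationId arrives inside a GetNewApplicationResponse. A hedged client-side sketch, assuming GetNewApplicationResponse#getApplicationId and the Records factory; the wrapper class is made up for illustration:

    import org.apache.hadoop.yarn.api.ClientRMProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
    import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.util.Records;

    /** Hypothetical helper: fetch a fresh ApplicationId before building a submission context. */
    public class NewApplicationSketch {
      public static ApplicationId newApplicationId(ClientRMProtocol rm) throws Exception {
        GetNewApplicationRequest request =
            Records.newRecord(GetNewApplicationRequest.class);  // the request record is currently empty
        GetNewApplicationResponse response = rm.getNewApplication(request);
        return response.getApplicationId();                     // carried in the response record
      }
    }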
@ -216,7 +217,7 @@ public interface ClientRMProtocol {
|
|||
|
||||
/**
|
||||
* <p>The interface used by clients to get information about <em>queue
|
||||
* acls</em> for <em>current users</em> from the <code>ResourceManager</code>.
|
||||
* acls</em> for <em>current user</em> from the <code>ResourceManager</code>.
|
||||
* </p>
|
||||
*
|
||||
* <p>The <code>ResourceManager</code> responds with queue acls for all
|
||||
|
|
|
@ -79,7 +79,7 @@ public interface ContainerManager {
|
|||
* to <em>stop</em> a {@link Container} allocated to it using this interface.
|
||||
* </p>
|
||||
*
|
||||
* <p>The <code>ApplicationMaster</code></p> sends a
|
||||
* <p>The <code>ApplicationMaster</code> sends a
|
||||
* {@link StopContainerRequest} which includes the {@link ContainerId} of the
|
||||
* container to be stopped.</p>
|
||||
*
|
||||
|
@ -105,7 +105,7 @@ public interface ContainerManager {
|
|||
* current status of a <code>Container</code> from the
|
||||
* <code>NodeManager</code>.</p>
|
||||
*
|
||||
* <p>The <code>ApplicationMaster</code></p> sends a
|
||||
* <p>The <code>ApplicationMaster</code> sends a
|
||||
* {@link GetContainerStatusRequest} which includes the {@link ContainerId} of
|
||||
* the container whose status is needed.</p>
|
||||
*
|
||||
|
@ -115,7 +115,8 @@ public interface ContainerManager {
|
|||
*
|
||||
* @param request request to get <code>ContainerStatus</code> of a container
|
||||
* with the specified <code>ContainerId</code>
|
||||
* @return <code>ContainerStatus</code> of the container
|
||||
* @return response containing the <code>ContainerStatus</code> of the
|
||||
* container
|
||||
* @throws YarnRemoteException
|
||||
*/
|
||||
@Public
|
||||
|
|
|
@ -50,7 +50,6 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
|
|||
* <li>
|
||||
* A list of unused {@link Container} which are being returned.
|
||||
* </li>
|
||||
* <li></li>
|
||||
* </ul>
|
||||
* </p>
|
||||
*
|
||||
|
@ -81,7 +80,7 @@ public interface AllocateRequest {
|
|||
void setApplicationAttemptId(ApplicationAttemptId applicationAttemptId);
|
||||
|
||||
/**
|
||||
* Get the <em>response id</em>.
|
||||
* Get the <em>response id</em> used to track duplicate responses.
|
||||
* @return <em>response id</em>
|
||||
*/
|
||||
@Public
|
||||
|
@ -89,7 +88,7 @@ public interface AllocateRequest {
|
|||
int getResponseId();
|
||||
|
||||
/**
|
||||
* Set the <em>response id</em>
|
||||
* Set the <em>response id</em> used to track duplicate responses.
|
||||
* @param id <em>response id</em>
|
||||
*/
|
||||
@Public
|
||||
|
@ -113,7 +112,7 @@ public interface AllocateRequest {
|
|||
void setProgress(float progress);
|
||||
|
||||
/**
|
||||
* Get the list of <code>ResourceRequest</code> to upate the
|
||||
* Get the list of <code>ResourceRequest</code> to update the
|
||||
* <code>ResourceManager</code> about the application's resource requirements.
|
||||
* @return the list of <code>ResourceRequest</code>
|
||||
*/
|
||||
|
@ -130,9 +129,9 @@ public interface AllocateRequest {
|
|||
int getAskCount();
|
||||
|
||||
/**
|
||||
* Add list of <code>ResourceRequest</code> to upate the
|
||||
* Add list of <code>ResourceRequest</code> to update the
|
||||
* <code>ResourceManager</code> about the application's resource requirements.
|
||||
* @param resourceRequest list of <code>ResourceRequest</code> to upate the
|
||||
* @param resourceRequest list of <code>ResourceRequest</code> to update the
|
||||
* <code>ResourceManager</code> about the application's
|
||||
* resource requirements
|
||||
*/
|
||||
|
|
|
@ -34,7 +34,7 @@ import org.apache.hadoop.yarn.api.records.Container;
|
|||
* <ul>
|
||||
* <li>Response ID to track duplicate responses.</li>
|
||||
* <li>
|
||||
* A reboot flag to let the <code>ApplicationMaster</code> that its
|
||||
* A reboot flag to let the <code>ApplicationMaster</code> know that its
|
||||
* horribly out of sync and needs to reboot.</li>
|
||||
* <li>A list of newly allocated {@link Container}.</li>
|
||||
* <li>A list of completed {@link Container}.</li>
|
||||
|
|
|
@ -26,6 +26,8 @@ import org.apache.hadoop.yarn.api.ClientRMProtocol;
|
|||
* <p>The request from clients to get a report of all Applications
|
||||
* in the cluster from the <code>ResourceManager</code>.</p>
|
||||
*
|
||||
* <p>Currently, this is empty.</p>
|
||||
*
|
||||
* @see ClientRMProtocol#getAllApplications(GetAllApplicationsRequest)
|
||||
*/
|
||||
@Public
|
||||
|
|
|
@ -26,6 +26,8 @@ import org.apache.hadoop.yarn.api.ClientRMProtocol;
|
|||
* <p>The request sent by clients to get cluster metrics from the
|
||||
* <code>ResourceManager</code>.</p>
|
||||
*
|
||||
* <p>Currently, this is empty.</p>
|
||||
*
|
||||
* @see ClientRMProtocol#getClusterMetrics(GetClusterMetricsRequest)
|
||||
*/
|
||||
@Public
|
||||
|
|
|
@ -26,6 +26,8 @@ import org.apache.hadoop.yarn.api.ClientRMProtocol;
|
|||
* <p>The request from clients to get a report of all nodes
|
||||
* in the cluster from the <code>ResourceManager</code>.</p>
|
||||
*
|
||||
* <p>Currently, this is empty.</p>
|
||||
*
|
||||
* @see ClientRMProtocol#getClusterNodes(GetClusterNodesRequest)
|
||||
*/
|
||||
@Public
|
||||
|
|
|
@ -29,7 +29,7 @@ import org.apache.hadoop.yarn.api.records.NodeReport;
|
|||
|
||||
/**
|
||||
* <p>The response sent by the <code>ResourceManager</code> to a client
|
||||
* requesting an {@link NodeReport} for all nodes.</p>
|
||||
* requesting a {@link NodeReport} for all nodes.</p>
|
||||
*
|
||||
* <p>The <code>NodeReport</code> contains per-node information such as
|
||||
* available resources, number of containers, tracking url, rack name, health
|
||||
|
|
|
@ -27,7 +27,7 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
|
|||
|
||||
/**
|
||||
* <p>The response sent by the <code>NodeManager</code> to the
|
||||
* <code>ApplicationMaster</code> when asked to obtainer <em>status</em>
|
||||
* <code>ApplicationMaster</code> when asked to obtain the <em>status</em>
|
||||
* of a container.</p>
|
||||
*
|
||||
* @see ContainerManager#getContainerStatus(GetContainerStatusRequest)
|
||||
|
|
|
@ -27,6 +27,8 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
|
|||
* <p>The request sent by clients to get a new {@link ApplicationId} for
|
||||
* submitting an application.</p>
|
||||
*
|
||||
* <p>Currently, this is empty.</p>
|
||||
*
|
||||
* @see ClientRMProtocol#getNewApplication(GetNewApplicationRequest)
|
||||
*/
|
||||
@Public
|
||||
|
|
|
@ -28,7 +28,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
|
|||
|
||||
/**
|
||||
* <p>The response sent by the <code>ResourceManager</code> to the client for
|
||||
* a request to a new {@link ApplicationId} for submitting applications.</p>
|
||||
* a request to get a new {@link ApplicationId} for submitting applications.</p>
|
||||
*
|
||||
* @see ClientRMProtocol#getNewApplication(GetNewApplicationRequest)
|
||||
*/
|
||||
|
|
|
@ -26,6 +26,8 @@ import org.apache.hadoop.yarn.api.ClientRMProtocol;
|
|||
* <p>The request sent by clients to the <code>ResourceManager</code> to
|
||||
* get queue acls for the <em>current user</em>.</p>
|
||||
*
|
||||
* <p>Currently, this is empty.</p>
|
||||
*
|
||||
* @see ClientRMProtocol#getQueueUserAcls(GetQueueUserAclsInfoRequest)
|
||||
*/
|
||||
@Public
|
||||
|
|
|
@@ -35,11 +35,53 @@ import org.apache.hadoop.yarn.api.ContainerManager;
|
|||
@Public
|
||||
@Stable
|
||||
public interface StartContainerResponse {
|
||||
/**
|
||||
* <p>Get the responses from all auxiliary services running on the
|
||||
* <code>NodeManager</code>.</p>
|
||||
* <p>The responses are returned as a Map between the auxiliary service names
|
||||
* and their corresponding opaque blob <code>ByteBuffer</code>s</p>
|
||||
* @return a Map between the auxiliary service names and their outputs
|
||||
*/
|
||||
Map<String, ByteBuffer> getAllServiceResponse();
|
||||
|
||||
/**
|
||||
* Get the response from a single auxiliary service running on the
|
||||
* <code>NodeManager</code>
|
||||
*
|
||||
* @param key The auxiliary service name whose response is desired.
|
||||
* @return The opaque blob <code>ByteBuffer</code> returned by the auxiliary
|
||||
* service.
|
||||
*/
|
||||
ByteBuffer getServiceResponse(String key);
|
||||
|
||||
/**
|
||||
* Add to the list of auxiliary services which have been started on the
|
||||
* <code>NodeManager</code>. This is done only once when the
|
||||
* <code>NodeManager</code> starts up
|
||||
* @param serviceResponse A map from auxiliary service names to the opaque
|
||||
* blob <code>ByteBuffer</code>s for that auxiliary service
|
||||
*/
|
||||
void addAllServiceResponse(Map<String, ByteBuffer> serviceResponse);
|
||||
|
||||
/**
|
||||
* Add to the list of auxiliary services which have been started on the
|
||||
* <code>NodeManager</code>. This is done only once when the
|
||||
* <code>NodeManager</code> starts up
|
||||
*
|
||||
* @param key The auxiliary service name
|
||||
* @param value The opaque blob <code>ByteBuffer</code> managed by the
|
||||
* auxiliary service
|
||||
*/
|
||||
void setServiceResponse(String key, ByteBuffer value);
|
||||
|
||||
/**
|
||||
* Remove a single auxiliary service from the StartContainerResponse object
|
||||
* @param key The auxiliary service to remove
|
||||
*/
|
||||
void removeServiceResponse(String key);
|
||||
|
||||
/**
|
||||
* Remove all the auxiliary services from the StartContainerResponse object
|
||||
*/
|
||||
void clearServiceResponse();
|
||||
}
|
||||
|
|
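The accessors defined above hand back auxiliary-service data as opaque ByteBuffers keyed by service name. A short sketch of a caller reading them from a StartContainerResponse; the service name "mapreduce.shuffle" is an assumed example, not something this patch defines:

    import java.nio.ByteBuffer;
    import java.util.Map;
    import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;

    /** Hypothetical sketch: pick one service's metadata out of a start-container response. */
    public class AuxServiceResponseSketch {
      public static ByteBuffer shuffleMetaData(StartContainerResponse response) {
        // Every started auxiliary service, keyed by its configured name.
        Map<String, ByteBuffer> all = response.getAllServiceResponse();
        // Or look up one service directly; the key here is illustrative.
        ByteBuffer meta = response.getServiceResponse("mapreduce.shuffle");
        return meta != null ? meta : (all.isEmpty() ? null : all.values().iterator().next());
      }
    }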
|
@@ -27,6 +27,8 @@ import org.apache.hadoop.yarn.api.ContainerManager;
|
|||
* <code>ApplicationMaster</code> when asked to <em>stop</em> an
|
||||
* allocated container.</p>
|
||||
*
|
||||
* <p>Currently, this is empty.</p>
|
||||
*
|
||||
* @see ContainerManager#stopContainer(StopContainerRequest)
|
||||
*/
|
||||
@Public
|
||||
|
|
|
@ -26,6 +26,8 @@ import org.apache.hadoop.yarn.api.ClientRMProtocol;
|
|||
* <p>The response sent by the <code>ResourceManager</code> to a client on
|
||||
* application submission.</p>
|
||||
*
|
||||
* <p>Currently, this is empty.</p>
|
||||
*
|
||||
* @see ClientRMProtocol#submitApplication(SubmitApplicationRequest)
|
||||
*/
|
||||
@Public
|
||||
|
|
|
@ -35,7 +35,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
|
|||
* <ul>
|
||||
* <li>Response ID to track duplicate responses.</li>
|
||||
* <li>
|
||||
* A reboot flag to let the <code>ApplicationMaster</code> that its
|
||||
* A reboot flag to let the <code>ApplicationMaster</code> know that its
|
||||
* horribly out of sync and needs to reboot.</li>
|
||||
* <li>A list of newly allocated {@link Container}.</li>
|
||||
* <li>A list of completed {@link Container}.</li>
|
||||
|
@ -100,7 +100,7 @@ public interface AMResponse {
|
|||
/**
|
||||
* Get the <em>available headroom</em> for resources in the cluster for the
|
||||
* application.
|
||||
* @return limit available headroom for resources in the cluster for the
|
||||
* @return limit of available headroom for resources in the cluster for the
|
||||
* application
|
||||
*/
|
||||
@Public
|
||||
|
|
|
@ -33,7 +33,7 @@ import org.apache.hadoop.yarn.api.ClientRMProtocol;
|
|||
* <li>Applications user.</li>
|
||||
* <li>Application queue.</li>
|
||||
* <li>Application name.</li>
|
||||
* <li>Host on which the <code>ApplicationMaster</code>is running.</li>
|
||||
* <li>Host on which the <code>ApplicationMaster</code> is running.</li>
|
||||
* <li>RPC port of the <code>ApplicationMaster</code>.</li>
|
||||
* <li>Tracking URL.</li>
|
||||
* <li>{@link YarnApplicationState} of the application.</li>
|
||||
|
@ -215,6 +215,7 @@ public interface ApplicationReport {
|
|||
|
||||
/**
|
||||
* Get the <em>final finish status</em> of the application.
|
||||
* @return <em>final finish status</em> of the application
|
||||
*/
|
||||
@Public
|
||||
@Stable
|
||||
|
|
|
@ -33,22 +33,43 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
|||
@Public
|
||||
@Stable
|
||||
public interface ApplicationResourceUsageReport {
|
||||
|
||||
/**
|
||||
* Get the number of used containers
|
||||
* @return the number of used containers
|
||||
*/
|
||||
@Public
|
||||
@Stable
|
||||
int getNumUsedContainers();
|
||||
|
||||
/**
|
||||
* Set the number of used containers
|
||||
* @param num_containers the number of used containers
|
||||
*/
|
||||
@Private
|
||||
@Unstable
|
||||
void setNumUsedContainers(int num_containers);
|
||||
|
||||
/**
|
||||
* Get the number of reserved containers
|
||||
* @return the number of reserved containers
|
||||
*/
|
||||
@Public
|
||||
@Stable
|
||||
int getNumReservedContainers();
|
||||
|
||||
/**
|
||||
* Set the number of reserved containers
|
||||
* @param num_reserved_containers the number of reserved containers
|
||||
*/
|
||||
@Private
|
||||
@Unstable
|
||||
void setNumReservedContainers(int num_reserved_containers);
|
||||
|
||||
/**
|
||||
* Get the used <code>Resource</code>
|
||||
* @return the used <code>Resource</code>
|
||||
*/
|
||||
@Public
|
||||
@Stable
|
||||
Resource getUsedResources();
|
||||
|
@ -57,6 +78,10 @@ public interface ApplicationResourceUsageReport {
|
|||
@Unstable
|
||||
void setUsedResources(Resource resources);
|
||||
|
||||
/**
|
||||
* Get the reserved <code>Resource</code>
|
||||
* @return the reserved <code>Resource</code>
|
||||
*/
|
||||
@Public
|
||||
@Stable
|
||||
Resource getReservedResources();
|
||||
|
@ -65,6 +90,10 @@ public interface ApplicationResourceUsageReport {
|
|||
@Unstable
|
||||
void setReservedResources(Resource reserved_resources);
|
||||
|
||||
/**
|
||||
* Get the needed <code>Resource</code>
|
||||
* @return the needed <code>Resource</code>
|
||||
*/
|
||||
@Public
|
||||
@Stable
|
||||
Resource getNeededResources();
|
||||
|
|
|
@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
|||
import org.apache.hadoop.yarn.api.ClientRMProtocol;
|
||||
|
||||
/**
|
||||
* <p><code>ApplicationSubmissionContext</code> represents the all of the
|
||||
* <p><code>ApplicationSubmissionContext</code> represents all of the
|
||||
* information needed by the <code>ResourceManager</code> to launch
|
||||
* the <code>ApplicationMaster</code> for an application.</p>
|
||||
*
|
||||
|
|
|
@ -38,8 +38,7 @@ import org.apache.hadoop.yarn.api.ContainerManager;
|
|||
* <ul>
|
||||
* <li>{@link ContainerId} for the container, which is globally unique.</li>
|
||||
* <li>
|
||||
* {@link NodeId} of the node on which identifies the node on which it
|
||||
* is allocated.
|
||||
* {@link NodeId} of the node on which it is allocated.
|
||||
* </li>
|
||||
* <li>HTTP uri of the node.</li>
|
||||
* <li>{@link Resource} allocated to the container.</li>
|
||||
|
|
|
@ -27,7 +27,7 @@ import org.apache.hadoop.classification.InterfaceStability.Stable;
|
|||
import org.apache.hadoop.yarn.api.ContainerManager;
|
||||
|
||||
/**
|
||||
* <p><code>ContainerLaunchContext</code> represents the all of the information
|
||||
* <p><code>ContainerLaunchContext</code> represents all of the information
|
||||
* needed by the <code>NodeManager</code> to launch a container.</p>
|
||||
*
|
||||
* <p>It includes details such as:
|
||||
|
@ -127,7 +127,8 @@ public interface ContainerLaunchContext {
|
|||
Map<String, LocalResource> getLocalResources();
|
||||
|
||||
/**
|
||||
* Set <code>LocalResource</code> required by the container.
|
||||
* Set <code>LocalResource</code> required by the container. All pre-existing
|
||||
* Map entries are cleared before adding the new Map
|
||||
* @param localResources <code>LocalResource</code> required by the container
|
||||
*/
|
||||
@Public
|
||||
|
@ -143,7 +144,8 @@ public interface ContainerLaunchContext {
|
|||
Map<String, ByteBuffer> getServiceData();
|
||||
|
||||
/**
|
||||
* Set application-specific binary <em>service data</em>.
|
||||
* Set application-specific binary <em>service data</em>. All pre-existing Map
|
||||
* entries are preserved.
|
||||
* @param serviceData application-specific binary <em>service data</em>
|
||||
*/
|
||||
@Public
|
||||
|
@ -159,7 +161,8 @@ public interface ContainerLaunchContext {
|
|||
Map<String, String> getEnvironment();
|
||||
|
||||
/**
|
||||
* Add <em>environment variables</em> for the container.
|
||||
* Add <em>environment variables</em> for the container. All pre-existing Map
|
||||
* entries are cleared before adding the new Map
|
||||
* @param environment <em>environment variables</em> for the container
|
||||
*/
|
||||
@Public
|
||||
|
@ -175,7 +178,8 @@ public interface ContainerLaunchContext {
|
|||
List<String> getCommands();
|
||||
|
||||
/**
|
||||
* Add the list of <em>commands</em> for launching the container.
|
||||
* Add the list of <em>commands</em> for launching the container. All
|
||||
* pre-existing List entries are cleared before adding the new List
|
||||
* @param commands the list of <em>commands</em> for launching the container
|
||||
*/
|
||||
@Public
|
||||
|
@ -191,8 +195,9 @@ public interface ContainerLaunchContext {
|
|||
public Map<ApplicationAccessType, String> getApplicationACLs();
|
||||
|
||||
/**
|
||||
* Set the <code>ApplicationACL</code>s for the application.
|
||||
* @param acls
|
||||
* Set the <code>ApplicationACL</code>s for the application. All pre-existing
|
||||
* Map entries are cleared before adding the new Map
|
||||
* @param acls <code>ApplicationACL</code>s for the application
|
||||
*/
|
||||
@Public
|
||||
@Stable
|
||||
|
|
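The javadoc changes above spell out that the localResources, environment, commands and ACL setters replace whatever was previously set (only the service-data setter preserves existing entries). A sketch of that replace-on-set behavior, assuming setter names that mirror the getters shown here and the Records factory; none of this code is in the patch:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
    import org.apache.hadoop.yarn.util.Records;

    /** Hypothetical sketch: each set* call below discards anything set earlier for that field. */
    public class LaunchContextSketch {
      public static ContainerLaunchContext buildContext() {
        ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
        Map<String, String> env = new HashMap<String, String>();
        env.put("EXAMPLE_VAR", "value");                     // assumed variable, for illustration
        ctx.setEnvironment(env);                             // clears any previously set environment map
        ctx.setCommands(Collections.singletonList("true"));  // clears any previously set command list
        return ctx;
      }
    }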
|
@@ -35,8 +35,6 @@ import org.apache.hadoop.yarn.api.ClientRMProtocol;
|
|||
* </li>
|
||||
* <li>The previous time at which the health status was reported.</li>
|
||||
* <li>A diagnostic report on the health status.</li>
|
||||
* <li></li>
|
||||
* <li></li>
|
||||
* </ul>
|
||||
* </p>
|
||||
*
|
||||
|
|
|
@ -18,10 +18,23 @@
|
|||
|
||||
package org.apache.hadoop.yarn.api.records;
|
||||
|
||||
/**
|
||||
* The priority assigned to a ResourceRequest or Application or Container
|
||||
* allocation
|
||||
*
|
||||
*/
|
||||
public interface Priority extends Comparable<Priority> {
|
||||
|
||||
/**
|
||||
* Get the assigned priority
|
||||
* @return the assigned priority
|
||||
*/
|
||||
public abstract int getPriority();
|
||||
|
||||
/**
|
||||
* Set the assigned priority
|
||||
* @param priority the assigned priority
|
||||
*/
|
||||
public abstract void setPriority(int priority);
|
||||
|
||||
}
|
||||
|
|
|
@ -27,7 +27,7 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
|||
import org.apache.hadoop.yarn.api.ClientRMProtocol;
|
||||
|
||||
/**
|
||||
* <p>QueueInfo</p> is a report of the runtime information of the queue.</p>
|
||||
* <p>QueueInfo is a report of the runtime information of the queue.</p>
|
||||
*
|
||||
* <p>It includes information such as:
|
||||
* <ul>
|
||||
|
|
|
@ -93,7 +93,7 @@ import org.apache.hadoop.yarn.util.Records;
|
|||
* to inform the <code>ResourceManager</code> that it is up and alive. The {@link AMRMProtocol#allocate} to the
|
||||
* <code>ResourceManager</code> from the <code>ApplicationMaster</code> acts as a heartbeat.
|
||||
*
|
||||
* <p> For the actual handling of the job, the <code>ApplicationMaster</code> has to request for the
|
||||
* <p> For the actual handling of the job, the <code>ApplicationMaster</code> has to request the
|
||||
* <code>ResourceManager</code> via {@link AllocateRequest} for the required no. of containers using {@link ResourceRequest}
|
||||
* with the necessary resource specifications such as node location, computational (memory/disk/cpu) resource requirements.
|
||||
* The <code>ResourceManager</code> responds with an {@link AllocateResponse} that informs the <code>ApplicationMaster</code>
|
||||
|
|
|
@ -37,12 +37,12 @@ import org.apache.hadoop.yarn.factory.providers.RpcFactoryProvider;
|
|||
*/
|
||||
public class HadoopYarnProtoRPC extends YarnRPC {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(HadoopYarnRPC.class);
|
||||
private static final Log LOG = LogFactory.getLog(HadoopYarnProtoRPC.class);
|
||||
|
||||
@Override
|
||||
public Object getProxy(Class protocol, InetSocketAddress addr,
|
||||
Configuration conf) {
|
||||
LOG.info("Creating a HadoopYarnProtoRpc proxy for protocol " + protocol);
|
||||
LOG.debug("Creating a HadoopYarnProtoRpc proxy for protocol " + protocol);
|
||||
return RpcFactoryProvider.getClientFactory(conf).getClient(protocol, 1,
|
||||
addr, conf);
|
||||
}
|
||||
|
@ -57,11 +57,11 @@ public class HadoopYarnProtoRPC extends YarnRPC {
|
|||
InetSocketAddress addr, Configuration conf,
|
||||
SecretManager<? extends TokenIdentifier> secretManager,
|
||||
int numHandlers) {
|
||||
LOG.info("Creating a HadoopYarnProtoRpc server for protocol " + protocol +
|
||||
LOG.debug("Creating a HadoopYarnProtoRpc server for protocol " + protocol +
|
||||
" with " + numHandlers + " handlers");
|
||||
|
||||
return RpcFactoryProvider.getServerFactory(conf).getServer(protocol, instance,
|
||||
addr, conf, secretManager, numHandlers);
|
||||
return RpcFactoryProvider.getServerFactory(conf).getServer(protocol,
|
||||
instance, addr, conf, secretManager, numHandlers);
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@ -45,7 +45,7 @@ public class HadoopYarnRPC extends YarnRPC {
|
|||
@Override
|
||||
public Object getProxy(Class protocol, InetSocketAddress addr,
|
||||
Configuration conf) {
|
||||
LOG.info("Creating a HadoopYarnRpc proxy for protocol " + protocol);
|
||||
LOG.debug("Creating a HadoopYarnRpc proxy for protocol " + protocol);
|
||||
RPC.setProtocolEngine(conf, protocol, AvroSpecificRpcEngine.class);
|
||||
try {
|
||||
return RPC.getProxy(protocol, 1, addr, conf);
|
||||
|
@ -64,7 +64,7 @@ public class HadoopYarnRPC extends YarnRPC {
|
|||
InetSocketAddress addr, Configuration conf,
|
||||
SecretManager<? extends TokenIdentifier> secretManager,
|
||||
int numHandlers) {
|
||||
LOG.info("Creating a HadoopYarnRpc server for protocol " + protocol +
|
||||
LOG.debug("Creating a HadoopYarnRpc server for protocol " + protocol +
|
||||
" with " + numHandlers + " handlers");
|
||||
RPC.setProtocolEngine(conf, protocol, AvroSpecificRpcEngine.class);
|
||||
final RPC.Server hadoopServer;
|
||||
|
|
|
@@ -46,7 +46,8 @@ public abstract class YarnRPC {
int numHandlers);

public static YarnRPC create(Configuration conf) {
LOG.info("Creating YarnRPC for " + conf.get(YarnConfiguration.IPC_RPC_IMPL));
LOG.debug("Creating YarnRPC for " +
conf.get(YarnConfiguration.IPC_RPC_IMPL));
String clazzName = conf.get(YarnConfiguration.IPC_RPC_IMPL);
if (clazzName == null) {
clazzName = YarnConfiguration.DEFAULT_IPC_RPC_IMPL;
|
||||
|
|
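YarnRPC.create above chooses the RPC implementation named by YarnConfiguration.IPC_RPC_IMPL, falling back to the default. A hedged sketch of a client obtaining a ClientRMProtocol proxy through that factory; the host and port are placeholders, and the class name is made up:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.yarn.api.ClientRMProtocol;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.ipc.YarnRPC;

    /** Hypothetical sketch: the factory hides whether the proto- or Avro-based RPC is in use. */
    public class YarnRpcClientSketch {
      public static ClientRMProtocol connect(String host, int port) {
        YarnConfiguration conf = new YarnConfiguration();
        YarnRPC rpc = YarnRPC.create(conf);                   // implementation picked from the config
        InetSocketAddress address = new InetSocketAddress(host, port);
        return (ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class, address, conf);
      }
    }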
|
@ -39,9 +39,9 @@ public class ApplicationTokenSelector implements
|
|||
if (service == null) {
|
||||
return null;
|
||||
}
|
||||
LOG.info("Looking for a token with service " + service.toString());
|
||||
LOG.debug("Looking for a token with service " + service.toString());
|
||||
for (Token<? extends TokenIdentifier> token : tokens) {
|
||||
LOG.info("Token kind is " + token.getKind().toString()
|
||||
LOG.debug("Token kind is " + token.getKind().toString()
|
||||
+ " and the token's service name is " + token.getService());
|
||||
if (ApplicationTokenIdentifier.KIND_NAME.equals(token.getKind())
|
||||
&& service.equals(token.getService())) {
|
||||
|
|
|
@ -39,9 +39,9 @@ public class ClientTokenSelector implements
|
|||
if (service == null) {
|
||||
return null;
|
||||
}
|
||||
LOG.info("Looking for a token with service " + service.toString());
|
||||
LOG.debug("Looking for a token with service " + service.toString());
|
||||
for (Token<? extends TokenIdentifier> token : tokens) {
|
||||
LOG.info("Token kind is " + token.getKind().toString()
|
||||
LOG.debug("Token kind is " + token.getKind().toString()
|
||||
+ " and the token's service name is " + token.getService());
|
||||
if (ClientTokenIdentifier.KIND_NAME.equals(token.getKind())
|
||||
&& service.equals(token.getService())) {
|
||||
|
|
|
@ -79,11 +79,11 @@ public class JQueryUI extends HtmlBlock {
|
|||
@Override
|
||||
protected void render(Block html) {
|
||||
html.
|
||||
link(join("https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.9/themes/",
|
||||
link(join("https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.16/themes/",
|
||||
getTheme(), "/jquery-ui.css")).
|
||||
link(root_url("static/dt-1.7.5/css/jui-dt.css")).
|
||||
script("https://ajax.googleapis.com/ajax/libs/jquery/1.4.4/jquery.min.js").
|
||||
script("https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.9/jquery-ui.min.js").
|
||||
script("https://ajax.googleapis.com/ajax/libs/jquery/1.6.4/jquery.min.js").
|
||||
script("https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.16/jquery-ui.min.js").
|
||||
script(root_url("static/dt-1.7.5/js/jquery.dataTables.min.js")).
|
||||
script(root_url("static/yarn.dt.plugins.js")).
|
||||
script(root_url("static/themeswitcher.js")).
|
||||
|
@ -224,7 +224,7 @@ public class JQueryUI extends HtmlBlock {
|
|||
}
|
||||
|
||||
public static StringBuilder tableInit() {
|
||||
return new StringBuilder("{bJQueryUI:true, aaSorting:[], ").
|
||||
return new StringBuilder("{bJQueryUI:true, ").
|
||||
append("sPaginationType: 'full_numbers', iDisplayLength:20, ").
|
||||
append("aLengthMenu:[20, 40, 60, 80, 100]");
|
||||
}
|
||||
|
|
|
@ -68,7 +68,7 @@ public class AuxServices extends AbstractService
|
|||
/**
|
||||
* @return the meta data for all registered services, that have been started.
|
||||
* If a service has not been started no metadata will be available. The key
|
||||
* the the name of the service as defined in the configuration.
|
||||
* is the name of the service as defined in the configuration.
|
||||
*/
|
||||
public Map<String, ByteBuffer> getMeta() {
|
||||
Map<String, ByteBuffer> metaClone = new HashMap<String, ByteBuffer>(
|
||||
|
|
|
@ -51,6 +51,8 @@ public class AllApplicationsPage extends NMView {
|
|||
|
||||
private String appsTableInit() {
|
||||
return tableInit().
|
||||
// Sort by id upon page load
|
||||
append(", aaSorting: [[0, 'asc']]").
|
||||
// applicationid, applicationstate
|
||||
append(", aoColumns:[null, null]} ").toString();
|
||||
}
|
||||
|
|
|
@ -53,6 +53,14 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl
|
|||
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerReservedEvent;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
|
||||
|
||||
import com.google.common.collect.HashMultiset;
|
||||
import com.google.common.collect.Multiset;
|
||||
|
||||
/**
|
||||
* Represents an Application from the viewpoint of the scheduler.
|
||||
* Each running Application in the RM corresponds to one instance
|
||||
* of this class.
|
||||
*/
|
||||
public class SchedulerApp {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(SchedulerApp.class);
|
||||
|
@@ -76,11 +84,16 @@ public class SchedulerApp {
|
|||
final Map<Priority, Map<NodeId, RMContainer>> reservedContainers =
|
||||
new HashMap<Priority, Map<NodeId, RMContainer>>();
|
||||
|
||||
Map<Priority, Integer> schedulingOpportunities =
|
||||
new HashMap<Priority, Integer>();
|
||||
/**
|
||||
* Count how many times the application has been given an opportunity
|
||||
* to schedule a task at each priority. Each time the scheduler
|
||||
* asks the application for a task at this priority, it is incremented,
|
||||
* and each time the application successfully schedules a task, it
|
||||
* is reset to 0.
|
||||
*/
|
||||
Multiset<Priority> schedulingOpportunities = HashMultiset.create();
|
||||
|
||||
Map<Priority, Integer> reReservations =
|
||||
new HashMap<Priority, Integer>();
|
||||
Multiset<Priority> reReservations = HashMultiset.create();
|
||||
|
||||
Resource currentReservation = recordFactory
|
||||
.newRecordInstance(Resource.class);
|
||||
|
@@ -282,49 +295,33 @@ public class SchedulerApp {
|
|||
}
|
||||
|
||||
synchronized public void resetSchedulingOpportunities(Priority priority) {
|
||||
this.schedulingOpportunities.put(priority, Integer.valueOf(0));
|
||||
this.schedulingOpportunities.setCount(priority, 0);
|
||||
}
|
||||
|
||||
synchronized public void addSchedulingOpportunity(Priority priority) {
|
||||
Integer schedulingOpportunities =
|
||||
this.schedulingOpportunities.get(priority);
|
||||
if (schedulingOpportunities == null) {
|
||||
schedulingOpportunities = 0;
|
||||
}
|
||||
++schedulingOpportunities;
|
||||
this.schedulingOpportunities.put(priority, schedulingOpportunities);
|
||||
this.schedulingOpportunities.setCount(priority,
|
||||
schedulingOpportunities.count(priority) + 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the number of times the application has been given an opportunity
|
||||
* to schedule a task at the given priority since the last time it
|
||||
* successfully did so.
|
||||
*/
|
||||
synchronized public int getSchedulingOpportunities(Priority priority) {
|
||||
Integer schedulingOpportunities =
|
||||
this.schedulingOpportunities.get(priority);
|
||||
if (schedulingOpportunities == null) {
|
||||
schedulingOpportunities = 0;
|
||||
this.schedulingOpportunities.put(priority, schedulingOpportunities);
|
||||
}
|
||||
return schedulingOpportunities;
|
||||
return this.schedulingOpportunities.count(priority);
|
||||
}
|
||||
|
||||
synchronized void resetReReservations(Priority priority) {
|
||||
this.reReservations.put(priority, Integer.valueOf(0));
|
||||
this.reReservations.setCount(priority, 0);
|
||||
}
|
||||
|
||||
synchronized void addReReservation(Priority priority) {
|
||||
Integer reReservations = this.reReservations.get(priority);
|
||||
if (reReservations == null) {
|
||||
reReservations = 0;
|
||||
}
|
||||
++reReservations;
|
||||
this.reReservations.put(priority, reReservations);
|
||||
this.reReservations.add(priority);
|
||||
}
|
||||
|
||||
synchronized public int getReReservations(Priority priority) {
|
||||
Integer reReservations = this.reReservations.get(priority);
|
||||
if (reReservations == null) {
|
||||
reReservations = 0;
|
||||
this.reReservations.put(priority, reReservations);
|
||||
}
|
||||
return reReservations;
|
||||
return this.reReservations.count(priority);
|
||||
}
|
||||
|
||||
public synchronized int getNumReservedContainers(Priority priority) {
|
||||
|
|
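The SchedulerApp change above swaps the Map&lt;Priority, Integer&gt; counters for Guava multisets, so add/count/setCount replace the hand-rolled null checks. A standalone sketch of that counting pattern (String keys stand in for Priority purely for illustration):

    import com.google.common.collect.HashMultiset;
    import com.google.common.collect.Multiset;

    /** Hypothetical sketch of the Multiset bookkeeping used by SchedulerApp. */
    public class MultisetCountingSketch {
      public static void main(String[] args) {
        Multiset<String> opportunities = HashMultiset.create();
        opportunities.add("priority-1");                      // one more chance to schedule was offered
        opportunities.add("priority-1");
        int missed = opportunities.count("priority-1");       // == 2, no null handling needed
        opportunities.setCount("priority-1", 0);              // reset after a successful allocation
        System.out.println(missed + " -> " + opportunities.count("priority-1"));
      }
    }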
|
@@ -0,0 +1,34 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
|
||||
|
||||
class CSQueueUtils {
|
||||
|
||||
public static void checkMaxCapacity(String queueName,
|
||||
float capacity, float maximumCapacity) {
|
||||
if (maximumCapacity != CapacitySchedulerConfiguration.UNDEFINED &&
|
||||
maximumCapacity < capacity) {
|
||||
throw new IllegalArgumentException(
|
||||
"Illegal call to setMaxCapacity. " +
|
||||
"Queue '" + queueName + "' has " +
|
||||
"capacity (" + capacity + ") greater than " +
|
||||
"maximumCapacity (" + maximumCapacity + ")" );
|
||||
}
|
||||
}
|
||||
|
||||
}
|
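CSQueueUtils.checkMaxCapacity, added in full above, rejects a maximum capacity that is defined yet smaller than the configured capacity. A tiny illustration; the wrapper class is hypothetical and sits in the same package because the utility is package-private:

    package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;

    /** Hypothetical illustration of the capacity sanity check. */
    public class CheckMaxCapacitySketch {
      public static void main(String[] args) {
        CSQueueUtils.checkMaxCapacity("a", 50.0f, 60.0f);     // max >= capacity: accepted
        try {
          CSQueueUtils.checkMaxCapacity("a", 50.0f, 45.0f);   // max < capacity: rejected
        } catch (IllegalArgumentException expected) {
          System.out.println(expected.getMessage());
        }
      }
    }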
|
@@ -160,6 +160,10 @@ public class CapacitySchedulerConfiguration extends Configuration {
|
|||
return maxCapacity;
|
||||
}
|
||||
|
||||
public void setMaximumCapacity(String queue, int maxCapacity) {
|
||||
setInt(getQueuePrefix(queue) + MAXIMUM_CAPACITY, maxCapacity);
|
||||
}
|
||||
|
||||
public int getUserLimit(String queue) {
|
||||
int userLimit =
|
||||
getInt(getQueuePrefix(queue) + USER_LIMIT, DEFAULT_USER_LIMIT);
|
||||
|
|
|
@ -211,16 +211,19 @@ public class LeafQueue implements CSQueue {
|
|||
|
||||
private synchronized void setupQueueConfigs(
|
||||
float capacity, float absoluteCapacity,
|
||||
float maxCapacity, float absoluteMaxCapacity,
|
||||
float maximumCapacity, float absoluteMaxCapacity,
|
||||
int userLimit, float userLimitFactor,
|
||||
int maxApplications, int maxApplicationsPerUser,
|
||||
int maxActiveApplications, int maxActiveApplicationsPerUser,
|
||||
QueueState state, Map<QueueACL, AccessControlList> acls)
|
||||
{
|
||||
// Sanity check
|
||||
CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
|
||||
|
||||
this.capacity = capacity;
|
||||
this.absoluteCapacity = parent.getAbsoluteCapacity() * capacity;
|
||||
|
||||
this.maximumCapacity = maxCapacity;
|
||||
this.maximumCapacity = maximumCapacity;
|
||||
this.absoluteMaxCapacity = absoluteMaxCapacity;
|
||||
|
||||
this.userLimit = userLimit;
|
||||
|
@ -236,9 +239,9 @@ public class LeafQueue implements CSQueue {
|
|||
|
||||
this.acls = acls;
|
||||
|
||||
this.queueInfo.setCapacity(capacity);
|
||||
this.queueInfo.setMaximumCapacity(maximumCapacity);
|
||||
this.queueInfo.setQueueState(state);
|
||||
this.queueInfo.setCapacity(this.capacity);
|
||||
this.queueInfo.setMaximumCapacity(this.maximumCapacity);
|
||||
this.queueInfo.setQueueState(this.state);
|
||||
|
||||
StringBuilder aclsString = new StringBuilder();
|
||||
for (Map.Entry<QueueACL, AccessControlList> e : acls.entrySet()) {
|
||||
|
@ -250,7 +253,7 @@ public class LeafQueue implements CSQueue {
|
|||
" [= (float) configuredCapacity / 100 ]" + "\n" +
|
||||
"asboluteCapacity = " + absoluteCapacity +
|
||||
" [= parentAbsoluteCapacity * capacity ]" + "\n" +
|
||||
"maxCapacity = " + maxCapacity +
|
||||
"maxCapacity = " + maximumCapacity +
|
||||
" [= configuredMaxCapacity ]" + "\n" +
|
||||
"absoluteMaxCapacity = " + absoluteMaxCapacity +
|
||||
" [= Float.MAX_VALUE if maximumCapacity undefined, " +
|
||||
|
@ -394,6 +397,9 @@ public class LeafQueue implements CSQueue {
|
|||
* @param maximumCapacity new max capacity
|
||||
*/
|
||||
synchronized void setMaxCapacity(float maximumCapacity) {
|
||||
// Sanity check
|
||||
CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
|
||||
|
||||
this.maximumCapacity = maximumCapacity;
|
||||
this.absoluteMaxCapacity =
|
||||
(maximumCapacity == CapacitySchedulerConfiguration.UNDEFINED) ?
|
||||
|
|
|
@ -153,6 +153,9 @@ public class ParentQueue implements CSQueue {
|
|||
float maximumCapacity, float absoluteMaxCapacity,
|
||||
QueueState state, Map<QueueACL, AccessControlList> acls
|
||||
) {
|
||||
// Sanity check
|
||||
CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
|
||||
|
||||
this.capacity = capacity;
|
||||
this.absoluteCapacity = absoluteCapacity;
|
||||
this.maximumCapacity = maximumCapacity;
|
||||
|
@ -162,9 +165,9 @@ public class ParentQueue implements CSQueue {
|
|||
|
||||
this.acls = acls;
|
||||
|
||||
this.queueInfo.setCapacity(capacity);
|
||||
this.queueInfo.setMaximumCapacity(maximumCapacity);
|
||||
this.queueInfo.setQueueState(state);
|
||||
this.queueInfo.setCapacity(this.capacity);
|
||||
this.queueInfo.setMaximumCapacity(this.maximumCapacity);
|
||||
this.queueInfo.setQueueState(this.state);
|
||||
|
||||
StringBuilder aclsString = new StringBuilder();
|
||||
for (Map.Entry<QueueACL, AccessControlList> e : acls.entrySet()) {
|
||||
|
@ -484,6 +487,9 @@ public class ParentQueue implements CSQueue {
|
|||
* @param maximumCapacity new max capacity
|
||||
*/
|
||||
synchronized void setMaxCapacity(float maximumCapacity) {
|
||||
// Sanity check
|
||||
CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
|
||||
|
||||
this.maximumCapacity = maximumCapacity;
|
||||
float parentAbsoluteCapacity =
|
||||
(rootQueue) ? 100.0f : parent.getAbsoluteCapacity();
|
||||
|
|
|
@ -61,6 +61,10 @@ public class RmView extends TwoColumnLayout {
|
|||
StringBuilder init = tableInit().
|
||||
append(", aoColumns:[{sType:'title-numeric'}, null, null, null, null,").
|
||||
append("null,{sType:'title-numeric', bSearchable:false}, null, null]");
|
||||
|
||||
// Sort by id upon page load
|
||||
init.append(", aaSorting: [[0, 'asc']]");
|
||||
|
||||
String rows = $("rowlimit");
|
||||
int rowLimit = rows.isEmpty() ? MAX_DISPLAY_ROWS : Integer.parseInt(rows);
|
||||
if (list.apps.size() < rowLimit) {
|
||||
|
|
|
@ -18,6 +18,8 @@
|
|||
|
||||
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
|
||||
|
||||
import junit.framework.Assert;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
|
||||
|
@ -35,7 +37,6 @@ public class TestQueueParsing {
|
|||
|
||||
CapacityScheduler capacityScheduler = new CapacityScheduler();
|
||||
capacityScheduler.reinitialize(conf, null, null);
|
||||
//capacityScheduler.g
|
||||
}
|
||||
|
||||
private void setupQueueConfiguration(CapacitySchedulerConfiguration conf) {
|
||||
|
@ -104,4 +105,48 @@ public class TestQueueParsing {
|
|||
CapacityScheduler capacityScheduler = new CapacityScheduler();
|
||||
capacityScheduler.reinitialize(conf, null, null);
|
||||
}
|
||||
|
||||
public void testMaxCapacity() throws Exception {
|
||||
CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
|
||||
|
||||
conf.setQueues(CapacityScheduler.ROOT, new String[] {"a", "b", "c"});
|
||||
conf.setCapacity(CapacityScheduler.ROOT, 100);
|
||||
|
||||
final String A = CapacityScheduler.ROOT + ".a";
|
||||
conf.setCapacity(A, 50);
|
||||
conf.setMaximumCapacity(A, 60);
|
||||
|
||||
final String B = CapacityScheduler.ROOT + ".b";
|
||||
conf.setCapacity(B, 50);
|
||||
conf.setMaximumCapacity(B, 45); // Should throw an exception
|
||||
|
||||
|
||||
boolean fail = false;
|
||||
CapacityScheduler capacityScheduler;
|
||||
try {
|
||||
capacityScheduler = new CapacityScheduler();
|
||||
capacityScheduler.reinitialize(conf, null, null);
|
||||
} catch (IllegalArgumentException iae) {
|
||||
fail = true;
|
||||
}
|
||||
Assert.assertTrue("Didn't throw IllegalArgumentException for wrong maxCap",
|
||||
fail);
|
||||
|
||||
conf.setMaximumCapacity(B, 60);
|
||||
|
||||
// Now this should work
|
||||
capacityScheduler = new CapacityScheduler();
|
||||
capacityScheduler.reinitialize(conf, null, null);
|
||||
|
||||
fail = false;
|
||||
try {
|
||||
LeafQueue a = (LeafQueue)capacityScheduler.getQueue(A);
|
||||
a.setMaxCapacity(45);
|
||||
} catch (IllegalArgumentException iae) {
|
||||
fail = true;
|
||||
}
|
||||
Assert.assertTrue("Didn't throw IllegalArgumentException for wrong " +
|
||||
"setMaxCap", fail);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
~~ limitations under the License. See accompanying LICENSE file.
|
||||
|
||||
---
|
||||
Hadoop Map Reduce Next Generation-${project.version} - Cluster Setup
|
||||
Hadoop Distributed File System-${project.version} - Federation
|
||||
---
|
||||
---
|
||||
${maven.build.timestamp}
|
||||
|
@ -57,12 +57,12 @@ HDFS Federation
|
|||
* Storage - is provided by datanodes by storing blocks on the local file
|
||||
system and allows read/write access.
|
||||
|
||||
The current HDFS architecture allows only a single namespace for the
|
||||
The prior HDFS architecture allows only a single namespace for the
|
||||
entire cluster. A single Namenode manages this namespace. HDFS
|
||||
Federation addresses limitation of current architecture by adding
|
||||
Federation addresses limitation of the prior architecture by adding
|
||||
support multiple Namenodes/namespaces to HDFS file system.
|
||||
|
||||
* {HDFS Federation}
|
||||
* {Multiple Namenodes/Namespaces}
|
||||
|
||||
In order to scale the name service horizontally, federation uses multiple
|
||||
independent Namenodes/namespaces. The Namenodes are federated, that is, the
|
||||
|
@ -103,9 +103,9 @@ HDFS Federation
|
|||
of small files benefit from scaling the namespace by adding more
|
||||
Namenodes to the cluster
|
||||
|
||||
* Performance - File system operation throughput is currently limited
|
||||
by a single Namenode. Adding more Namenodes to the cluster scales the
|
||||
file system read/write operations throughput.
|
||||
* Performance - File system operation throughput is limited by a single
|
||||
Namenode in the prior architecture. Adding more Namenodes to the cluster
|
||||
scales the file system read/write operations throughput.
|
||||
|
||||
* Isolation - A single Namenode offers no isolation in multi user
|
||||
environment. An experimental application can overload the Namenode
|
||||
|
@ -265,7 +265,7 @@ HDFS Federation
|
|||
> $HADOOP_PREFIX_HOME/bin/start-dfs.sh
|
||||
----
|
||||
|
||||
To start the cluster run the following command:
|
||||
To stop the cluster run the following command:
|
||||
|
||||
----
|
||||
> $HADOOP_PREFIX_HOME/bin/stop-dfs.sh
|
||||
|
@ -300,7 +300,7 @@ HDFS Federation
|
|||
** Decommissioning
|
||||
|
||||
Decommissioning is similar to prior releases. The nodes that need to be
|
||||
decommissioning are added to the exclude file at all the Namenode. Each
|
||||
decomissioned are added to the exclude file at all the Namenode. Each
|
||||
Namenode decommissions its Block Pool. When all the Namenodes finish
|
||||
decommissioning a datanode, the datanode is considered to be decommissioned.
|
||||
|
||||
|
|
File diff suppressed because it is too large
|
@ -64,6 +64,10 @@
|
|||
rev="${yarn.version}" conf="common->default"/>
|
||||
<dependency org="org.apache.hadoop" name="hadoop-yarn-common"
|
||||
rev="${yarn.version}" conf="common->default"/>
|
||||
<dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-jobclient"
|
||||
rev="${yarn.version}" conf="test->default">
|
||||
<artifact name="hadoop-mapreduce-client-jobclient" type="tests" ext="jar" m:classifier="tests"/>
|
||||
</dependency>
|
||||
<dependency org="commons-logging"
|
||||
name="commons-logging"
|
||||
rev="${commons-logging.version}"
|
||||
|
|
|
@ -1,52 +0,0 @@
|
|||
<?xml version="1.0"?>
|
||||
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
contributor license agreements. See the NOTICE file distributed with
|
||||
this work for additional information regarding copyright ownership.
|
||||
The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
(the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
|
||||
<!--
|
||||
Before you can run these subtargets directly, you need
|
||||
to call at top-level: ant deploy-contrib compile-core-test
|
||||
-->
|
||||
<project name="streaming" default="jar">
|
||||
|
||||
<import file="../build-contrib.xml"/>
|
||||
|
||||
<!-- Override jar target to specify main class -->
|
||||
<target name="jar" depends="compile">
|
||||
<jar
|
||||
jarfile="${build.dir}/hadoop-${version}-${name}.jar"
|
||||
basedir="${build.classes}"
|
||||
>
|
||||
<manifest>
|
||||
<attribute name="Main-Class" value="org.apache.hadoop.streaming.HadoopStreaming"/>
|
||||
</manifest>
|
||||
</jar>
|
||||
</target>
|
||||
|
||||
<!-- Run all unit tests. superdottest -->
|
||||
<target name="test">
|
||||
<antcall target="hadoopbuildcontrib.test">
|
||||
</antcall>
|
||||
</target>
|
||||
|
||||
<!--Run all system tests.-->
|
||||
<target name="test-system">
|
||||
<antcall target="hadoopbuildcontrib.test-system">
|
||||
</antcall>
|
||||
</target>
|
||||
|
||||
</project>
|
|
@ -1,98 +0,0 @@
<?xml version="1.0" ?>
<!--
   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements.  See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to You under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License.  You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
-->

<ivy-module version="1.0" xmlns:m="http://ant.apache.org/ivy/maven">
  <info organisation="org.apache.hadoop" module="${ant.project.name}">
    <license name="Apache 2.0"/>
    <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
    <description>
      Apache Hadoop
    </description>
  </info>
  <configurations defaultconfmapping="default">
    <!--these match the Maven configurations-->
    <conf name="default" extends="master,runtime"/>
    <conf name="master" description="contains the artifact but no dependencies"/>
    <conf name="runtime" description="runtime but not the artifact" />

    <conf name="common" visibility="private"
      extends="runtime"
      description="artifacts needed to compile/test the application"/>
    <conf name="test" visibility="private" extends="runtime"/>
  </configurations>

  <publications>
    <!--get the artifact from our module name-->
    <artifact conf="master"/>
  </publications>
  <dependencies>
    <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
    <dependency org="org.apache.hadoop" name="hadoop-common"
                rev="${hadoop-common.version}" conf="common->default"/>
    <dependency org="org.apache.hadoop" name="hadoop-common"
                rev="${hadoop-common.version}" conf="test->default">
      <artifact name="hadoop-common" type="tests" ext="jar" m:classifier="tests"/>
    </dependency>
    <dependency org="org.apache.hadoop" name="hadoop-hdfs"
                rev="${hadoop-hdfs.version}" conf="common->default"/>
    <dependency org="org.apache.hadoop" name="hadoop-hdfs"
                rev="${hadoop-hdfs.version}" conf="test->default">
      <artifact name="hadoop-hdfs" type="tests" ext="jar" m:classifier="tests"/>
    </dependency>
    <dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-core"
                rev="${yarn.version}" conf="common->default"/>
    <dependency org="org.apache.hadoop" name="hadoop-yarn-common"
                rev="${yarn.version}" conf="common->default"/>

    <dependency org="commons-cli" name="commons-cli"
                rev="${commons-cli.version}" conf="common->default"/>
    <dependency org="commons-logging" name="commons-logging"
                rev="${commons-logging.version}" conf="common->default"/>
    <dependency org="junit" name="junit"
                rev="${junit.version}" conf="common->default"/>
    <dependency org="org.mortbay.jetty" name="jetty-util"
                rev="${jetty-util.version}" conf="common->master"/>
    <dependency org="org.mortbay.jetty" name="jetty"
                rev="${jetty.version}" conf="common->master"/>
    <dependency org="org.mortbay.jetty" name="jsp-api-2.1"
                rev="${jetty.version}" conf="common->master"/>
    <dependency org="org.mortbay.jetty" name="jsp-2.1"
                rev="${jetty.version}" conf="common->master"/>
    <dependency org="org.mortbay.jetty" name="servlet-api-2.5"
                rev="${servlet-api-2.5.version}" conf="common->master"/>
    <dependency org="commons-httpclient" name="commons-httpclient"
                rev="${commons-httpclient.version}" conf="common->default"/>
    <dependency org="log4j" name="log4j"
                rev="${log4j.version}" conf="common->master"/>
    <dependency org="org.apache.avro" name="avro"
                rev="${avro.version}" conf="common->default">
      <exclude module="ant"/>
      <exclude module="jetty"/>
      <exclude module="slf4j-simple"/>
    </dependency>
    <dependency org="org.slf4j" name="slf4j-api"
                rev="${slf4j-api.version}" conf="common->master"/>

    <!-- Exclusions for transitive dependencies pulled in by log4j -->
    <exclude org="com.sun.jdmk"/>
    <exclude org="com.sun.jmx"/>
    <exclude org="javax.jms"/>
    <exclude org="javax.mail"/>

  </dependencies>
</ivy-module>
@ -1,17 +0,0 @@
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

#This properties file lists the versions of the various artifacts used by streaming.
#It drives ivy and the generation of a maven POM

#Please list the dependencies name with version if they are different from the ones
#listed in the global libraries.properties file (in alphabetical order)
@ -242,7 +242,11 @@ for my $rel (@releases) {

      $item =~ s:\s*(\([^)"]+?\))\s*$:<br />$1:;        # Separate attribution
      $item =~ s:\n{2,}:\n<p/>\n:g;                      # Keep paragraph breaks
      $item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)}    # Link to JIRA
      $item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)}    # Link to JIRA Common
                {<a href="${jira_url_prefix}$1">$1</a>}g;
      $item =~ s{(?:${jira_url_prefix})?(HDFS-\d+)}      # Link to JIRA Hdfs
                {<a href="${jira_url_prefix}$1">$1</a>}g;
      $item =~ s{(?:${jira_url_prefix})?(MAPREDUCE-\d+)} # Link to JIRA MR
                {<a href="${jira_url_prefix}$1">$1</a>}g;
      print "    <li>$item</li>\n";
    }
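A rough illustration, not part of this patch: the Perl substitutions above wrap bare HADOOP-, HDFS-, and MAPREDUCE- issue keys in links to JIRA. The sketch below re-expresses that linking logic in Java purely for clarity; the URL prefix is an assumed value here, while changes2html.pl takes its own $jira_url_prefix from the script itself.

    import java.util.regex.Pattern;

    public class JiraLinker {
        // Assumed prefix for illustration; the script defines $jira_url_prefix itself.
        private static final String JIRA_URL_PREFIX =
            "http://issues.apache.org/jira/browse/";

        /** Wrap HADOOP-, HDFS-, and MAPREDUCE- issue keys in HTML links. */
        static String linkJiras(String item) {
            for (String key : new String[] {"HADOOP", "HDFS", "MAPREDUCE"}) {
                // Optionally strip an existing prefix, then link the bare key,
                // mirroring the three s{...}{...}g substitutions in the patch.
                item = item.replaceAll(
                    "(?:" + Pattern.quote(JIRA_URL_PREFIX) + ")?(" + key + "-\\d+)",
                    "<a href=\"" + JIRA_URL_PREFIX + "$1\">$1</a>");
            }
            return item;
        }

        public static void main(String[] args) {
            System.out.println(linkJiras(
                "HADOOP-7861. changes2html.pl generates links to HADOOP, HDFS, "
                + "and MAPREDUCE jiras. (shv)"));
        }
    }

Running the sketch prints the same line with HADOOP-7861 rendered as an HTML anchor, which is what the generated changelog page relies on.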
@ -2060,9 +2060,7 @@
          <p>Hadoop comes configured with a single mandatory queue, called
          'default'. Queue names are defined in the
          <code>mapred.queue.names</code> property of the Hadoop site
          configuration. Some job schedulers, such as the
          <a href="capacity_scheduler.html">Capacity Scheduler</a>,
          support multiple queues.</p>
          configuration.</p>

          <p>A job defines the queue it needs to be submitted to through the
          <code>mapreduce.job.queuename</code> property.
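A hedged illustration, not part of this diff: a job picks its queue by setting the mapreduce.job.queuename property mentioned above on its Configuration before submission. The queue name must be one of those listed in mapred.queue.names; 'default' always exists in a stock setup.

    import org.apache.hadoop.conf.Configuration;

    public class QueueSelection {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Route the job to the stock "default" queue; substitute any queue
            // name that appears in mapred.queue.names for your cluster.
            conf.set("mapreduce.job.queuename", "default");
            System.out.println("Submitting to queue: "
                + conf.get("mapreduce.job.queuename"));
            // A Job built from this Configuration is submitted to that queue.
        }
    }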
@ -35,6 +35,7 @@
%>
<%! private static final long serialVersionUID = 1L;
%>
<!DOCTYPE html>
<html><body>
<%
  String logFile = request.getParameter("logFile");
@ -28,6 +28,7 @@
<%! private static final long serialVersionUID = 1L;
%>

<!DOCTYPE html>
<html>
<head>
<title>Error: User cannot access this Job</title>
@ -74,6 +74,7 @@
  }
%>

<!DOCTYPE html>
<html>
<title>Hadoop <%=jobId%>'s black-listed tasktrackers</title>
<body>
@ -39,7 +39,7 @@
    return;
  }
%>

<!DOCTYPE html>
<html>

<title>Job Configuration: JobId - <%= jobId %></title>
@ -49,6 +49,7 @@

%>

<!DOCTYPE html>
<html>

<title>Job Configuration: JobId - <%= jobId %></title>
@ -267,6 +267,7 @@
%>

<%@page import="org.apache.hadoop.mapred.TaskGraphServlet"%>
<!DOCTYPE html>
<html>
<head>
<%
@ -60,6 +60,7 @@
    reasonforFailure = job.getErrorInfo();
%>

<!DOCTYPE html>
<html>
<head>
<title>Hadoop Job <%=jobid%> on History Viewer</title>
Some files were not shown because too many files have changed in this diff.