Merging trunk to HDFS-1623 branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1177130 13f79535-47bb-0310-9956-ffa450edef68

parent 9992cae541
commit ab0402bc1d
@@ -598,8 +598,8 @@ runTests () {
  echo ""
  echo ""
  echo "$MVN clean test -Pnative -D${PROJECT_NAME}PatchProcess"
  $MVN clean test -Pnative -D${PROJECT_NAME}PatchProcess
  echo "$MVN clean install test -Pnative -D${PROJECT_NAME}PatchProcess"
  $MVN clean install test -Pnative -D${PROJECT_NAME}PatchProcess
  if [[ $? != 0 ]] ; then
    ### Find and format names of failed tests
    failed_tests=`find . -name 'TEST*.xml' | xargs $GREP -l -E "<failure|<error" | sed -e "s|.*target/surefire-reports/TEST-| |g" | sed -e "s|\.xml||g"`
@@ -2,6 +2,12 @@ Hadoop Change Log

Trunk (unreleased changes)

  INCOMPATIBLE CHANGES

    HADOOP-7542. Change Configuration XML format to 1.1 to add support for
    serializing additional characters. This requires XML1.1
    support in the XML parser (Christopher Egner via harsh)

  IMPROVEMENTS

    HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm)
@@ -13,6 +19,11 @@ Trunk (unreleased changes)

    HADOOP-7635. RetryInvocationHandler should release underlying resources on
    close (atm)

    HADOOP-7668. Add a NetUtils method that can tell if an InetAddress
    belongs to local host. (suresh)

    HADOOP-7687 Make getProtocolSignature public (sanjay)

  BUGS
@@ -23,6 +34,16 @@ Trunk (unreleased changes)

    HADOOP-7641. Add Apache License to template config files (Eric Yang via atm)

    HADOOP-7621. alfredo config should be in a file not readable by users
    (Alejandro Abdelnur via atm)

    HADOOP-7669 Fix newly introduced release audit warning.
    (Uma Maheswara Rao G via stevel)

    HADOOP-6220. HttpServer wraps InterruptedExceptions by IOExceptions if interrupted
    in startup (stevel)

Release 0.23.0 - Unreleased

  INCOMPATIBLE CHANGES
@@ -287,9 +308,6 @@ Release 0.23.0 - Unreleased
    HADOOP-7430. Improve error message when moving to trash fails due to
    quota issue. (Ravi Prakash via mattf)

    HADOOP-7457. Remove out-of-date Chinese language documentation.
    (Jakob Homan via eli)

    HADOOP-7444. Add Checksum API to verify and calculate checksums "in bulk"
    (todd)
@@ -388,6 +406,13 @@ Release 0.23.0 - Unreleased
    HADOOP-7599. Script improvements to setup a secure Hadoop cluster
    (Eric Yang via ddas)

    HADOOP-7639. Enhance HttpServer to allow passing path-specs for filtering,
    so that servers like Yarn WebApp can get filtered the paths served by
    their own injected servlets. (Thomas Graves via vinodkv)

    HADOOP-7575. Enhanced LocalDirAllocator to support fully-qualified
    paths. (Jonathan Eagles via vinodkv)

  OPTIMIZATIONS

    HADOOP-7333. Performance improvement in PureJavaCrc32. (Eric Caspole
@@ -398,6 +423,9 @@ Release 0.23.0 - Unreleased

  BUG FIXES

    HADOOP-7630. hadoop-metrics2.properties should have a property *.period
    set to a default value for metrics. (Eric Yang via mattf)

    HADOOP-7327. FileSystem.listStatus() throws NullPointerException instead of
    IOException upon access permission failure. (mattf)
@@ -603,6 +631,9 @@ Release 0.23.0 - Unreleased
    HADOOP-7631. Fixes a config problem to do with running streaming jobs
    (Eric Yang via ddas)

    HADOOP-7662. Fixed logs servlet to use the pathspec '/*' instead of '/'
    for correct filtering. (Thomas Graves via vinodkv)

Release 0.22.0 - Unreleased

  INCOMPATIBLE CHANGES
@@ -1118,6 +1149,11 @@ Release 0.22.0 - Unreleased
    HADOOP-7568. SequenceFile should not print into stdout.
    (Plamen Jeliazkov via shv)

    HADOOP-7663. Fix TestHDFSTrash failure. (Mayank Bansal via shv)

    HADOOP-7457. Remove out-of-date Chinese language documentation.
    (Jakob Homan via eli)

Release 0.21.1 - Unreleased

  IMPROVEMENTS
@@ -82,10 +82,12 @@
    <code>36000</code>.
    </p>

    <p><code>hadoop.http.authentication.signature.secret</code>: The signature secret for
    signing the authentication tokens. If not set a random secret is generated at
    <p><code>hadoop.http.authentication.signature.secret.file</code>: The signature secret
    file for signing the authentication tokens. If not set a random secret is generated at
    startup time. The same secret should be used for all nodes in the cluster, JobTracker,
    NameNode, DataNode and TaskTracker. The default value is a <code>hadoop</code> value.
    NameNode, DataNode and TaskTracker. The default value is
    <code>${user.home}/hadoop-http-auth-signature-secret</code>.
    IMPORTANT: This file should be readable only by the Unix user running the daemons.
    </p>

    <p><code>hadoop.http.authentication.cookie.domain</code>: The domain to use for the HTTP
@@ -1632,6 +1632,10 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    try {
      doc =
        DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument();

      // Allow a broader set of control characters to appear in job confs.
      // cf https://issues.apache.org/jira/browse/MAPREDUCE-109
      doc.setXmlVersion( "1.1" );
    } catch (ParserConfigurationException pe) {
      throw new IOException(pe);
    }
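For illustration only (not part of this patch): a minimal sketch of what the XML 1.1 declaration buys. A resource that encodes the control character U+0001 as a character reference, which XML 1.0 rejects, can now be loaded, assuming the JVM's XML parser supports version 1.1 as the change-log entry notes. The file path and class name below are hypothetical.

    import java.io.*;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class Xml11ControlCharDemo {
      public static void main(String[] args) throws IOException {
        // Write a config resource that declares XML 1.1 and encodes \u0001
        // as &#1; (legal in XML 1.1, rejected by XML 1.0 parsers).
        File f = new File("/tmp/ctrl-char-conf.xml");   // hypothetical path
        Writer out = new BufferedWriter(new FileWriter(f));
        out.write("<?xml version=\"1.1\"?>\n<configuration>\n");
        out.write("<property><name>my.char</name><value>&#1;</value></property>\n");
        out.write("</configuration>\n");
        out.close();

        Configuration conf = new Configuration(false);
        conf.addResource(new Path(f.getAbsolutePath()));
        // Prints 1: the control character survived the round trip.
        System.out.println((int) conf.get("my.char").charAt(0));
      }
    }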
@@ -264,9 +264,15 @@ public class LocalDirAllocator {
        Path tmpDir = new Path(localDirs[i]);
        if(localFS.mkdirs(tmpDir)|| localFS.exists(tmpDir)) {
          try {
            DiskChecker.checkDir(new File(localDirs[i]));
            dirs.add(localDirs[i]);
            dfList.add(new DF(new File(localDirs[i]), 30000));

            File tmpFile = tmpDir.isAbsolute()
              ? new File(localFS.makeQualified(tmpDir).toUri())
              : new File(localDirs[i]);

            DiskChecker.checkDir(tmpFile);
            dirs.add(tmpFile.getPath());
            dfList.add(new DF(tmpFile, 30000));

          } catch (DiskErrorException de) {
            LOG.warn( localDirs[i] + " is not writable\n", de);
          }
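Illustrative usage sketch (not part of the patch) of LocalDirAllocator with a mix of plain and fully-qualified buffer directories, which this hunk now handles; the context key and directory names are hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.LocalDirAllocator;
    import org.apache.hadoop.fs.Path;

    public class LocalDirAllocatorDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // One plain local path and one fully-qualified file: URI.
        conf.set("fs.client.buffer.dir", "/tmp/alloc0,file:///tmp/alloc1");
        LocalDirAllocator allocator = new LocalDirAllocator("fs.client.buffer.dir");
        // Round-robins over the configured directories, creating them if needed.
        Path p = allocator.getLocalPathForWrite("block_0001", conf);
        System.out.println("allocated " + p);
      }
    }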
@@ -20,6 +20,7 @@ package org.apache.hadoop.http;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.InterruptedIOException;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.URL;
@@ -124,6 +125,29 @@ public class HttpServer implements FilterContainer {
      boolean findPort, Configuration conf, Connector connector) throws IOException {
    this(name, bindAddress, port, findPort, conf, null, connector);
  }

  /**
   * Create a status server on the given port. Allows you to specify the
   * path specifications that this server will be serving so that they will be
   * added to the filters properly.
   *
   * @param name The name of the server
   * @param bindAddress The address for this server
   * @param port The port to use on the server
   * @param findPort whether the server should start at the given port and
   *        increment by 1 until it finds a free port.
   * @param conf Configuration
   * @param pathSpecs Path specifications that this httpserver will be serving.
   *        These will be added to any filters.
   */
  public HttpServer(String name, String bindAddress, int port,
      boolean findPort, Configuration conf, String[] pathSpecs) throws IOException {
    this(name, bindAddress, port, findPort, conf, null, null);
    for (String path : pathSpecs) {
      LOG.info("adding path spec: " + path);
      addFilterPathMapping(path, webAppContext);
    }
  }

  /**
   * Create a status server on the given port.
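Illustrative usage sketch (not part of the patch) of the new path-spec constructor; it mirrors the TestPathFilter test added later in this commit, with hypothetical server and path names.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer;

    public class PathSpecServerDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Requests matching these specs are routed through any configured filters.
        String[] pathSpecs = { "/path", "/path/*" };
        HttpServer server = new HttpServer("test", "0.0.0.0", 0, true, conf, pathSpecs);
        server.start();
        System.out.println("listening on port " + server.getPort());
        server.stop();
      }
    }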
@@ -259,7 +283,7 @@ public class HttpServer implements FilterContainer {
    if (logDir != null) {
      Context logContext = new Context(parent, "/logs");
      logContext.setResourceBase(logDir);
      logContext.addServlet(AdminAuthorizedServlet.class, "/");
      logContext.addServlet(AdminAuthorizedServlet.class, "/*");
      logContext.setDisplayName("logs");
      setContextAttributes(logContext, conf);
      defaultContexts.put(logContext, true);
@@ -660,6 +684,9 @@ public class HttpServer implements FilterContainer {
      }
    } catch (IOException e) {
      throw e;
    } catch (InterruptedException e) {
      throw (IOException) new InterruptedIOException(
          "Interrupted while starting HTTP server").initCause(e);
    } catch (Exception e) {
      throw new IOException("Problem starting http server", e);
    }
@@ -199,7 +199,7 @@ public class ProtocolSignature implements Writable {
   * @param protocol protocol
   * @return the server's protocol signature
   */
  static ProtocolSignature getProtocolSignature(
  public static ProtocolSignature getProtocolSignature(
      int clientMethodsHashCode,
      long serverVersion,
      Class<? extends VersionedProtocol> protocol) {
@@ -516,4 +516,25 @@ public class NetUtils {
    } catch (UnknownHostException ignore) { }
    return addr;
  }

  /**
   * Given an InetAddress, checks to see if the address is a local address, by
   * comparing the address with all the interfaces on the node.
   * @param addr address to check if it is local node's address
   * @return true if the address corresponds to the local node
   */
  public static boolean isLocalAddress(InetAddress addr) {
    // Check if the address is any local or loop back
    boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress();

    // Check if the address is defined on any interface
    if (!local) {
      try {
        local = NetworkInterface.getByInetAddress(addr) != null;
      } catch (SocketException e) {
        local = false;
      }
    }
    return local;
  }
}
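Illustrative usage sketch (not part of the patch) of the new NetUtils.isLocalAddress method; the expected results follow directly from the code above and from the TestNetUtils case added later in this commit.

    import java.net.InetAddress;
    import org.apache.hadoop.net.NetUtils;

    public class IsLocalAddressDemo {
      public static void main(String[] args) throws Exception {
        // Loopback and the node's own addresses report true; remote hosts false.
        System.out.println(NetUtils.isLocalAddress(InetAddress.getByName("127.0.0.1")));
        System.out.println(NetUtils.isLocalAddress(InetAddress.getLocalHost()));
        System.out.println(NetUtils.isLocalAddress(InetAddress.getByName("8.8.8.8")));
      }
    }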
@@ -22,6 +22,9 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;

import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.HashMap;
import java.util.Map;
@@ -40,8 +43,10 @@ import java.util.Map;
 */
public class AuthenticationFilterInitializer extends FilterInitializer {

  private static final String PREFIX = "hadoop.http.authentication.";
  static final String PREFIX = "hadoop.http.authentication.";

  static final String SIGNATURE_SECRET_FILE = AuthenticationFilter.SIGNATURE_SECRET + ".file";

  /**
   * Initializes Alfredo AuthenticationFilter.
   * <p/>
@@ -67,6 +72,25 @@ public class AuthenticationFilterInitializer extends FilterInitializer {
      }
    }

    String signatureSecretFile = filterConfig.get(SIGNATURE_SECRET_FILE);
    if (signatureSecretFile == null) {
      throw new RuntimeException("Undefined property: " + SIGNATURE_SECRET_FILE);
    }

    try {
      StringBuilder secret = new StringBuilder();
      Reader reader = new FileReader(signatureSecretFile);
      int c = reader.read();
      while (c > -1) {
        secret.append((char)c);
        c = reader.read();
      }
      reader.close();
      filterConfig.put(AuthenticationFilter.SIGNATURE_SECRET, secret.toString());
    } catch (IOException ex) {
      throw new RuntimeException("Could not read HTTP signature secret file: " + signatureSecretFile);
    }

    container.addFilter("authentication",
                        AuthenticationFilter.class.getName(),
                        filterConfig);
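For illustration only (not part of this commit): a minimal sketch of how the signature secret file read above might be generated with owner-only permissions, as the documentation earlier in this change requires. The class name, default location, and use of java.nio.file are assumptions.

    import java.math.BigInteger;
    import java.nio.file.*;
    import java.nio.file.attribute.PosixFilePermissions;
    import java.security.SecureRandom;

    public class CreateHttpAuthSecret {
      public static void main(String[] args) throws Exception {
        // Location matching the documented default for
        // hadoop.http.authentication.signature.secret.file.
        Path secret = Paths.get(System.getProperty("user.home"),
            "hadoop-http-auth-signature-secret");
        // Any opaque string shared by every node in the cluster works.
        String value = new BigInteger(130, new SecureRandom()).toString(32);
        Files.write(secret, value.getBytes("UTF-8"));
        // Readable only by the Unix user running the daemons (rw-------).
        Files.setPosixFilePermissions(secret,
            PosixFilePermissions.fromString("rw-------"));
      }
    }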
@@ -475,7 +475,10 @@ else
  template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/taskcontroller.cfg ${HADOOP_CONF_DIR}/taskcontroller.cfg
  template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/hadoop-metrics2.properties ${HADOOP_CONF_DIR}/hadoop-metrics2.properties
  if [ ! -e ${HADOOP_CONF_DIR}/capacity-scheduler.xml ]; then
    template_generator ${HADOOP_PREFIX}/share/hadoop/templates/conf/capacity-scheduler.xml ${HADOOP_CONF_DIR}/capacity-scheduler.xml
    template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/capacity-scheduler.xml ${HADOOP_CONF_DIR}/capacity-scheduler.xml
  fi
  if [ ! -e ${HADOOP_CONF_DIR}/hadoop-metrics2.properties ]; then
    cp ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/hadoop-metrics2.properties ${HADOOP_CONF_DIR}/hadoop-metrics2.properties
  fi
  if [ ! -e ${HADOOP_CONF_DIR}/log4j.properties ]; then
    cp ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/log4j.properties ${HADOOP_CONF_DIR}/log4j.properties
@@ -0,0 +1,20 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# syntax: [prefix].[source|sink|jmx].[instance].[options]
# See package.html for org.apache.hadoop.metrics2 for details

*.period=60
@@ -144,6 +144,26 @@
    </description>
  </property>

  <property>
    <name>dfs.web.authentication.kerberos.principal</name>
    <value>HTTP/_HOST@${local.realm}</value>
    <description>
      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.

      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
      HTTP SPNEGO specification.
    </description>
  </property>

  <property>
    <name>dfs.web.authentication.kerberos.keytab</name>
    <value>/etc/security/keytabs/nn.service.keytab</value>
    <description>
      The Kerberos keytab file with the credentials for the
      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
    </description>
  </property>

  <property>
    <name>dfs.namenode.keytab.file</name>
    <value>/etc/security/keytabs/nn.service.keytab</value>
@@ -0,0 +1,213 @@
# Copyright 2011 The Apache Software Foundation
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Define some default values that can be overridden by system properties
|
||||
hadoop.root.logger=INFO,console
|
||||
hadoop.log.dir=.
|
||||
hadoop.log.file=hadoop.log
|
||||
|
||||
#
|
||||
# Job Summary Appender
|
||||
#
|
||||
# Use following logger to send summary to separate file defined by
|
||||
# hadoop.mapreduce.jobsummary.log.file rolled daily:
|
||||
# hadoop.mapreduce.jobsummary.logger=INFO,JSA
|
||||
#
|
||||
hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
|
||||
hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
|
||||
|
||||
# Define the root logger to the system property "hadoop.root.logger".
|
||||
log4j.rootLogger=${hadoop.root.logger}, EventCounter
|
||||
|
||||
# Logging Threshold
|
||||
log4j.threshold=ALL
|
||||
|
||||
#
|
||||
# Daily Rolling File Appender
|
||||
#
|
||||
|
||||
log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
|
||||
log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
|
||||
|
||||
# Rollover at midnight
|
||||
log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
|
||||
|
||||
# 30-day backup
|
||||
#log4j.appender.DRFA.MaxBackupIndex=30
|
||||
log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
|
||||
|
||||
# Pattern format: Date LogLevel LoggerName LogMessage
|
||||
log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
|
||||
# Debugging Pattern format
|
||||
#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
|
||||
|
||||
|
||||
#
|
||||
# console
|
||||
# Add "console" to rootlogger above if you want to use this
|
||||
#
|
||||
|
||||
log4j.appender.console=org.apache.log4j.ConsoleAppender
|
||||
log4j.appender.console.target=System.err
|
||||
log4j.appender.console.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
|
||||
|
||||
#
|
||||
# TaskLog Appender
|
||||
#
|
||||
|
||||
#Default values
|
||||
hadoop.tasklog.taskid=null
|
||||
hadoop.tasklog.iscleanup=false
|
||||
hadoop.tasklog.noKeepSplits=4
|
||||
hadoop.tasklog.totalLogFileSize=100
|
||||
hadoop.tasklog.purgeLogSplits=true
|
||||
hadoop.tasklog.logsRetainHours=12
|
||||
|
||||
log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
|
||||
log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
|
||||
log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
|
||||
log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
|
||||
|
||||
log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
|
||||
|
||||
#
|
||||
#Security appender
|
||||
#
|
||||
hadoop.security.log.file=SecurityAuth.audit
|
||||
log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
|
||||
log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
|
||||
|
||||
log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
|
||||
#new logger
|
||||
# Define some default values that can be overridden by system properties
|
||||
hadoop.security.logger=INFO,console
|
||||
log4j.category.SecurityLogger=${hadoop.security.logger}
|
||||
|
||||
# hdfs audit logging
|
||||
|
||||
hdfs.audit.logger=INFO,console
|
||||
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
|
||||
log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
|
||||
log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
|
||||
log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
|
||||
log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
|
||||
log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
|
||||
|
||||
# mapred audit logging
|
||||
|
||||
mapred.audit.logger=INFO,console
|
||||
log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
|
||||
log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
|
||||
log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
|
||||
log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
|
||||
log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
|
||||
log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
|
||||
|
||||
#
|
||||
# Rolling File Appender
|
||||
#
|
||||
|
||||
#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
|
||||
#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
|
||||
|
||||
# Logfile size and 30-day backups
|
||||
#log4j.appender.RFA.MaxFileSize=1MB
|
||||
#log4j.appender.RFA.MaxBackupIndex=30
|
||||
|
||||
#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
|
||||
#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
|
||||
#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
|
||||
|
||||
#
|
||||
# FSNamesystem Audit logging
|
||||
# All audit events are logged at INFO level
|
||||
#
|
||||
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=WARN
|
||||
|
||||
# Custom Logging levels
|
||||
|
||||
#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
|
||||
#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
|
||||
#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
|
||||
|
||||
# Jets3t library
|
||||
log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
|
||||
|
||||
#
|
||||
# Event Counter Appender
|
||||
# Sends counts of logging messages at different severity levels to Hadoop Metrics.
|
||||
#
|
||||
log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
|
||||
|
||||
#
|
||||
# Job Summary Appender
|
||||
#
|
||||
log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
|
||||
log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
|
||||
log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
|
||||
log4j.appender.JSA.DatePattern=.yyyy-MM-dd
|
||||
log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
|
||||
log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
|
||||
|
||||
#
|
||||
# MapReduce Audit Log Appender
|
||||
#
|
||||
|
||||
# Set the MapReduce audit log filename
|
||||
#hadoop.mapreduce.audit.log.file=hadoop-mapreduce.audit.log
|
||||
|
||||
# Appender for AuditLogger.
|
||||
# Requires the following system properties to be set
|
||||
# - hadoop.log.dir (Hadoop Log directory)
|
||||
# - hadoop.mapreduce.audit.log.file (MapReduce audit log filename)
|
||||
|
||||
#log4j.logger.org.apache.hadoop.mapred.AuditLogger=INFO,MRAUDIT
|
||||
#log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
|
||||
#log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
|
||||
#log4j.appender.MRAUDIT.File=${hadoop.log.dir}/${hadoop.mapreduce.audit.log.file}
|
||||
#log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
|
||||
#log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
|
||||
#log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
|
||||
|
||||
#
|
||||
# Yarn ResourceManager Application Summary Log
|
||||
#
|
||||
# Set the ResourceManager summary log filename
|
||||
#yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
|
||||
# Set the ResourceManager summary log level and appender
|
||||
#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
|
||||
|
||||
# Appender for ResourceManager Application Summary Log - rolled daily
|
||||
# Requires the following properties to be set
|
||||
# - hadoop.log.dir (Hadoop Log directory)
|
||||
# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
|
||||
# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
|
||||
|
||||
#log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
|
||||
#log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
|
||||
#log4j.appender.RMSUMMARY=org.apache.log4j.DailyRollingFileAppender
|
||||
#log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
|
||||
#log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
|
||||
#log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
|
||||
#log4j.appender.RMSUMMARY.DatePattern=.yyyy-MM-dd
@@ -808,8 +808,8 @@
  </property>

  <property>
    <name>hadoop.http.authentication.signature.secret</name>
    <value>hadoop</value>
    <name>hadoop.http.authentication.signature.secret.file</name>
    <value>${user.home}/hadoop-http-auth-signature-secret</value>
    <description>
      The signature secret for signing the authentication tokens.
      If not set a random secret is generated at startup time.
@@ -58,7 +58,7 @@ public class TestConfiguration extends TestCase {
  }

  private void startConfig() throws IOException{
    out.write("<?xml version=\"1.0\"?>\n");
    out.write("<?xml version=\"1.1\"?>\n");
    out.write("<configuration>\n");
  }
@@ -221,6 +221,18 @@ public class TestConfiguration extends TestCase {
    assertEquals("this contains a comment", conf.get("my.comment"));
  }

  public void testControlAInValue() throws IOException {
    out = new BufferedWriter(new FileWriter(CONFIG));
    startConfig();
    appendProperty("my.char", "");
    appendProperty("my.string", "somestring");
    endConfig();
    Path fileResource = new Path(CONFIG);
    conf.addResource(fileResource);
    assertEquals("\u0001", conf.get("my.char"));
    assertEquals("some\u0001string", conf.get("my.string"));
  }

  public void testTrim() throws IOException {
    out=new BufferedWriter(new FileWriter(CONFIG));
    startConfig();
@@ -298,7 +310,7 @@ public class TestConfiguration extends TestCase {
    conf.writeXml(baos);
    String result = baos.toString();
    assertTrue("Result has proper header", result.startsWith(
        "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?><configuration>"));
        "<?xml version=\"1.1\" encoding=\"UTF-8\" standalone=\"no\"?><configuration>"));
    assertTrue("Result has proper footer", result.endsWith("</configuration>"));
  }
@@ -20,40 +20,48 @@ package org.apache.hadoop.fs;
import java.io.File;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.util.Shell;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
import org.junit.runners.Parameterized.Parameters;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
/** This test LocalDirAllocator works correctly;
|
||||
* Every test case uses different buffer dirs to
|
||||
* Every test case uses different buffer dirs to
|
||||
* enforce the AllocatorPerContext initialization.
|
||||
* This test does not run on Cygwin because under Cygwin
|
||||
* a directory can be created in a read-only directory
|
||||
* which breaks this test.
|
||||
*/
|
||||
public class TestLocalDirAllocator extends TestCase {
|
||||
*/
|
||||
@RunWith(Parameterized.class)
|
||||
public class TestLocalDirAllocator {
|
||||
final static private Configuration conf = new Configuration();
|
||||
final static private String BUFFER_DIR_ROOT = "build/test/temp";
|
||||
final static private String ABSOLUTE_DIR_ROOT;
|
||||
final static private String QUALIFIED_DIR_ROOT;
|
||||
final static private Path BUFFER_PATH_ROOT = new Path(BUFFER_DIR_ROOT);
|
||||
final static private File BUFFER_ROOT = new File(BUFFER_DIR_ROOT);
|
||||
final static private String BUFFER_DIR[] = new String[] {
|
||||
BUFFER_DIR_ROOT+"/tmp0", BUFFER_DIR_ROOT+"/tmp1", BUFFER_DIR_ROOT+"/tmp2",
|
||||
BUFFER_DIR_ROOT+"/tmp3", BUFFER_DIR_ROOT+"/tmp4", BUFFER_DIR_ROOT+"/tmp5",
|
||||
BUFFER_DIR_ROOT+"/tmp6"};
|
||||
final static private Path BUFFER_PATH[] = new Path[] {
|
||||
new Path(BUFFER_DIR[0]), new Path(BUFFER_DIR[1]), new Path(BUFFER_DIR[2]),
|
||||
new Path(BUFFER_DIR[3]), new Path(BUFFER_DIR[4]), new Path(BUFFER_DIR[5]),
|
||||
new Path(BUFFER_DIR[6])};
|
||||
final static private String CONTEXT = "dfs.client.buffer.dir";
|
||||
final static private String CONTEXT = "fs.client.buffer.dir";
|
||||
final static private String FILENAME = "block";
|
||||
final static private LocalDirAllocator dirAllocator =
|
||||
final static private LocalDirAllocator dirAllocator =
|
||||
new LocalDirAllocator(CONTEXT);
|
||||
static LocalFileSystem localFs;
|
||||
final static private boolean isWindows =
|
||||
System.getProperty("os.name").startsWith("Windows");
|
||||
final static int SMALL_FILE_SIZE = 100;
|
||||
final static private String RELATIVE = "/RELATIVE";
|
||||
final static private String ABSOLUTE = "/ABSOLUTE";
|
||||
final static private String QUALIFIED = "/QUALIFIED";
|
||||
final private String ROOT;
|
||||
final private String PREFIX;
|
||||
|
||||
static {
|
||||
try {
|
||||
localFs = FileSystem.getLocal(conf);
|
||||
|
@@ -63,170 +71,214 @@ public class TestLocalDirAllocator extends TestCase {
e.printStackTrace();
|
||||
System.exit(-1);
|
||||
}
|
||||
|
||||
ABSOLUTE_DIR_ROOT = new Path(localFs.getWorkingDirectory(),
|
||||
BUFFER_DIR_ROOT).toUri().getPath();
|
||||
QUALIFIED_DIR_ROOT = new Path(localFs.getWorkingDirectory(),
|
||||
BUFFER_DIR_ROOT).toUri().toString();
|
||||
}
|
||||
|
||||
public TestLocalDirAllocator(String root, String prefix) {
|
||||
ROOT = root;
|
||||
PREFIX = prefix;
|
||||
}
|
||||
|
||||
@Parameters
|
||||
public static Collection<Object[]> params() {
|
||||
Object [][] data = new Object[][] {
|
||||
{ BUFFER_DIR_ROOT, RELATIVE },
|
||||
{ ABSOLUTE_DIR_ROOT, ABSOLUTE },
|
||||
{ QUALIFIED_DIR_ROOT, QUALIFIED }
|
||||
};
|
||||
|
||||
return Arrays.asList(data);
|
||||
}
|
||||
|
||||
private static void rmBufferDirs() throws IOException {
|
||||
assertTrue(!localFs.exists(BUFFER_PATH_ROOT) ||
|
||||
localFs.delete(BUFFER_PATH_ROOT, true));
|
||||
}
|
||||
|
||||
private void validateTempDirCreation(int i) throws IOException {
|
||||
|
||||
private static void validateTempDirCreation(String dir) throws IOException {
|
||||
File result = createTempFile(SMALL_FILE_SIZE);
|
||||
assertTrue("Checking for " + BUFFER_DIR[i] + " in " + result + " - FAILED!",
|
||||
result.getPath().startsWith(new File(BUFFER_DIR[i], FILENAME).getPath()));
|
||||
assertTrue("Checking for " + dir + " in " + result + " - FAILED!",
|
||||
result.getPath().startsWith(new Path(dir, FILENAME).toUri().getPath()));
|
||||
}
|
||||
|
||||
private File createTempFile() throws IOException {
|
||||
File result = dirAllocator.createTmpFileForWrite(FILENAME, -1, conf);
|
||||
result.delete();
|
||||
return result;
|
||||
|
||||
private static File createTempFile() throws IOException {
|
||||
return createTempFile(-1);
|
||||
}
|
||||
|
||||
private File createTempFile(long size) throws IOException {
|
||||
|
||||
private static File createTempFile(long size) throws IOException {
|
||||
File result = dirAllocator.createTmpFileForWrite(FILENAME, size, conf);
|
||||
result.delete();
|
||||
return result;
|
||||
}
|
||||
|
||||
/** Two buffer dirs. The first dir does not exist & is on a read-only disk;
|
||||
|
||||
private String buildBufferDir(String dir, int i) {
|
||||
return dir + PREFIX + i;
|
||||
}
|
||||
|
||||
/** Two buffer dirs. The first dir does not exist & is on a read-only disk;
|
||||
* The second dir exists & is RW
|
||||
* @throws Exception
|
||||
*/
|
||||
@Test
|
||||
public void test0() throws Exception {
|
||||
if (isWindows) return;
|
||||
String dir0 = buildBufferDir(ROOT, 0);
|
||||
String dir1 = buildBufferDir(ROOT, 1);
|
||||
try {
|
||||
conf.set(CONTEXT, BUFFER_DIR[0]+","+BUFFER_DIR[1]);
|
||||
assertTrue(localFs.mkdirs(BUFFER_PATH[1]));
|
||||
conf.set(CONTEXT, dir0 + "," + dir1);
|
||||
assertTrue(localFs.mkdirs(new Path(dir1)));
|
||||
BUFFER_ROOT.setReadOnly();
|
||||
validateTempDirCreation(1);
|
||||
validateTempDirCreation(1);
|
||||
validateTempDirCreation(dir1);
|
||||
validateTempDirCreation(dir1);
|
||||
} finally {
|
||||
Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
|
||||
rmBufferDirs();
|
||||
}
|
||||
}
|
||||
|
||||
/** Two buffer dirs. The first dir exists & is on a read-only disk;
|
||||
|
||||
/** Two buffer dirs. The first dir exists & is on a read-only disk;
|
||||
* The second dir exists & is RW
|
||||
* @throws Exception
|
||||
*/
|
||||
@Test
|
||||
public void test1() throws Exception {
|
||||
if (isWindows) return;
|
||||
String dir1 = buildBufferDir(ROOT, 1);
|
||||
String dir2 = buildBufferDir(ROOT, 2);
|
||||
try {
|
||||
conf.set(CONTEXT, BUFFER_DIR[1]+","+BUFFER_DIR[2]);
|
||||
assertTrue(localFs.mkdirs(BUFFER_PATH[2]));
|
||||
conf.set(CONTEXT, dir1 + "," + dir2);
|
||||
assertTrue(localFs.mkdirs(new Path(dir2)));
|
||||
BUFFER_ROOT.setReadOnly();
|
||||
validateTempDirCreation(2);
|
||||
validateTempDirCreation(2);
|
||||
validateTempDirCreation(dir2);
|
||||
validateTempDirCreation(dir2);
|
||||
} finally {
|
||||
Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
|
||||
rmBufferDirs();
|
||||
}
|
||||
}
|
||||
/** Two buffer dirs. Both do not exist but on a RW disk.
|
||||
* Check if tmp dirs are allocated in a round-robin
|
||||
* Check if tmp dirs are allocated in a round-robin
|
||||
*/
|
||||
@Test
|
||||
public void test2() throws Exception {
|
||||
if (isWindows) return;
|
||||
String dir2 = buildBufferDir(ROOT, 2);
|
||||
String dir3 = buildBufferDir(ROOT, 3);
|
||||
try {
|
||||
conf.set(CONTEXT, BUFFER_DIR[2]+","+BUFFER_DIR[3]);
|
||||
conf.set(CONTEXT, dir2 + "," + dir3);
|
||||
|
||||
// create the first file, and then figure the round-robin sequence
|
||||
createTempFile(SMALL_FILE_SIZE);
|
||||
int firstDirIdx = (dirAllocator.getCurrentDirectoryIndex() == 0) ? 2 : 3;
|
||||
int secondDirIdx = (firstDirIdx == 2) ? 3 : 2;
|
||||
|
||||
|
||||
// check if tmp dirs are allocated in a round-robin manner
|
||||
validateTempDirCreation(firstDirIdx);
|
||||
validateTempDirCreation(secondDirIdx);
|
||||
validateTempDirCreation(firstDirIdx);
|
||||
validateTempDirCreation(buildBufferDir(ROOT, firstDirIdx));
|
||||
validateTempDirCreation(buildBufferDir(ROOT, secondDirIdx));
|
||||
validateTempDirCreation(buildBufferDir(ROOT, firstDirIdx));
|
||||
} finally {
|
||||
rmBufferDirs();
|
||||
}
|
||||
}
|
||||
|
||||
/** Two buffer dirs. Both exists and on a R/W disk.
|
||||
/** Two buffer dirs. Both exists and on a R/W disk.
|
||||
* Later disk1 becomes read-only.
|
||||
* @throws Exception
|
||||
*/
|
||||
@Test
|
||||
public void test3() throws Exception {
|
||||
if (isWindows) return;
|
||||
String dir3 = buildBufferDir(ROOT, 3);
|
||||
String dir4 = buildBufferDir(ROOT, 4);
|
||||
try {
|
||||
conf.set(CONTEXT, BUFFER_DIR[3]+","+BUFFER_DIR[4]);
|
||||
assertTrue(localFs.mkdirs(BUFFER_PATH[3]));
|
||||
assertTrue(localFs.mkdirs(BUFFER_PATH[4]));
|
||||
|
||||
// create the first file with size, and then figure the round-robin sequence
|
||||
conf.set(CONTEXT, dir3 + "," + dir4);
|
||||
assertTrue(localFs.mkdirs(new Path(dir3)));
|
||||
assertTrue(localFs.mkdirs(new Path(dir4)));
|
||||
|
||||
// Create the first small file
|
||||
createTempFile(SMALL_FILE_SIZE);
|
||||
|
||||
// Determine the round-robin sequence
|
||||
int nextDirIdx = (dirAllocator.getCurrentDirectoryIndex() == 0) ? 3 : 4;
|
||||
validateTempDirCreation(nextDirIdx);
|
||||
validateTempDirCreation(buildBufferDir(ROOT, nextDirIdx));
|
||||
|
||||
// change buffer directory 2 to be read only
|
||||
new File(BUFFER_DIR[4]).setReadOnly();
|
||||
validateTempDirCreation(3);
|
||||
validateTempDirCreation(3);
|
||||
new File(new Path(dir4).toUri().getPath()).setReadOnly();
|
||||
validateTempDirCreation(dir3);
|
||||
validateTempDirCreation(dir3);
|
||||
} finally {
|
||||
rmBufferDirs();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Two buffer dirs, on read-write disk.
|
||||
*
|
||||
*
|
||||
* Try to create a whole bunch of files.
|
||||
* Verify that they do indeed all get created where they should.
|
||||
*
|
||||
*
|
||||
* Would ideally check statistical properties of distribution, but
|
||||
* we don't have the nerve to risk false-positives here.
|
||||
*
|
||||
*
|
||||
* @throws Exception
|
||||
*/
|
||||
static final int TRIALS = 100;
|
||||
@Test
|
||||
public void test4() throws Exception {
|
||||
if (isWindows) return;
|
||||
String dir5 = buildBufferDir(ROOT, 5);
|
||||
String dir6 = buildBufferDir(ROOT, 6);
|
||||
try {
|
||||
|
||||
conf.set(CONTEXT, BUFFER_DIR[5]+","+BUFFER_DIR[6]);
|
||||
assertTrue(localFs.mkdirs(BUFFER_PATH[5]));
|
||||
assertTrue(localFs.mkdirs(BUFFER_PATH[6]));
|
||||
|
||||
conf.set(CONTEXT, dir5 + "," + dir6);
|
||||
assertTrue(localFs.mkdirs(new Path(dir5)));
|
||||
assertTrue(localFs.mkdirs(new Path(dir6)));
|
||||
|
||||
int inDir5=0, inDir6=0;
|
||||
for(int i = 0; i < TRIALS; ++i) {
|
||||
File result = createTempFile();
|
||||
if(result.getPath().startsWith(new File(BUFFER_DIR[5], FILENAME).getPath())) {
|
||||
if(result.getPath().startsWith(
|
||||
new Path(dir5, FILENAME).toUri().getPath())) {
|
||||
inDir5++;
|
||||
} else if(result.getPath().startsWith(new File(BUFFER_DIR[6], FILENAME).getPath())) {
|
||||
} else if(result.getPath().startsWith(
|
||||
new Path(dir6, FILENAME).toUri().getPath())) {
|
||||
inDir6++;
|
||||
}
|
||||
result.delete();
|
||||
}
|
||||
|
||||
assertTrue( inDir5 + inDir6 == TRIALS);
|
||||
|
||||
|
||||
assertTrue(inDir5 + inDir6 == TRIALS);
|
||||
|
||||
} finally {
|
||||
rmBufferDirs();
|
||||
}
|
||||
}
|
||||
|
||||
/** Two buffer dirs. The first dir does not exist & is on a read-only disk;
|
||||
|
||||
/** Two buffer dirs. The first dir does not exist & is on a read-only disk;
|
||||
* The second dir exists & is RW
|
||||
* getLocalPathForWrite with checkAccess set to false should create a parent
|
||||
* directory. With checkAccess true, the directory should not be created.
|
||||
* @throws Exception
|
||||
*/
|
||||
@Test
|
||||
public void testLocalPathForWriteDirCreation() throws IOException {
|
||||
String dir0 = buildBufferDir(ROOT, 0);
|
||||
String dir1 = buildBufferDir(ROOT, 1);
|
||||
try {
|
||||
conf.set(CONTEXT, BUFFER_DIR[0] + "," + BUFFER_DIR[1]);
|
||||
assertTrue(localFs.mkdirs(BUFFER_PATH[1]));
|
||||
conf.set(CONTEXT, dir0 + "," + dir1);
|
||||
assertTrue(localFs.mkdirs(new Path(dir1)));
|
||||
BUFFER_ROOT.setReadOnly();
|
||||
Path p1 =
|
||||
dirAllocator.getLocalPathForWrite("p1/x", SMALL_FILE_SIZE, conf);
|
||||
dirAllocator.getLocalPathForWrite("p1/x", SMALL_FILE_SIZE, conf);
|
||||
assertTrue(localFs.getFileStatus(p1.getParent()).isDirectory());
|
||||
|
||||
Path p2 =
|
||||
dirAllocator.getLocalPathForWrite("p2/x", SMALL_FILE_SIZE, conf,
|
||||
false);
|
||||
dirAllocator.getLocalPathForWrite("p2/x", SMALL_FILE_SIZE, conf,
|
||||
false);
|
||||
try {
|
||||
localFs.getFileStatus(p2.getParent());
|
||||
} catch (Exception e) {
|
||||
|
@@ -237,5 +289,26 @@ public class TestLocalDirAllocator extends TestCase {
rmBufferDirs();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/** Test no side effect files are left over. After creating a temp
 * file, remove both the temp file and its parent. Verify that
|
||||
* no files or directories are left over as can happen when File objects
|
||||
* are mistakenly created from fully qualified path strings.
|
||||
* @throws IOException
|
||||
*/
|
||||
@Test
|
||||
public void testNoSideEffects() throws IOException {
|
||||
if (isWindows) return;
|
||||
String dir = buildBufferDir(ROOT, 0);
|
||||
try {
|
||||
conf.set(CONTEXT, dir);
|
||||
File result = dirAllocator.createTmpFileForWrite(FILENAME, -1, conf);
|
||||
assertTrue(result.delete());
|
||||
assertTrue(result.getParentFile().delete());
|
||||
assertFalse(new File(dir).exists());
|
||||
} finally {
|
||||
Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
|
||||
rmBufferDirs();
|
||||
}
|
||||
}
|
||||
}
|
@@ -486,6 +486,9 @@ public class TestTrash extends TestCase {
    conf.set(FS_TRASH_INTERVAL_KEY, "0.2"); // 12 seconds
    conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
    conf.set(FS_TRASH_CHECKPOINT_INTERVAL_KEY, "0.1"); // 6 seconds
    FileSystem fs = FileSystem.getLocal(conf);
    conf.set("fs.default.name", fs.getUri().toString());

    Trash trash = new Trash(conf);

    // Start Emptier in background
@@ -493,8 +496,6 @@
    Thread emptierThread = new Thread(emptier);
    emptierThread.start();

    FileSystem fs = FileSystem.getLocal(conf);
    conf.set("fs.defaultFS", fs.getUri().toString());
    FsShell shell = new FsShell();
    shell.setConf(conf);
    shell.init();
@@ -70,6 +70,21 @@ public class HttpServerFunctionalTest extends Assert {
    return createServer(TEST, conf);
  }

  /**
   * Create but do not start the test webapp server. The test webapp dir is
   * prepared/checked in advance.
   * @param conf the server configuration to use
   * @return the server instance
   *
   * @throws IOException if a problem occurs
   * @throws AssertionError if a condition was not met
   */
  public static HttpServer createTestServer(Configuration conf,
      String[] pathSpecs) throws IOException {
    prepareTestWebapp();
    return createServer(TEST, conf, pathSpecs);
  }

  /**
   * Prepare the test webapp by creating the directory from the test properties
   * fail if the directory cannot be created.
@@ -104,6 +119,18 @@
      throws IOException {
    return new HttpServer(webapp, "0.0.0.0", 0, true, conf);
  }

  /**
   * Create an HttpServer instance for the given webapp
   * @param webapp the webapp to work with
   * @param conf the configuration to use for the server
   * @param pathSpecs the paths specifications the server will service
   * @return the server
   * @throws IOException if it could not be created
   */
  public static HttpServer createServer(String webapp, Configuration conf,
      String[] pathSpecs) throws IOException {
    return new HttpServer(webapp, "0.0.0.0", 0, true, conf, pathSpecs);
  }

  /**
   * Create and start a server with the test webapp
@@ -0,0 +1,145 @@
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.http;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.net.URL;
|
||||
import java.net.URLConnection;
|
||||
import java.util.Set;
|
||||
import java.util.TreeSet;
|
||||
|
||||
import javax.servlet.Filter;
|
||||
import javax.servlet.FilterChain;
|
||||
import javax.servlet.FilterConfig;
|
||||
import javax.servlet.ServletException;
|
||||
import javax.servlet.ServletRequest;
|
||||
import javax.servlet.ServletResponse;
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.junit.Test;
|
||||
|
||||
public class TestPathFilter extends HttpServerFunctionalTest {
|
||||
static final Log LOG = LogFactory.getLog(HttpServer.class);
|
||||
static final Set<String> RECORDS = new TreeSet<String>();
|
||||
|
||||
/** A very simple filter that records accessed uri's */
|
||||
static public class RecordingFilter implements Filter {
|
||||
private FilterConfig filterConfig = null;
|
||||
|
||||
public void init(FilterConfig filterConfig) {
|
||||
this.filterConfig = filterConfig;
|
||||
}
|
||||
|
||||
public void destroy() {
|
||||
this.filterConfig = null;
|
||||
}
|
||||
|
||||
public void doFilter(ServletRequest request, ServletResponse response,
|
||||
FilterChain chain) throws IOException, ServletException {
|
||||
if (filterConfig == null)
|
||||
return;
|
||||
|
||||
String uri = ((HttpServletRequest)request).getRequestURI();
|
||||
LOG.info("filtering " + uri);
|
||||
RECORDS.add(uri);
|
||||
chain.doFilter(request, response);
|
||||
}
|
||||
|
||||
/** Configuration for RecordingFilter */
|
||||
static public class Initializer extends FilterInitializer {
|
||||
public Initializer() {}
|
||||
|
||||
public void initFilter(FilterContainer container, Configuration conf) {
|
||||
container.addFilter("recording", RecordingFilter.class.getName(), null);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/** access a url, ignoring some IOException such as the page does not exist */
|
||||
static void access(String urlstring) throws IOException {
|
||||
LOG.warn("access " + urlstring);
|
||||
URL url = new URL(urlstring);
|
||||
|
||||
URLConnection connection = url.openConnection();
|
||||
connection.connect();
|
||||
|
||||
try {
|
||||
BufferedReader in = new BufferedReader(new InputStreamReader(
|
||||
connection.getInputStream()));
|
||||
try {
|
||||
for(; in.readLine() != null; );
|
||||
} finally {
|
||||
in.close();
|
||||
}
|
||||
} catch(IOException ioe) {
|
||||
LOG.warn("urlstring=" + urlstring, ioe);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPathSpecFilters() throws Exception {
|
||||
Configuration conf = new Configuration();
|
||||
|
||||
//start a http server with CountingFilter
|
||||
conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
|
||||
RecordingFilter.Initializer.class.getName());
|
||||
String[] pathSpecs = { "/path", "/path/*" };
|
||||
HttpServer http = createTestServer(conf, pathSpecs);
|
||||
http.start();
|
||||
|
||||
final String baseURL = "/path";
|
||||
final String baseSlashURL = "/path/";
|
||||
final String addedURL = "/path/nodes";
|
||||
final String addedSlashURL = "/path/nodes/";
|
||||
final String longURL = "/path/nodes/foo/job";
|
||||
final String rootURL = "/";
|
||||
final String allURL = "/*";
|
||||
|
||||
final String[] filteredUrls = {baseURL, baseSlashURL, addedURL,
|
||||
addedSlashURL, longURL};
|
||||
final String[] notFilteredUrls = {rootURL, allURL};
|
||||
|
||||
// access the urls and verify our paths specs got added to the
|
||||
// filters
|
||||
final String prefix = "http://localhost:" + http.getPort();
|
||||
try {
|
||||
for(int i = 0; i < filteredUrls.length; i++) {
|
||||
access(prefix + filteredUrls[i]);
|
||||
}
|
||||
for(int i = 0; i < notFilteredUrls.length; i++) {
|
||||
access(prefix + notFilteredUrls[i]);
|
||||
}
|
||||
} finally {
|
||||
http.stop();
|
||||
}
|
||||
|
||||
LOG.info("RECORDS = " + RECORDS);
|
||||
|
||||
//verify records
|
||||
for(int i = 0; i < filteredUrls.length; i++) {
|
||||
assertTrue(RECORDS.remove(filteredUrls[i]));
|
||||
}
|
||||
assertTrue(RECORDS.isEmpty());
|
||||
}
|
||||
}
@@ -18,13 +18,17 @@
package org.apache.hadoop.net;

import org.junit.Test;

import static org.junit.Assert.*;

import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.Socket;
import java.net.ConnectException;
import java.net.SocketException;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.Enumeration;

import org.apache.hadoop.conf.Configuration;

@@ -88,4 +92,32 @@ public class TestNetUtils {
      fail("NetUtils.verifyHostnames threw unexpected UnknownHostException");
    }
  }

  /**
   * Test for {@link NetUtils#isLocalAddress(java.net.InetAddress)}
   */
  @Test
  public void testIsLocalAddress() throws Exception {
    // Test - local host is local address
    assertTrue(NetUtils.isLocalAddress(InetAddress.getLocalHost()));

    // Test - all addresses bound network interface is local address
    Enumeration<NetworkInterface> interfaces = NetworkInterface
        .getNetworkInterfaces();
    if (interfaces != null) { // Iterate through all network interfaces
      while (interfaces.hasMoreElements()) {
        NetworkInterface i = interfaces.nextElement();
        Enumeration<InetAddress> addrs = i.getInetAddresses();
        if (addrs == null) {
          continue;
        }
        // Iterate through all the addresses of a network interface
        while (addrs.hasMoreElements()) {
          InetAddress addr = addrs.nextElement();
          assertTrue(NetUtils.isLocalAddress(addr));
        }
      }
    }
    assertFalse(NetUtils.isLocalAddress(InetAddress.getByName("8.8.8.8")));
  }
}
@@ -25,14 +25,28 @@ import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import java.io.File;
import java.io.FileWriter;
import java.io.Writer;
import java.util.Map;

public class TestAuthenticationFilter extends TestCase {

  @SuppressWarnings("unchecked")
  public void testConfiguration() {
  public void testConfiguration() throws Exception {
    Configuration conf = new Configuration();
    conf.set("hadoop.http.authentication.foo", "bar");

    File testDir = new File(System.getProperty("test.build.data",
        "target/test-dir"));
    testDir.mkdirs();
    File secretFile = new File(testDir, "http-secret.txt");
    Writer writer = new FileWriter(new File(testDir, "http-secret.txt"));
    writer.write("hadoop");
    writer.close();
    conf.set(AuthenticationFilterInitializer.PREFIX +
        AuthenticationFilterInitializer.SIGNATURE_SECRET_FILE,
        secretFile.getAbsolutePath());

    FilterContainer container = Mockito.mock(FilterContainer.class);
    Mockito.doAnswer(
@@ -16,6 +16,9 @@ Trunk (unreleased changes)
    HDFS-2318. Provide authentication to webhdfs using SPNEGO and delegation
    tokens. (szetszwo)

    HDFS-2340. Support getFileBlockLocations and getDelegationToken in webhdfs.
    (szetszwo)

  IMPROVEMENTS

    HADOOP-7524 Change RPC to allow multiple protocols including multiple versions of the same protocol (sanjay Radia)

@@ -35,6 +38,18 @@ Trunk (unreleased changes)
    not use ArrayWritable for writing non-array items. (Uma Maheswara Rao G
    via szetszwo)

    HDFS-2351 Change Namenode and Datanode to register each of their protocols
    separately. (Sanjay Radia)

    HDFS-2356. Support case insensitive query parameter names in webhdfs.
    (szetszwo)

    HDFS-2368. Move SPNEGO conf properties from hdfs-default.xml to
    hdfs-site.xml. (szetszwo)

    HDFS-2355. Federation: enable using the same configuration file across
    all the nodes in the cluster. (suresh)

  BUG FIXES
    HDFS-2287. TestParallelRead has a small off-by-one bug. (todd)

@@ -57,6 +72,17 @@ Trunk (unreleased changes)
    IOExceptions of stream closures can mask root exceptions. (Uma Maheswara
    Rao G via szetszwo)

    HDFS-46. Change default namespace quota of root directory from
    Integer.MAX_VALUE to Long.MAX_VALUE. (Uma Maheswara Rao G via szetszwo)

    HDFS-2366. Initialize WebHdfsFileSystem.ugi in object construction.
    (szetszwo)

    HDFS-2373. Commands using webhdfs and hftp print unnecessary debug
    info on the console with security enabled. (Arpit Gupta via suresh)

    HDFS-2361. hftp is broken, fixed username checks in JspHelper. (jitendra)

Release 0.23.0 - Unreleased

  INCOMPATIBLE CHANGES
@@ -739,6 +765,12 @@ Release 0.23.0 - Unreleased
    HDFS-1217. Change some NameNode methods from public to package private.
    (Laxman via szetszwo)

    HDFS-2332. Add test for HADOOP-7629 (using an immutable FsPermission
    object as an RPC parameter fails). (todd)

    HDFS-2363. Move datanodes size printing from FSNamesystem.metasave(..)
    to BlockManager. (Uma Maheswara Rao G via szetszwo)

  OPTIMIZATIONS

    HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
@@ -1607,7 +1639,11 @@ Release 0.22.0 - Unreleased
    HDFS-2232. Generalize regular expressions in TestHDFSCLI.
    (Plamen Jeliazkov via shv)

    HDFS-2290. Block with corrupt replica is not getting replicated.
    (Benoy Antony via shv)

Release 0.21.1 - Unreleased

    HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)

    HDFS-874. TestHDFSFileContextMainOperations fails on weirdly
@@ -38,6 +38,7 @@ import java.util.Random;
import java.util.StringTokenizer;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
@@ -577,17 +578,6 @@ public class DFSUtil {
    }
  }

  /**
   * Returns the configured nameservice Id
   *
   * @param conf
   *          Configuration object to lookup the nameserviceId
   * @return nameserviceId string from conf
   */
  public static String getNameServiceId(Configuration conf) {
    return conf.get(DFS_FEDERATION_NAMESERVICE_ID);
  }

  /** Return used as percentage of capacity */
  public static float getPercentUsed(long used, long capacity) {
    return capacity <= 0 ? 100 : ((float)used * 100.0f)/(float)capacity;
@ -707,4 +697,77 @@ public class DFSUtil {
|
|||
// TODO:HA configuration changes pending
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get name service Id for the {@link NameNode} based on namenode RPC address
|
||||
* matching the local node address.
|
||||
*/
|
||||
public static String getNamenodeNameServiceId(Configuration conf) {
|
||||
return getNameServiceId(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get name service Id for the BackupNode based on backup node RPC address
|
||||
* matching the local node address.
|
||||
*/
|
||||
public static String getBackupNameServiceId(Configuration conf) {
|
||||
return getNameServiceId(conf, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get name service Id for the secondary node based on secondary http address
|
||||
* matching the local node address.
|
||||
*/
|
||||
public static String getSecondaryNameServiceId(Configuration conf) {
|
||||
return getNameServiceId(conf, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the nameservice Id by matching the {@code addressKey} with the
|
||||
* the address of the local node.
|
||||
*
|
||||
* If {@link DFSConfigKeys#DFS_FEDERATION_NAMESERVICE_ID} is not specifically
|
||||
* configured, this method determines the nameservice Id by matching the local
|
||||
* nodes address with the configured addresses. When a match is found, it
|
||||
* returns the nameservice Id from the corresponding configuration key.
|
||||
*
|
||||
* @param conf Configuration
|
||||
* @param addressKey configuration key to get the address.
|
||||
* @return name service Id on success, null on failure.
|
||||
* @throws HadoopIllegalArgumentException on error
|
||||
*/
|
||||
private static String getNameServiceId(Configuration conf, String addressKey) {
|
||||
String nameserviceId = conf.get(DFS_FEDERATION_NAMESERVICE_ID);
|
||||
if (nameserviceId != null) {
|
||||
return nameserviceId;
|
||||
}
|
||||
|
||||
Collection<String> ids = getNameServiceIds(conf);
|
||||
if (ids == null || ids.size() == 0) {
|
||||
// Not federation configuration, hence no nameservice Id
|
||||
return null;
|
||||
}
|
||||
|
||||
// Match the rpc address with that of local address
|
||||
int found = 0;
|
||||
for (String id : ids) {
|
||||
String addr = conf.get(getNameServiceIdKey(addressKey, id));
|
||||
InetSocketAddress s = NetUtils.createSocketAddr(addr);
|
||||
if (NetUtils.isLocalAddress(s.getAddress())) {
|
||||
nameserviceId = id;
|
||||
found++;
|
||||
}
|
||||
}
|
||||
if (found > 1) { // Only one address must match the local address
|
||||
throw new HadoopIllegalArgumentException(
|
||||
"Configuration has multiple RPC addresses that matches "
|
||||
+ "the local node's address. Please configure the system with "
|
||||
+ "the parameter " + DFS_FEDERATION_NAMESERVICE_ID);
|
||||
}
|
||||
if (found == 0) {
|
||||
throw new HadoopIllegalArgumentException("Configuration address "
|
||||
+ addressKey + " is missing in configuration with name service Id");
|
||||
}
|
||||
return nameserviceId;
|
||||
}
|
||||
}
|
||||
|
|
|
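The resolution logic above consults the explicit dfs.federation.nameservice.id setting first and otherwise matches each configured per-nameservice address against the local node's interfaces. A minimal, hedged usage sketch follows; the key names (dfs.federation.nameservices, dfs.namenode.rpc-address.<nsId>) and the addresses are assumptions for illustration, and only DFSUtil.getNamenodeNameServiceId comes from this patch.

// Sketch only: resolve the local NameNode's nameservice id from an assumed
// federated configuration. Key names and addresses are illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;

public class NameServiceIdExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Two nameservices; only ns1's RPC address resolves to this machine.
    conf.set("dfs.federation.nameservices", "ns1,ns2");
    conf.set("dfs.namenode.rpc-address.ns1", "localhost:8020");
    conf.set("dfs.namenode.rpc-address.ns2", "192.0.2.1:8020");

    // Matches the rpc addresses against local interfaces and prints "ns1".
    // If dfs.federation.nameservice.id were set explicitly, that value would
    // be returned without any matching.
    System.out.println(DFSUtil.getNamenodeNameServiceId(conf));
  }
}
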
@@ -115,6 +115,26 @@ public class DatanodeInfo extends DatanodeID implements Node {
this.location = location;
this.hostName = hostName;
}

/** Constructor */
public DatanodeInfo(final String name, final String storageID,
final int infoPort, final int ipcPort,
final long capacity, final long dfsUsed, final long remaining,
final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
final String networkLocation, final String hostName,
final AdminStates adminState) {
super(name, storageID, infoPort, ipcPort);

this.capacity = capacity;
this.dfsUsed = dfsUsed;
this.remaining = remaining;
this.blockPoolUsed = blockPoolUsed;
this.lastUpdate = lastUpdate;
this.xceiverCount = xceiverCount;
this.location = networkLocation;
this.hostName = hostName;
this.adminState = adminState;
}

/** The raw capacity. */
public long getCapacity() { return capacity; }

@@ -308,6 +308,11 @@ public class BlockManager {
/** Dump meta data to out. */
public void metaSave(PrintWriter out) {
assert namesystem.hasWriteLock();
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
datanodeManager.fetchDatanodes(live, dead, false);
out.println("Live Datanodes: " + live.size());
out.println("Dead Datanodes: " + dead.size());
//
// Dump contents of neededReplication
//

@@ -842,7 +847,7 @@ public class BlockManager {

// Add this replica to corruptReplicas Map
corruptReplicas.addToCorruptReplicasMap(storedBlock, node);
if (countNodes(storedBlock).liveReplicas() > inode.getReplication()) {
if (countNodes(storedBlock).liveReplicas() >= inode.getReplication()) {
// the block is over-replicated so invalidate the replicas immediately
invalidateBlock(storedBlock, node);
} else if (namesystem.isPopulatingReplQueues()) {

@@ -867,7 +872,7 @@ public class BlockManager {
// Check how many copies we have of the block. If we have at least one
// copy on a live node, then we can delete it.
int count = countNodes(blk).liveReplicas();
if (count > 1) {
if (count >= 1) {
addToInvalidates(blk, dn);
removeStoredBlock(blk, node);
if(NameNode.stateChangeLog.isDebugEnabled()) {

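The two one-character changes above relax a strict inequality: once the live replica count has reached the expected replication, the corrupt or surplus replica is invalidated immediately instead of waiting for one extra copy, and the second hunk brings the test in line with its own comment ("at least one copy on a live node"). A tiny hedged illustration of the boundary, with invented values:

// Illustration only (values invented): with replication already satisfied,
// the old strict test skipped immediate invalidation; the new test fires.
public class ReplicaBoundaryExample {
  public static void main(String[] args) {
    int expectedReplication = 3;
    int liveReplicas = 3;
    System.out.println(liveReplicas > expectedReplication);   // false (old test)
    System.out.println(liveReplicas >= expectedReplication);  // true  (new test)
  }
}
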
@@ -54,11 +54,13 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
import org.apache.hadoop.hdfs.web.resources.DelegationParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.http.HtmlQuoting;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.token.Token;

@@ -68,7 +70,7 @@ import org.apache.hadoop.util.VersionInfo;
public class JspHelper {
public static final String CURRENT_CONF = "current.conf";
final static public String WEB_UGI_PROPERTY_NAME = DFSConfigKeys.DFS_WEB_UGI_KEY;
public static final String DELEGATION_PARAMETER_NAME = "delegation";
public static final String DELEGATION_PARAMETER_NAME = DelegationParam.NAME;
public static final String NAMENODE_ADDRESS = "nnaddr";
static final String SET_DELEGATION = "&" + DELEGATION_PARAMETER_NAME +
"=";

@@ -551,7 +553,8 @@ public class JspHelper {
DelegationTokenIdentifier id = new DelegationTokenIdentifier();
id.readFields(in);
ugi = id.getUser();
checkUsername(ugi.getUserName(), user);
checkUsername(ugi.getShortUserName(), usernameFromQuery);
checkUsername(ugi.getShortUserName(), user);
ugi.addToken(token);
ugi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
} else {

@@ -560,13 +563,11 @@ public class JspHelper {
"authenticated by filter");
}
ugi = UserGroupInformation.createRemoteUser(user);
checkUsername(ugi.getShortUserName(), usernameFromQuery);
// This is not necessarily true, could have been auth'ed by user-facing
// filter
ugi.setAuthenticationMethod(secureAuthMethod);
}

checkUsername(user, usernameFromQuery);

} else { // Security's not on, pull from url
ugi = usernameFromQuery == null?
getDefaultWebUser(conf) // not specified in request

@@ -579,10 +580,18 @@ public class JspHelper {
return ugi;
}

/**
* Expected user name should be a short name.
*/
private static void checkUsername(final String expected, final String name
) throws IOException {
if (name != null && !name.equals(expected)) {
throw new IOException("Usernames not matched: name=" + name
if (name == null) {
return;
}
KerberosName u = new KerberosName(name);
String shortName = u.getShortName();
if (!shortName.equals(expected)) {
throw new IOException("Usernames not matched: name=" + shortName
+ " != expected=" + expected);
}
}

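The rewritten checkUsername above compares short names: an incoming name is first reduced through KerberosName, so a realm-qualified principal and its short form are treated as the same user. A small hedged sketch of that normalization step; it mirrors the new logic rather than reusing the patch's code, and it assumes the auth_to_local rules have already been installed (by UserGroupInformation initialization or KerberosName.setRules) before realm-qualified principals are shortened.

// Sketch of the short-name comparison introduced above.
import java.io.IOException;
import org.apache.hadoop.security.authentication.util.KerberosName;

public class ShortNameCheckExample {
  static void check(String expected, String name) throws IOException {
    if (name == null) {
      return;                                   // nothing supplied to verify
    }
    // For a realm-qualified principal this applies the configured
    // auth_to_local rules; a bare short name is returned unchanged.
    String shortName = new KerberosName(name).getShortName();
    if (!shortName.equals(expected)) {
      throw new IOException("Usernames not matched: name=" + shortName
          + " != expected=" + expected);
    }
  }

  public static void main(String[] args) throws IOException {
    check("hdfs", "hdfs");   // passes: already a short name
    check("hdfs", null);     // passes: nothing to check
  }
}
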
@@ -425,7 +425,7 @@ public class DataNode extends Configured
private List<ServicePlugin> plugins;

// For InterDataNodeProtocol
public Server ipcServer;
public RPC.Server ipcServer;

private SecureResources secureResources = null;
private AbstractList<File> dataDirs;

@@ -575,11 +575,15 @@ public class DataNode extends Configured
private void initIpcServer(Configuration conf) throws IOException {
InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
conf.get("dfs.datanode.ipc.address"));
ipcServer = RPC.getServer(DataNode.class, this, ipcAddr.getHostName(),

// Add all the RPC protocols that the Datanode implements
ipcServer = RPC.getServer(ClientDatanodeProtocol.class, this, ipcAddr.getHostName(),
ipcAddr.getPort(),
conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY,
DFS_DATANODE_HANDLER_COUNT_DEFAULT),
false, conf, blockPoolTokenSecretManager);
ipcServer.addProtocol(InterDatanodeProtocol.class, this);

// set service-level authorization security policy
if (conf.getBoolean(
CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {

@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.web.ParamFilter;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;

@@ -66,8 +67,11 @@ import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;

import com.sun.jersey.spi.container.ResourceFilters;

/** Web-hdfs DataNode implementation. */
@Path("")
@ResourceFilters(ParamFilter.class)
public class DatanodeWebHdfsMethods {
public static final Log LOG = LogFactory.getLog(DatanodeWebHdfsMethods.class);

@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;

@@ -372,4 +373,9 @@ public class BackupNode extends NameNode {
throw new UnsupportedActionException(msg);
}
}

@Override
protected String getNameServiceId(Configuration conf) {
return DFSUtil.getBackupNameServiceId(conf);
}
}

@@ -120,7 +120,7 @@ public class FSDirectory implements Closeable {
this.cond = dirLock.writeLock().newCondition();
rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
ns.createFsOwnerPermissions(new FsPermission((short)0755)),
Integer.MAX_VALUE, UNKNOWN_DISK_SPACE);
Long.MAX_VALUE, UNKNOWN_DISK_SPACE);
this.fsImage = fsImage;
int configuredLimit = conf.getInt(
DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);

@@ -564,11 +564,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
out.println(totalInodes + " files and directories, " + totalBlocks
+ " blocks = " + (totalInodes + totalBlocks) + " total");

final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
blockManager.getDatanodeManager().fetchDatanodes(live, dead, false);
out.println("Live Datanodes: "+live.size());
out.println("Dead Datanodes: "+dead.size());
blockManager.metaSave(out);

out.flush();

@ -27,6 +27,7 @@ import java.util.List;
|
|||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.HadoopIllegalArgumentException;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.ha.HealthCheckFailedException;
|
||||
|
@ -380,7 +381,6 @@ public class NameNode {
|
|||
* @param conf the configuration
|
||||
*/
|
||||
protected void initialize(Configuration conf) throws IOException {
|
||||
initializeGenericKeys(conf);
|
||||
UserGroupInformation.setConfiguration(conf);
|
||||
loginAsNameNodeUser(conf);
|
||||
|
||||
|
@ -513,10 +513,14 @@ public class NameNode {
|
|||
this.haEnabled = DFSUtil.isHAEnabled(conf);
|
||||
this.state = !haEnabled ? ACTIVE_STATE : STANDBY_STATE;
|
||||
try {
|
||||
initializeGenericKeys(conf, getNameServiceId(conf));
|
||||
initialize(conf);
|
||||
} catch (IOException e) {
|
||||
this.stop();
|
||||
throw e;
|
||||
} catch (HadoopIllegalArgumentException e) {
|
||||
this.stop();
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -821,16 +825,16 @@ public class NameNode {
|
|||
* @param conf
|
||||
* Configuration object to lookup specific key and to set the value
|
||||
* to the key passed. Note the conf object is modified
|
||||
* @param nameserviceId name service Id
|
||||
* @see DFSUtil#setGenericConf(Configuration, String, String...)
|
||||
*/
|
||||
public static void initializeGenericKeys(Configuration conf) {
|
||||
final String nameserviceId = DFSUtil.getNameServiceId(conf);
|
||||
public static void initializeGenericKeys(Configuration conf, String
|
||||
nameserviceId) {
|
||||
if ((nameserviceId == null) || nameserviceId.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
DFSUtil.setGenericConf(conf, nameserviceId, NAMESERVICE_SPECIFIC_KEYS);
|
||||
|
||||
if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
|
||||
URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
|
||||
+ conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
|
||||
|
@ -838,6 +842,14 @@ public class NameNode {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the name service Id for the node
|
||||
* @return name service Id or null if federation is not configured
|
||||
*/
|
||||
protected String getNameServiceId(Configuration conf) {
|
||||
return DFSUtil.getNamenodeNameServiceId(conf);
|
||||
}
|
||||
|
||||
/**
|
||||
*/
|
||||
public static void main(String argv[]) throws Exception {
|
||||
|
|
|
@ -66,6 +66,7 @@ import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
|
|||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
|
||||
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
|
||||
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
|
||||
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
|
||||
|
@ -145,10 +146,17 @@ class NameNodeRpcServer implements NamenodeProtocols {
|
|||
serviceRpcServer = null;
|
||||
serviceRPCAddress = null;
|
||||
}
|
||||
this.server = RPC.getServer(NamenodeProtocols.class, this,
|
||||
// Add all the RPC protocols that the namenode implements
|
||||
this.server = RPC.getServer(ClientProtocol.class, this,
|
||||
socAddr.getHostName(), socAddr.getPort(),
|
||||
handlerCount, false, conf,
|
||||
namesystem.getDelegationTokenSecretManager());
|
||||
this.server.addProtocol(DatanodeProtocol.class, this);
|
||||
this.server.addProtocol(NamenodeProtocol.class, this);
|
||||
this.server.addProtocol(RefreshAuthorizationPolicyProtocol.class, this);
|
||||
this.server.addProtocol(RefreshUserMappingsProtocol.class, this);
|
||||
this.server.addProtocol(GetUserMappingsProtocol.class, this);
|
||||
|
||||
|
||||
// set service-level authorization security policy
|
||||
if (serviceAuthEnabled =
|
||||
|
@ -971,8 +979,11 @@ class NameNodeRpcServer implements NamenodeProtocols {
|
|||
}
|
||||
|
||||
private static String getClientMachine() {
|
||||
String clientMachine = Server.getRemoteAddress();
|
||||
if (clientMachine == null) {
|
||||
String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
|
||||
if (clientMachine == null) { //not a web client
|
||||
clientMachine = Server.getRemoteAddress();
|
||||
}
|
||||
if (clientMachine == null) { //not a RPC client
|
||||
clientMachine = "";
|
||||
}
|
||||
return clientMachine;
|
||||
|
|
|
@ -38,10 +38,12 @@ import org.apache.commons.cli.ParseException;
|
|||
import org.apache.commons.cli.PosixParser;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.HadoopIllegalArgumentException;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
|
||||
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
|
@ -173,12 +175,17 @@ public class SecondaryNameNode implements Runnable {
|
|||
public SecondaryNameNode(Configuration conf,
|
||||
CommandLineOpts commandLineOpts) throws IOException {
|
||||
try {
|
||||
NameNode.initializeGenericKeys(conf);
|
||||
NameNode.initializeGenericKeys(conf,
|
||||
DFSUtil.getSecondaryNameServiceId(conf));
|
||||
initialize(conf, commandLineOpts);
|
||||
} catch(IOException e) {
|
||||
shutdown();
|
||||
LOG.fatal("Failed to start secondary namenode. ", e);
|
||||
throw e;
|
||||
} catch(HadoopIllegalArgumentException e) {
|
||||
shutdown();
|
||||
LOG.fatal("Failed to start secondary namenode. ", e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.common.JspHelper;
|
|||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
|
||||
import org.apache.hadoop.hdfs.web.JsonUtil;
|
||||
import org.apache.hadoop.hdfs.web.ParamFilter;
|
||||
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
|
||||
import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
|
||||
|
@ -78,6 +79,7 @@ import org.apache.hadoop.hdfs.web.resources.PostOpParam;
|
|||
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.RenewerParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.UserParam;
|
||||
|
@ -89,10 +91,20 @@ import org.apache.hadoop.security.UserGroupInformation;
|
|||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.hadoop.security.token.TokenIdentifier;
|
||||
|
||||
import com.sun.jersey.spi.container.ResourceFilters;
|
||||
|
||||
/** Web-hdfs NameNode implementation. */
|
||||
@Path("")
|
||||
@ResourceFilters(ParamFilter.class)
|
||||
public class NamenodeWebHdfsMethods {
|
||||
private static final Log LOG = LogFactory.getLog(NamenodeWebHdfsMethods.class);
|
||||
public static final Log LOG = LogFactory.getLog(NamenodeWebHdfsMethods.class);
|
||||
|
||||
private static final ThreadLocal<String> REMOTE_ADDRESS = new ThreadLocal<String>();
|
||||
|
||||
/** @return the remote client address. */
|
||||
public static String getRemoteAddress() {
|
||||
return REMOTE_ADDRESS.get();
|
||||
}
|
||||
|
||||
private @Context ServletContext context;
|
||||
private @Context HttpServletRequest request;
|
||||
|
@ -215,6 +227,8 @@ public class NamenodeWebHdfsMethods {
|
|||
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
|
||||
@Override
|
||||
public Response run() throws IOException, URISyntaxException {
|
||||
REMOTE_ADDRESS.set(request.getRemoteAddr());
|
||||
try {
|
||||
|
||||
final String fullpath = path.getAbsolutePath();
|
||||
final NameNode namenode = (NameNode)context.getAttribute("name.node");
|
||||
|
@ -272,6 +286,10 @@ public class NamenodeWebHdfsMethods {
|
|||
default:
|
||||
throw new UnsupportedOperationException(op + " is not supported");
|
||||
}
|
||||
|
||||
} finally {
|
||||
REMOTE_ADDRESS.set(null);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
@ -301,6 +319,8 @@ public class NamenodeWebHdfsMethods {
|
|||
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
|
||||
@Override
|
||||
public Response run() throws IOException, URISyntaxException {
|
||||
REMOTE_ADDRESS.set(request.getRemoteAddr());
|
||||
try {
|
||||
|
||||
final String fullpath = path.getAbsolutePath();
|
||||
final NameNode namenode = (NameNode)context.getAttribute("name.node");
|
||||
|
@ -315,6 +335,10 @@ public class NamenodeWebHdfsMethods {
|
|||
default:
|
||||
throw new UnsupportedOperationException(op + " is not supported");
|
||||
}
|
||||
|
||||
} finally {
|
||||
REMOTE_ADDRESS.set(null);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
@ -335,10 +359,12 @@ public class NamenodeWebHdfsMethods {
|
|||
final OffsetParam offset,
|
||||
@QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
|
||||
final LengthParam length,
|
||||
@QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
|
||||
final RenewerParam renewer,
|
||||
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
|
||||
final BufferSizeParam bufferSize
|
||||
) throws IOException, URISyntaxException, InterruptedException {
|
||||
return get(ugi, delegation, ROOT, op, offset, length, bufferSize);
|
||||
return get(ugi, delegation, ROOT, op, offset, length, renewer, bufferSize);
|
||||
}
|
||||
|
||||
/** Handle HTTP GET request. */
|
||||
|
@ -356,19 +382,23 @@ public class NamenodeWebHdfsMethods {
|
|||
final OffsetParam offset,
|
||||
@QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
|
||||
final LengthParam length,
|
||||
@QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
|
||||
final RenewerParam renewer,
|
||||
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
|
||||
final BufferSizeParam bufferSize
|
||||
) throws IOException, URISyntaxException, InterruptedException {
|
||||
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace(op + ": " + path + ", ugi=" + ugi
|
||||
+ Param.toSortedString(", ", offset, length, bufferSize));
|
||||
+ Param.toSortedString(", ", offset, length, renewer, bufferSize));
|
||||
}
|
||||
|
||||
|
||||
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
|
||||
@Override
|
||||
public Response run() throws IOException, URISyntaxException {
|
||||
REMOTE_ADDRESS.set(request.getRemoteAddr());
|
||||
try {
|
||||
|
||||
final NameNode namenode = (NameNode)context.getAttribute("name.node");
|
||||
final String fullpath = path.getAbsolutePath();
|
||||
|
@ -381,6 +411,15 @@ public class NamenodeWebHdfsMethods {
|
|||
op.getValue(), offset.getValue(), offset, length, bufferSize);
|
||||
return Response.temporaryRedirect(uri).build();
|
||||
}
|
||||
case GETFILEBLOCKLOCATIONS:
|
||||
{
|
||||
final long offsetValue = offset.getValue();
|
||||
final Long lengthValue = length.getValue();
|
||||
final LocatedBlocks locatedblocks = np.getBlockLocations(fullpath,
|
||||
offsetValue, lengthValue != null? lengthValue: offsetValue + 1);
|
||||
final String js = JsonUtil.toJsonString(locatedblocks);
|
||||
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
|
||||
}
|
||||
case GETFILESTATUS:
|
||||
{
|
||||
final HdfsFileStatus status = np.getFileInfo(fullpath);
|
||||
|
@ -392,9 +431,20 @@ public class NamenodeWebHdfsMethods {
|
|||
final StreamingOutput streaming = getListingStream(np, fullpath);
|
||||
return Response.ok(streaming).type(MediaType.APPLICATION_JSON).build();
|
||||
}
|
||||
case GETDELEGATIONTOKEN:
|
||||
{
|
||||
final Token<? extends TokenIdentifier> token = generateDelegationToken(
|
||||
namenode, ugi, renewer.getValue());
|
||||
final String js = JsonUtil.toJsonString(token);
|
||||
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
|
||||
}
|
||||
default:
|
||||
throw new UnsupportedOperationException(op + " is not supported");
|
||||
}
|
||||
|
||||
} finally {
|
||||
REMOTE_ADDRESS.set(null);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
@ -462,6 +512,9 @@ public class NamenodeWebHdfsMethods {
|
|||
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
|
||||
@Override
|
||||
public Response run() throws IOException {
|
||||
REMOTE_ADDRESS.set(request.getRemoteAddr());
|
||||
try {
|
||||
|
||||
final NameNode namenode = (NameNode)context.getAttribute("name.node");
|
||||
final String fullpath = path.getAbsolutePath();
|
||||
|
||||
|
@ -475,6 +528,10 @@ public class NamenodeWebHdfsMethods {
|
|||
default:
|
||||
throw new UnsupportedOperationException(op + " is not supported");
|
||||
}
|
||||
|
||||
} finally {
|
||||
REMOTE_ADDRESS.set(null);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
|
|
@ -149,7 +149,9 @@ public class DelegationTokenFetcher {
|
|||
DataInputStream in = new DataInputStream(
|
||||
new ByteArrayInputStream(token.getIdentifier()));
|
||||
id.readFields(in);
|
||||
System.out.println("Token (" + id + ") for " + token.getService());
|
||||
if(LOG.isDebugEnabled()) {
|
||||
LOG.debug("Token (" + id + ") for " + token.getService());
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
@ -160,22 +162,28 @@ public class DelegationTokenFetcher {
|
|||
for (Token<?> token : readTokens(tokenFile, conf)) {
|
||||
result = renewDelegationToken(webUrl,
|
||||
(Token<DelegationTokenIdentifier>) token);
|
||||
System.out.println("Renewed token via " + webUrl + " for "
|
||||
+ token.getService() + " until: " + new Date(result));
|
||||
if(LOG.isDebugEnabled()) {
|
||||
LOG.debug("Renewed token via " + webUrl + " for "
|
||||
+ token.getService() + " until: " + new Date(result));
|
||||
}
|
||||
}
|
||||
} else if (cancel) {
|
||||
for (Token<?> token : readTokens(tokenFile, conf)) {
|
||||
cancelDelegationToken(webUrl,
|
||||
(Token<DelegationTokenIdentifier>) token);
|
||||
System.out.println("Cancelled token via " + webUrl + " for "
|
||||
+ token.getService());
|
||||
if(LOG.isDebugEnabled()) {
|
||||
LOG.debug("Cancelled token via " + webUrl + " for "
|
||||
+ token.getService());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Credentials creds = getDTfromRemote(webUrl, renewer);
|
||||
creds.writeTokenStorageFile(tokenFile, conf);
|
||||
for (Token<?> token : creds.getAllTokens()) {
|
||||
System.out.println("Fetched token via " + webUrl + " for "
|
||||
+ token.getService() + " into " + tokenFile);
|
||||
if(LOG.isDebugEnabled()) {
|
||||
LOG.debug("Fetched token via " + webUrl + " for "
|
||||
+ token.getService() + " into " + tokenFile);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
@ -184,24 +192,30 @@ public class DelegationTokenFetcher {
|
|||
for (Token<?> token : readTokens(tokenFile, conf)) {
|
||||
((DistributedFileSystem) fs)
|
||||
.cancelDelegationToken((Token<DelegationTokenIdentifier>) token);
|
||||
System.out.println("Cancelled token for "
|
||||
+ token.getService());
|
||||
if(LOG.isDebugEnabled()) {
|
||||
LOG.debug("Cancelled token for "
|
||||
+ token.getService());
|
||||
}
|
||||
}
|
||||
} else if (renew) {
|
||||
long result;
|
||||
for (Token<?> token : readTokens(tokenFile, conf)) {
|
||||
result = ((DistributedFileSystem) fs)
|
||||
.renewDelegationToken((Token<DelegationTokenIdentifier>) token);
|
||||
System.out.println("Renewed token for " + token.getService()
|
||||
+ " until: " + new Date(result));
|
||||
if(LOG.isDebugEnabled()) {
|
||||
LOG.debug("Renewed token for " + token.getService()
|
||||
+ " until: " + new Date(result));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Token<?> token = fs.getDelegationToken(renewer);
|
||||
Credentials cred = new Credentials();
|
||||
cred.addToken(token.getService(), token);
|
||||
cred.writeTokenStorageFile(tokenFile, conf);
|
||||
System.out.println("Fetched token for " + token.getService()
|
||||
+ " into " + tokenFile);
|
||||
if(LOG.isDebugEnabled()) {
|
||||
LOG.debug("Fetched token for " + token.getService()
|
||||
+ " into " + tokenFile);
|
||||
}
|
||||
}
|
||||
}
|
||||
return null;
|
||||
|
@ -221,6 +235,11 @@ public class DelegationTokenFetcher {
|
|||
} else {
|
||||
url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC);
|
||||
}
|
||||
|
||||
if(LOG.isDebugEnabled()) {
|
||||
LOG.debug("Retrieving token from: " + url);
|
||||
}
|
||||
|
||||
URL remoteURL = new URL(url.toString());
|
||||
SecurityUtil.fetchServiceTicket(remoteURL);
|
||||
URLConnection connection = remoteURL.openConnection();
|
||||
|
|
|
@ -17,19 +17,31 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.web;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
|
||||
import org.apache.hadoop.ipc.RemoteException;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.hadoop.security.token.TokenIdentifier;
|
||||
import org.mortbay.util.ajax.JSON;
|
||||
|
||||
/** JSON Utilities */
|
||||
public class JsonUtil {
|
||||
private static final ThreadLocal<Map<String, Object>> jsonMap
|
||||
= new ThreadLocal<Map<String, Object>>() {
|
||||
private static class ThreadLocalMap extends ThreadLocal<Map<String, Object>> {
|
||||
@Override
|
||||
protected Map<String, Object> initialValue() {
|
||||
return new TreeMap<String, Object>();
|
||||
|
@ -41,7 +53,54 @@ public class JsonUtil {
|
|||
m.clear();
|
||||
return m;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
private static final ThreadLocalMap jsonMap = new ThreadLocalMap();
|
||||
private static final ThreadLocalMap tokenMap = new ThreadLocalMap();
|
||||
private static final ThreadLocalMap datanodeInfoMap = new ThreadLocalMap();
|
||||
private static final ThreadLocalMap extendedBlockMap = new ThreadLocalMap();
|
||||
private static final ThreadLocalMap locatedBlockMap = new ThreadLocalMap();
|
||||
|
||||
private static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
|
||||
|
||||
/** Convert a token object to a Json string. */
|
||||
public static String toJsonString(final Token<? extends TokenIdentifier> token
|
||||
) throws IOException {
|
||||
if (token == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
final Map<String, Object> m = tokenMap.get();
|
||||
m.put("urlString", token.encodeToUrlString());
|
||||
return JSON.toString(m);
|
||||
}
|
||||
|
||||
/** Convert a Json map to a Token. */
|
||||
public static Token<? extends TokenIdentifier> toToken(
|
||||
final Map<?, ?> m) throws IOException {
|
||||
if (m == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
final Token<DelegationTokenIdentifier> token
|
||||
= new Token<DelegationTokenIdentifier>();
|
||||
token.decodeFromUrlString((String)m.get("urlString"));
|
||||
return token;
|
||||
}
|
||||
|
||||
/** Convert a Json map to a Token of DelegationTokenIdentifier. */
|
||||
@SuppressWarnings("unchecked")
|
||||
public static Token<DelegationTokenIdentifier> toDelegationToken(
|
||||
final Map<?, ?> m) throws IOException {
|
||||
return (Token<DelegationTokenIdentifier>)toToken(m);
|
||||
}
|
||||
|
||||
/** Convert a Json map to a Token of BlockTokenIdentifier. */
|
||||
@SuppressWarnings("unchecked")
|
||||
public static Token<BlockTokenIdentifier> toBlockToken(
|
||||
final Map<?, ?> m) throws IOException {
|
||||
return (Token<BlockTokenIdentifier>)toToken(m);
|
||||
}
|
||||
|
||||
/** Convert an exception object to a Json string. */
|
||||
public static String toJsonString(final Exception e) {
|
||||
|
@ -77,11 +136,10 @@ public class JsonUtil {
|
|||
|
||||
/** Convert a HdfsFileStatus object to a Json string. */
|
||||
public static String toJsonString(final HdfsFileStatus status) {
|
||||
final Map<String, Object> m = jsonMap.get();
|
||||
if (status == null) {
|
||||
m.put("isNull", true);
|
||||
return null;
|
||||
} else {
|
||||
m.put("isNull", false);
|
||||
final Map<String, Object> m = jsonMap.get();
|
||||
m.put("localName", status.getLocalName());
|
||||
m.put("isDir", status.isDir());
|
||||
m.put("isSymlink", status.isSymlink());
|
||||
|
@ -97,8 +155,8 @@ public class JsonUtil {
|
|||
m.put("modificationTime", status.getModificationTime());
|
||||
m.put("blockSize", status.getBlockSize());
|
||||
m.put("replication", status.getReplication());
|
||||
return JSON.toString(m);
|
||||
}
|
||||
return JSON.toString(m);
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
|
@ -106,9 +164,9 @@ public class JsonUtil {
|
|||
return (Map<String, Object>) JSON.parse(jsonString);
|
||||
}
|
||||
|
||||
/** Convert a Json string to a HdfsFileStatus object. */
|
||||
/** Convert a Json map to a HdfsFileStatus object. */
|
||||
public static HdfsFileStatus toFileStatus(final Map<String, Object> m) {
|
||||
if ((Boolean)m.get("isNull")) {
|
||||
if (m == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
|
@ -130,4 +188,214 @@ public class JsonUtil {
|
|||
permission, owner, group,
|
||||
symlink, DFSUtil.string2Bytes(localName));
|
||||
}
|
||||
|
||||
/** Convert a LocatedBlock to a Json string. */
|
||||
public static String toJsonString(final ExtendedBlock extendedblock) {
|
||||
if (extendedblock == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
final Map<String, Object> m = extendedBlockMap.get();
|
||||
m.put("blockPoolId", extendedblock.getBlockPoolId());
|
||||
m.put("blockId", extendedblock.getBlockId());
|
||||
m.put("numBytes", extendedblock.getNumBytes());
|
||||
m.put("generationStamp", extendedblock.getGenerationStamp());
|
||||
return JSON.toString(m);
|
||||
}
|
||||
|
||||
/** Convert a Json map to an ExtendedBlock object. */
|
||||
public static ExtendedBlock toExtendedBlock(final Map<?, ?> m) {
|
||||
if (m == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
final String blockPoolId = (String)m.get("blockPoolId");
|
||||
final long blockId = (Long)m.get("blockId");
|
||||
final long numBytes = (Long)m.get("numBytes");
|
||||
final long generationStamp = (Long)m.get("generationStamp");
|
||||
return new ExtendedBlock(blockPoolId, blockId, numBytes, generationStamp);
|
||||
}
|
||||
|
||||
/** Convert a DatanodeInfo to a Json string. */
|
||||
public static String toJsonString(final DatanodeInfo datanodeinfo) {
|
||||
if (datanodeinfo == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
final Map<String, Object> m = datanodeInfoMap.get();
|
||||
m.put("name", datanodeinfo.getName());
|
||||
m.put("storageID", datanodeinfo.getStorageID());
|
||||
m.put("infoPort", datanodeinfo.getInfoPort());
|
||||
|
||||
m.put("ipcPort", datanodeinfo.getIpcPort());
|
||||
|
||||
m.put("capacity", datanodeinfo.getCapacity());
|
||||
m.put("dfsUsed", datanodeinfo.getDfsUsed());
|
||||
m.put("remaining", datanodeinfo.getRemaining());
|
||||
m.put("blockPoolUsed", datanodeinfo.getBlockPoolUsed());
|
||||
m.put("lastUpdate", datanodeinfo.getLastUpdate());
|
||||
m.put("xceiverCount", datanodeinfo.getXceiverCount());
|
||||
m.put("networkLocation", datanodeinfo.getNetworkLocation());
|
||||
m.put("hostName", datanodeinfo.getHostName());
|
||||
m.put("adminState", datanodeinfo.getAdminState().name());
|
||||
return JSON.toString(m);
|
||||
}
|
||||
|
||||
/** Convert a Json map to an DatanodeInfo object. */
|
||||
public static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
|
||||
if (m == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return new DatanodeInfo(
|
||||
(String)m.get("name"),
|
||||
(String)m.get("storageID"),
|
||||
(int)(long)(Long)m.get("infoPort"),
|
||||
(int)(long)(Long)m.get("ipcPort"),
|
||||
|
||||
(Long)m.get("capacity"),
|
||||
(Long)m.get("dfsUsed"),
|
||||
(Long)m.get("remaining"),
|
||||
(Long)m.get("blockPoolUsed"),
|
||||
(Long)m.get("lastUpdate"),
|
||||
(int)(long)(Long)m.get("xceiverCount"),
|
||||
(String)m.get("networkLocation"),
|
||||
(String)m.get("hostName"),
|
||||
AdminStates.valueOf((String)m.get("adminState")));
|
||||
}
|
||||
|
||||
/** Convert a DatanodeInfo[] to a Json string. */
|
||||
public static String toJsonString(final DatanodeInfo[] array
|
||||
) throws IOException {
|
||||
if (array == null) {
|
||||
return null;
|
||||
} else if (array.length == 0) {
|
||||
return "[]";
|
||||
} else {
|
||||
final StringBuilder b = new StringBuilder().append('[').append(
|
||||
toJsonString(array[0]));
|
||||
for(int i = 1; i < array.length; i++) {
|
||||
b.append(", ").append(toJsonString(array[i]));
|
||||
}
|
||||
return b.append(']').toString();
|
||||
}
|
||||
}
|
||||
|
||||
/** Convert an Object[] to a DatanodeInfo[]. */
|
||||
public static DatanodeInfo[] toDatanodeInfoArray(final Object[] objects) {
|
||||
if (objects == null) {
|
||||
return null;
|
||||
} else if (objects.length == 0) {
|
||||
return EMPTY_DATANODE_INFO_ARRAY;
|
||||
} else {
|
||||
final DatanodeInfo[] array = new DatanodeInfo[objects.length];
|
||||
for(int i = 0; i < array.length; i++) {
|
||||
array[i] = (DatanodeInfo)toDatanodeInfo((Map<?, ?>) objects[i]);
|
||||
}
|
||||
return array;
|
||||
}
|
||||
}
|
||||
|
||||
/** Convert a LocatedBlock to a Json string. */
|
||||
public static String toJsonString(final LocatedBlock locatedblock
|
||||
) throws IOException {
|
||||
if (locatedblock == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
final Map<String, Object> m = locatedBlockMap.get();
|
||||
m.put("blockToken", toJsonString(locatedblock.getBlockToken()));
|
||||
m.put("isCorrupt", locatedblock.isCorrupt());
|
||||
m.put("startOffset", locatedblock.getStartOffset());
|
||||
m.put("block", toJsonString(locatedblock.getBlock()));
|
||||
|
||||
m.put("locations", toJsonString(locatedblock.getLocations()));
|
||||
return JSON.toString(m);
|
||||
}
|
||||
|
||||
/** Convert a Json map to LocatedBlock. */
|
||||
public static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
|
||||
if (m == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
final ExtendedBlock b = toExtendedBlock((Map<?, ?>)JSON.parse((String)m.get("block")));
|
||||
final DatanodeInfo[] locations = toDatanodeInfoArray(
|
||||
(Object[])JSON.parse((String)m.get("locations")));
|
||||
final long startOffset = (Long)m.get("startOffset");
|
||||
final boolean isCorrupt = (Boolean)m.get("isCorrupt");
|
||||
|
||||
final LocatedBlock locatedblock = new LocatedBlock(b, locations, startOffset, isCorrupt);
|
||||
locatedblock.setBlockToken(toBlockToken((Map<?, ?>)JSON.parse((String)m.get("blockToken"))));
|
||||
return locatedblock;
|
||||
}
|
||||
|
||||
/** Convert a LocatedBlock[] to a Json string. */
|
||||
public static String toJsonString(final List<LocatedBlock> array
|
||||
) throws IOException {
|
||||
if (array == null) {
|
||||
return null;
|
||||
} else if (array.size() == 0) {
|
||||
return "[]";
|
||||
} else {
|
||||
final StringBuilder b = new StringBuilder().append('[').append(
|
||||
toJsonString(array.get(0)));
|
||||
for(int i = 1; i < array.size(); i++) {
|
||||
b.append(",\n ").append(toJsonString(array.get(i)));
|
||||
}
|
||||
return b.append(']').toString();
|
||||
}
|
||||
}
|
||||
|
||||
/** Convert an Object[] to a List of LocatedBlock.
|
||||
* @throws IOException */
|
||||
public static List<LocatedBlock> toLocatedBlockList(final Object[] objects
|
||||
) throws IOException {
|
||||
if (objects == null) {
|
||||
return null;
|
||||
} else if (objects.length == 0) {
|
||||
return Collections.emptyList();
|
||||
} else {
|
||||
final List<LocatedBlock> list = new ArrayList<LocatedBlock>(objects.length);
|
||||
for(int i = 0; i < objects.length; i++) {
|
||||
list.add((LocatedBlock)toLocatedBlock((Map<?, ?>)objects[i]));
|
||||
}
|
||||
return list;
|
||||
}
|
||||
}
|
||||
|
||||
/** Convert LocatedBlocks to a Json string. */
|
||||
public static String toJsonString(final LocatedBlocks locatedblocks
|
||||
) throws IOException {
|
||||
if (locatedblocks == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
final Map<String, Object> m = jsonMap.get();
|
||||
m.put("fileLength", locatedblocks.getFileLength());
|
||||
m.put("isUnderConstruction", locatedblocks.isUnderConstruction());
|
||||
|
||||
m.put("locatedBlocks", toJsonString(locatedblocks.getLocatedBlocks()));
|
||||
m.put("lastLocatedBlock", toJsonString(locatedblocks.getLastLocatedBlock()));
|
||||
m.put("isLastBlockComplete", locatedblocks.isLastBlockComplete());
|
||||
return JSON.toString(m);
|
||||
}
|
||||
|
||||
/** Convert a Json map to LocatedBlock. */
|
||||
public static LocatedBlocks toLocatedBlocks(final Map<String, Object> m
|
||||
) throws IOException {
|
||||
if (m == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
final long fileLength = (Long)m.get("fileLength");
|
||||
final boolean isUnderConstruction = (Boolean)m.get("isUnderConstruction");
|
||||
final List<LocatedBlock> locatedBlocks = toLocatedBlockList(
|
||||
(Object[])JSON.parse((String) m.get("locatedBlocks")));
|
||||
final LocatedBlock lastLocatedBlock = toLocatedBlock(
|
||||
(Map<?, ?>)JSON.parse((String)m.get("lastLocatedBlock")));
|
||||
final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete");
|
||||
return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks,
|
||||
lastLocatedBlock, isLastBlockComplete);
|
||||
}
|
||||
}
|
|
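The token helpers above serialize a delegation token as a one-entry JSON map keyed by "urlString" and rebuild it by decoding that string again. A hedged round-trip sketch follows; an empty Token stands in for a real delegation token, and only toJsonString and toDelegationToken come from this patch.

// Sketch only: JsonUtil encodes a token as {"urlString":"..."} and the
// client side rebuilds it by parsing that map and decoding the string.
import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.web.JsonUtil;
import org.apache.hadoop.security.token.Token;
import org.mortbay.util.ajax.JSON;

public class TokenJsonRoundTrip {
  public static void main(String[] args) throws IOException {
    // An empty token is enough to exercise the encoding path.
    Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();

    String json = JsonUtil.toJsonString(token);      // {"urlString":"..."}
    Map<?, ?> m = (Map<?, ?>) JSON.parse(json);      // same parser the patch uses
    Token<DelegationTokenIdentifier> copy = JsonUtil.toDelegationToken(m);

    // The decoded copy carries the same url string as the original.
    System.out.println(copy.encodeToUrlString().equals(token.encodeToUrlString()));
  }
}
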
@ -0,0 +1,85 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.web;
|
||||
|
||||
import java.net.URI;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.ws.rs.core.MultivaluedMap;
|
||||
import javax.ws.rs.core.UriBuilder;
|
||||
|
||||
import com.sun.jersey.spi.container.ContainerRequest;
|
||||
import com.sun.jersey.spi.container.ContainerRequestFilter;
|
||||
import com.sun.jersey.spi.container.ContainerResponseFilter;
|
||||
import com.sun.jersey.spi.container.ResourceFilter;
|
||||
|
||||
/**
|
||||
* A filter to change parameter names to lower cases
|
||||
* so that parameter names are considered as case insensitive.
|
||||
*/
|
||||
public class ParamFilter implements ResourceFilter {
|
||||
private static final ContainerRequestFilter LOWER_CASE
|
||||
= new ContainerRequestFilter() {
|
||||
@Override
|
||||
public ContainerRequest filter(final ContainerRequest request) {
|
||||
final MultivaluedMap<String, String> parameters = request.getQueryParameters();
|
||||
if (containsUpperCase(parameters.keySet())) {
|
||||
//rebuild URI
|
||||
final URI lower = rebuildQuery(request.getRequestUri(), parameters);
|
||||
request.setUris(request.getBaseUri(), lower);
|
||||
}
|
||||
return request;
|
||||
}
|
||||
};
|
||||
|
||||
@Override
|
||||
public ContainerRequestFilter getRequestFilter() {
|
||||
return LOWER_CASE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ContainerResponseFilter getResponseFilter() {
|
||||
return null;
|
||||
}
|
||||
|
||||
/** Do the strings contain upper case letters? */
|
||||
private static boolean containsUpperCase(final Iterable<String> strings) {
|
||||
for(String s : strings) {
|
||||
for(int i = 0; i < s.length(); i++) {
|
||||
if (Character.isUpperCase(s.charAt(i))) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/** Rebuild the URI query with lower case parameter names. */
|
||||
private static URI rebuildQuery(final URI uri,
|
||||
final MultivaluedMap<String, String> parameters) {
|
||||
UriBuilder b = UriBuilder.fromUri(uri).replaceQuery("");
|
||||
for(Map.Entry<String, List<String>> e : parameters.entrySet()) {
|
||||
final String key = e.getKey().toLowerCase();
|
||||
for(String v : e.getValue()) {
|
||||
b = b.queryParam(key, v);
|
||||
}
|
||||
}
|
||||
return b.build();
|
||||
}
|
||||
}
|
|
@ -27,9 +27,12 @@ import java.net.HttpURLConnection;
|
|||
import java.net.URI;
|
||||
import java.net.URISyntaxException;
|
||||
import java.net.URL;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.BlockLocation;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.FileAlreadyExistsException;
|
||||
|
@ -45,6 +48,7 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
|
|||
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
|
||||
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
|
||||
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
|
||||
import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
|
||||
|
@ -54,7 +58,9 @@ import org.apache.hadoop.hdfs.web.resources.DstPathParam;
|
|||
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.GroupParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.LengthParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.OffsetParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.OwnerParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.Param;
|
||||
|
@ -63,13 +69,16 @@ import org.apache.hadoop.hdfs.web.resources.PostOpParam;
|
|||
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.RenewerParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.UserParam;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.ipc.RemoteException;
|
||||
import org.apache.hadoop.security.AccessControlException;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
|
||||
import org.apache.hadoop.security.authentication.client.AuthenticationException;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.hadoop.util.Progressable;
|
||||
import org.mortbay.util.ajax.JSON;
|
||||
|
||||
|
@ -82,17 +91,24 @@ public class WebHdfsFileSystem extends HftpFileSystem {
|
|||
|
||||
private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
|
||||
|
||||
private UserGroupInformation ugi;
|
||||
private final UserGroupInformation ugi;
|
||||
private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
|
||||
protected Path workingDir;
|
||||
|
||||
{
|
||||
try {
|
||||
ugi = UserGroupInformation.getCurrentUser();
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void initialize(URI uri, Configuration conf
|
||||
) throws IOException {
|
||||
super.initialize(uri, conf);
|
||||
setConf(conf);
|
||||
|
||||
ugi = UserGroupInformation.getCurrentUser();
|
||||
this.workingDir = getHomeDirectory();
|
||||
}
|
||||
|
||||
|
@ -163,11 +179,11 @@ public class WebHdfsFileSystem extends HftpFileSystem {
|
|||
}
|
||||
}
|
||||
|
||||
private URL toUrl(final HttpOpParam.Op op, final Path fspath,
|
||||
URL toUrl(final HttpOpParam.Op op, final Path fspath,
|
||||
final Param<?,?>... parameters) throws IOException {
|
||||
//initialize URI path and query
|
||||
final String path = "/" + PATH_PREFIX
|
||||
+ makeQualified(fspath).toUri().getPath();
|
||||
+ (fspath == null? "/": makeQualified(fspath).toUri().getPath());
|
||||
final String query = op.toQueryString()
|
||||
+ '&' + new UserParam(ugi)
|
||||
+ Param.toSortedString("&", parameters);
|
||||
|
@ -396,4 +412,41 @@ public class WebHdfsFileSystem extends HftpFileSystem {
|
|||
}
|
||||
return statuses;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Token<DelegationTokenIdentifier> getDelegationToken(final String renewer
|
||||
) throws IOException {
|
||||
final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
|
||||
final Map<String, Object> m = run(op, null, new RenewerParam(renewer));
|
||||
final Token<DelegationTokenIdentifier> token = JsonUtil.toDelegationToken(m);
|
||||
token.setService(new Text(getCanonicalServiceName()));
|
||||
return token;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Token<?>> getDelegationTokens(final String renewer
|
||||
) throws IOException {
|
||||
final Token<?>[] t = {getDelegationToken(renewer)};
|
||||
return Arrays.asList(t);
|
||||
}
|
||||
|
||||
@Override
|
||||
public BlockLocation[] getFileBlockLocations(final FileStatus status,
|
||||
final long offset, final long length) throws IOException {
|
||||
if (status == null) {
|
||||
return null;
|
||||
}
|
||||
return getFileBlockLocations(status.getPath(), offset, length);
|
||||
}
|
||||
|
||||
@Override
|
||||
public BlockLocation[] getFileBlockLocations(final Path p,
|
||||
final long offset, final long length) throws IOException {
|
||||
statistics.incrementReadOps(1);
|
||||
|
||||
final HttpOpParam.Op op = GetOpParam.Op.GETFILEBLOCKLOCATIONS;
|
||||
final Map<String, Object> m = run(op, p, new OffsetParam(offset),
|
||||
new LengthParam(length));
|
||||
return DFSUtil.locatedBlocks2Locations(JsonUtil.toLocatedBlocks(m));
|
||||
}
|
||||
}
|
|
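On the client side, the WebHdfsFileSystem additions above turn GETDELEGATIONTOKEN and GETFILEBLOCKLOCATIONS into ordinary FileSystem calls. A hedged usage sketch; the namenode address, renewer and path are invented, and depending on the build the webhdfs scheme may need fs.webhdfs.impl mapped to WebHdfsFileSystem.

// Sketch only: exercising the new client-side calls added in this patch.
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.token.Token;

public class WebHdfsClientExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // "webhdfs" is the scheme served by NamenodeWebHdfsMethods; host/port
    // are assumptions for illustration.
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://namenode.example.com:50070"), conf);

    // GETDELEGATIONTOKEN: issued by the namenode and returned as JSON.
    Token<?> token = fs.getDelegationToken("mapred");
    System.out.println("token service: " + token.getService());

    // GETFILEBLOCKLOCATIONS: LocatedBlocks converted back to BlockLocation[].
    BlockLocation[] locations =
        fs.getFileBlockLocations(new Path("/user/example/data.txt"), 0L, 1L << 20);
    for (BlockLocation loc : locations) {
      System.out.println(loc);
    }
  }
}
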
@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.web.resources;
|
|||
/** Access time parameter. */
|
||||
public class AccessTimeParam extends LongParam {
|
||||
/** Parameter name. */
|
||||
public static final String NAME = "accessTime";
|
||||
public static final String NAME = "accesstime";
|
||||
/** Default parameter value. */
|
||||
public static final String DEFAULT = "-1";
|
||||
|
||||
|
|
|
@ -25,7 +25,7 @@ import org.apache.hadoop.conf.Configuration;
|
|||
/** Block size parameter. */
|
||||
public class BlockSizeParam extends LongParam {
|
||||
/** Parameter name. */
|
||||
public static final String NAME = "blockSize";
|
||||
public static final String NAME = "blocksize";
|
||||
/** Default parameter value. */
|
||||
public static final String DEFAULT = NULL;
|
||||
|
||||
|
|
|
@ -23,7 +23,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
|
|||
/** Buffer size parameter. */
|
||||
public class BufferSizeParam extends IntegerParam {
|
||||
/** Parameter name. */
|
||||
public static final String NAME = "bufferSize";
|
||||
public static final String NAME = "buffersize";
|
||||
/** Default parameter value. */
|
||||
public static final String DEFAULT = NULL;
|
||||
|
||||
|
|
|
@ -17,13 +17,12 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.web.resources;
|
||||
|
||||
import org.apache.hadoop.hdfs.server.common.JspHelper;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
|
||||
/** Delegation token parameter. */
|
||||
public class DelegationParam extends StringParam {
|
||||
/** Parameter name. */
|
||||
public static final String NAME = JspHelper.DELEGATION_PARAMETER_NAME;
|
||||
public static final String NAME = "delegation";
|
||||
/** Default parameter value. */
|
||||
public static final String DEFAULT = "";
|
||||
|
||||
|
|
|
@ -21,9 +21,6 @@ import java.net.HttpURLConnection;
|
|||
|
||||
/** Http DELETE operation parameter. */
|
||||
public class DeleteOpParam extends HttpOpParam<DeleteOpParam.Op> {
|
||||
/** Parameter name. */
|
||||
public static final String NAME = "deleteOp";
|
||||
|
||||
/** Delete operations. */
|
||||
public static enum Op implements HttpOpParam.Op {
|
||||
DELETE(HttpURLConnection.HTTP_OK),
|
||||
|
|
|
@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Path;
|
|||
/** Destination path parameter. */
|
||||
public class DstPathParam extends StringParam {
|
||||
/** Parameter name. */
|
||||
public static final String NAME = "dstPath";
|
||||
public static final String NAME = "dstpath";
|
||||
/** Default parameter value. */
|
||||
public static final String DEFAULT = "";
|
||||
|
||||
|
|
|
@ -21,16 +21,16 @@ import java.net.HttpURLConnection;
|
|||
|
||||
/** Http GET operation parameter. */
|
||||
public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
|
||||
/** Parameter name. */
|
||||
public static final String NAME = "getOp";
|
||||
|
||||
/** Get operations. */
|
||||
public static enum Op implements HttpOpParam.Op {
|
||||
OPEN(HttpURLConnection.HTTP_OK),
|
||||
GETFILEBLOCKLOCATIONS(HttpURLConnection.HTTP_OK),
|
||||
|
||||
GETFILESTATUS(HttpURLConnection.HTTP_OK),
|
||||
LISTSTATUS(HttpURLConnection.HTTP_OK),
|
||||
|
||||
GETDELEGATIONTOKEN(HttpURLConnection.HTTP_OK),
|
||||
|
||||
NULL(HttpURLConnection.HTTP_NOT_IMPLEMENTED);
|
||||
|
||||
final int expectedHttpResponseCode;
|
||||
|
|
|
@ -20,6 +20,9 @@ package org.apache.hadoop.hdfs.web.resources;
|
|||
/** Http operation parameter. */
|
||||
public abstract class HttpOpParam<E extends Enum<E> & HttpOpParam.Op>
|
||||
extends EnumParam<E> {
|
||||
/** Parameter name. */
|
||||
public static final String NAME = "op";
|
||||
|
||||
/** Default parameter value. */
|
||||
public static final String DEFAULT = NULL;
|
||||
|
||||
|
|
|
@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.web.resources;
|
|||
/** Modification time parameter. */
|
||||
public class ModificationTimeParam extends LongParam {
|
||||
/** Parameter name. */
|
||||
public static final String NAME = "modificationTime";
|
||||
public static final String NAME = "modificationtime";
|
||||
/** Default parameter value. */
|
||||
public static final String DEFAULT = "-1";
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.web.resources;
|
||||
|
||||
/** Recursive parameter. */
|
||||
/** Overwrite parameter. */
|
||||
public class OverwriteParam extends BooleanParam {
|
||||
/** Parameter name. */
|
||||
public static final String NAME = "overwrite";
|
||||
|
|
|
@ -21,9 +21,6 @@ import java.net.HttpURLConnection;
|
|||
|
||||
/** Http POST operation parameter. */
|
||||
public class PostOpParam extends HttpOpParam<PostOpParam.Op> {
|
||||
/** Parameter name. */
|
||||
public static final String NAME = "postOp";
|
||||
|
||||
/** Post operations. */
|
||||
public static enum Op implements HttpOpParam.Op {
|
||||
APPEND(HttpURLConnection.HTTP_OK),
|
||||
|
|
|
@ -21,9 +21,6 @@ import java.net.HttpURLConnection;
|
|||
|
||||
/** Http POST operation parameter. */
|
||||
public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
|
||||
/** Parameter name. */
|
||||
public static final String NAME = "putOp";
|
||||
|
||||
/** Put operations. */
|
||||
public static enum Op implements HttpOpParam.Op {
|
||||
CREATE(true, HttpURLConnection.HTTP_CREATED),
|
||||
|
|
|
@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Options;
|
|||
/** Rename option set parameter. */
|
||||
public class RenameOptionSetParam extends EnumSetParam<Options.Rename> {
|
||||
/** Parameter name. */
|
||||
public static final String NAME = "renameOptions";
|
||||
public static final String NAME = "renameoptions";
|
||||
/** Default parameter value. */
|
||||
public static final String DEFAULT = "";
|
||||
|
||||
|
|
|
@ -0,0 +1,41 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.web.resources;
|
||||
|
||||
/** Renewer parameter. */
|
||||
public class RenewerParam extends StringParam {
|
||||
/** Parameter name. */
|
||||
public static final String NAME = "renewer";
|
||||
/** Default parameter value. */
|
||||
public static final String DEFAULT = NULL;
|
||||
|
||||
private static final Domain DOMAIN = new Domain(NAME, null);
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
* @param str a string representation of the parameter value.
|
||||
*/
|
||||
public RenewerParam(final String str) {
|
||||
super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return NAME;
|
||||
}
|
||||
}
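A minimal, hedged usage sketch for the RenewerParam class added above; the request shape and the sample renewer name are illustrative assumptions, not part of this patch:

// Hypothetical sketch (assumes this class is on the classpath): the renewer is
// carried as a query parameter of a WebHDFS delegation-token request, roughly
//   http://<namenode>:50070/webhdfs/v1/?op=GETDELEGATIONTOKEN&renewer=JobTracker
RenewerParam renewer = new RenewerParam("JobTracker");   // null or "" would mean no renewer
System.out.println(RenewerParam.NAME);                   // prints: renewer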
|
|
@ -683,24 +683,4 @@ creations/deletions), or "all".</description>
|
|||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.web.authentication.kerberos.principal</name>
|
||||
<value>HTTP/${dfs.web.hostname}@${kerberos.realm}</value>
|
||||
<description>
|
||||
The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
|
||||
|
||||
The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
|
||||
HTTP SPNEGO specification.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.web.authentication.kerberos.keytab</name>
|
||||
<value>${user.home}/dfs.web.keytab</value>
|
||||
<description>
|
||||
The Kerberos keytab file with the credentials for the
|
||||
HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
</configuration>
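A hedged illustration of overriding the two new dfs.web.authentication properties above; in a real deployment these would normally be set in hdfs-site.xml, and the principal and keytab values below are placeholders, not values from this patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Minimal sketch: point the WebHDFS HTTP endpoint at a concrete SPNEGO identity.
Configuration conf = new HdfsConfiguration();
conf.set("dfs.web.authentication.kerberos.principal", "HTTP/nn.example.com@EXAMPLE.COM");
conf.set("dfs.web.authentication.kerberos.keytab", "/etc/security/keytabs/spnego.service.keytab");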
|
||||
|
|
|
@ -72,6 +72,7 @@ public class TestDFSPermission extends TestCase {
|
|||
final private static Path NON_EXISTENT_FILE = new Path("/NonExistentFile");
|
||||
|
||||
private FileSystem fs;
|
||||
private MiniDFSCluster cluster;
|
||||
private static Random r;
|
||||
|
||||
static {
|
||||
|
@ -105,18 +106,25 @@ public class TestDFSPermission extends TestCase {
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setUp() throws IOException {
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
|
||||
cluster.waitActive();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void tearDown() throws IOException {
|
||||
if (cluster != null) {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
/** This tests if permission setting in create, mkdir, and
|
||||
* setPermission works correctly
|
||||
*/
|
||||
public void testPermissionSetting() throws Exception {
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
|
||||
try {
|
||||
cluster.waitActive();
|
||||
testPermissionSetting(OpType.CREATE); // test file creation
|
||||
testPermissionSetting(OpType.MKDIRS); // test directory creation
|
||||
} finally {
|
||||
cluster.shutdown();
|
||||
}
|
||||
testPermissionSetting(OpType.CREATE); // test file creation
|
||||
testPermissionSetting(OpType.MKDIRS); // test directory creation
|
||||
}
|
||||
|
||||
private void initFileSystem(short umask) throws Exception {
|
||||
|
@ -245,17 +253,22 @@ public class TestDFSPermission extends TestCase {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* check that ImmutableFsPermission can be used as the argument
|
||||
* to setPermission
|
||||
*/
|
||||
public void testImmutableFsPermission() throws IOException {
|
||||
fs = FileSystem.get(conf);
|
||||
|
||||
// set the permission of the root to be world-wide rwx
|
||||
fs.setPermission(new Path("/"),
|
||||
FsPermission.createImmutable((short)0777));
|
||||
}
|
||||
|
||||
/* check if the ownership of a file/directory is set correctly */
|
||||
public void testOwnership() throws Exception {
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
|
||||
try {
|
||||
cluster.waitActive();
|
||||
testOwnership(OpType.CREATE); // test file creation
|
||||
testOwnership(OpType.MKDIRS); // test directory creation
|
||||
} finally {
|
||||
fs.close();
|
||||
cluster.shutdown();
|
||||
}
|
||||
testOwnership(OpType.CREATE); // test file creation
|
||||
testOwnership(OpType.MKDIRS); // test directory creation
|
||||
}
|
||||
|
||||
/* change a file/directory's owner and group.
|
||||
|
@ -342,9 +355,7 @@ public class TestDFSPermission extends TestCase {
|
|||
/* Check if namenode performs permission checking correctly for
|
||||
* superuser, file owner, group owner, and other users */
|
||||
public void testPermissionChecking() throws Exception {
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
|
||||
try {
|
||||
cluster.waitActive();
|
||||
fs = FileSystem.get(conf);
|
||||
|
||||
// set the permission of the root to be world-wide rwx
|
||||
|
@ -401,7 +412,6 @@ public class TestDFSPermission extends TestCase {
|
|||
parentPermissions, permissions, parentPaths, filePaths, dirPaths);
|
||||
} finally {
|
||||
fs.close();
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -29,8 +29,7 @@ import java.util.Collection;
|
|||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
|
||||
import junit.framework.Assert;
|
||||
|
||||
import org.apache.hadoop.HadoopIllegalArgumentException;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
|
@ -40,8 +39,7 @@ import org.apache.hadoop.net.NetUtils;
|
|||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.BlockLocation;
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
|
||||
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
|
||||
|
||||
public class TestDFSUtil {
|
||||
/**
|
||||
|
@ -76,79 +74,141 @@ public class TestDFSUtil {
|
|||
}
|
||||
}
|
||||
|
||||
assertTrue("expected 1 corrupt files but got " + corruptCount,
|
||||
corruptCount == 1);
|
||||
|
||||
assertTrue("expected 1 corrupt files but got " + corruptCount,
|
||||
corruptCount == 1);
|
||||
|
||||
// test an empty location
|
||||
bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
|
||||
assertEquals(0, bs.length);
|
||||
}
|
||||
|
||||
/**
|
||||
* Test for
|
||||
* {@link DFSUtil#getNameServiceIds(Configuration)}
|
||||
* {@link DFSUtil#getNameServiceId(Configuration)}
|
||||
* {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
|
||||
|
||||
private Configuration setupAddress(String key) {
|
||||
HdfsConfiguration conf = new HdfsConfiguration();
|
||||
conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
|
||||
conf.set(DFSUtil.getNameServiceIdKey(key, "nn1"), "localhost:9000");
|
||||
return conf;
|
||||
}
|
||||
|
||||
/**
|
||||
* Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
|
||||
* nameserviceId from the configuration returned
|
||||
*/
|
||||
@Test
|
||||
public void testMultipleNamenodes() throws IOException {
|
||||
public void getNameServiceId() {
|
||||
HdfsConfiguration conf = new HdfsConfiguration();
|
||||
conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
|
||||
|
||||
// Test - The configured nameserviceIds are returned
|
||||
conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
|
||||
assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
|
||||
}
|
||||
|
||||
/**
|
||||
* Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
|
||||
* nameserviceId for namenode is determined based on matching the address with
|
||||
* local node's address
|
||||
*/
|
||||
@Test
|
||||
public void getNameNodeNameServiceId() {
|
||||
Configuration conf = setupAddress(DFS_NAMENODE_RPC_ADDRESS_KEY);
|
||||
assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
|
||||
}
|
||||
|
||||
/**
|
||||
* Test {@link DFSUtil#getBackupNameServiceId(Configuration)} to ensure
|
||||
* nameserviceId for backup node is determined based on matching the address
|
||||
* with local node's address
|
||||
*/
|
||||
@Test
|
||||
public void getBackupNameServiceId() {
|
||||
Configuration conf = setupAddress(DFS_NAMENODE_BACKUP_ADDRESS_KEY);
|
||||
assertEquals("nn1", DFSUtil.getBackupNameServiceId(conf));
|
||||
}
|
||||
|
||||
/**
|
||||
* Test {@link DFSUtil#getSecondaryNameServiceId(Configuration)} to ensure
|
||||
* nameserviceId for the secondary namenode is determined based on matching the address
|
||||
* with local node's address
|
||||
*/
|
||||
@Test
|
||||
public void getSecondaryNameServiceId() {
|
||||
Configuration conf = setupAddress(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
|
||||
assertEquals("nn1", DFSUtil.getSecondaryNameServiceId(conf));
|
||||
}
|
||||
|
||||
/**
|
||||
* Test {@link DFSUtil#getNameServiceId(Configuration, String)} to ensure
|
||||
* exception is thrown when multiple rpc addresses match the local node's
|
||||
* address
|
||||
*/
|
||||
@Test(expected = HadoopIllegalArgumentException.class)
|
||||
public void testGetNameServiceIdException() {
|
||||
HdfsConfiguration conf = new HdfsConfiguration();
|
||||
conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
|
||||
conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
|
||||
"localhost:9000");
|
||||
conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
|
||||
"localhost:9001");
|
||||
DFSUtil.getNamenodeNameServiceId(conf);
|
||||
fail("Expected exception is not thrown");
|
||||
}
|
||||
|
||||
/**
|
||||
* Test {@link DFSUtil#getNameServiceIds(Configuration)}
|
||||
*/
|
||||
@Test
|
||||
public void testGetNameServiceIds() {
|
||||
HdfsConfiguration conf = new HdfsConfiguration();
|
||||
conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
|
||||
Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
|
||||
Iterator<String> it = nameserviceIds.iterator();
|
||||
assertEquals(2, nameserviceIds.size());
|
||||
assertEquals("nn1", it.next().toString());
|
||||
assertEquals("nn2", it.next().toString());
|
||||
|
||||
// Tests default nameserviceId is returned
|
||||
conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
|
||||
assertEquals("nn1", DFSUtil.getNameServiceId(conf));
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
|
||||
* {@link DFSUtil#getNameServiceIdFromAddress(Configuration, InetSocketAddress, String...)
|
||||
* (Configuration)}
|
||||
*/
|
||||
@Test
|
||||
public void testMultipleNamenodes() throws IOException {
|
||||
HdfsConfiguration conf = new HdfsConfiguration();
|
||||
conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
|
||||
// Test - configured list of namenodes are returned
|
||||
final String NN1_ADDRESS = "localhost:9000";
|
||||
final String NN2_ADDRESS = "localhost:9001";
|
||||
final String NN3_ADDRESS = "localhost:9002";
|
||||
conf.set(DFSUtil.getNameServiceIdKey(
|
||||
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"), NN1_ADDRESS);
|
||||
conf.set(DFSUtil.getNameServiceIdKey(
|
||||
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), NN2_ADDRESS);
|
||||
|
||||
Collection<InetSocketAddress> nnAddresses =
|
||||
DFSUtil.getNNServiceRpcAddresses(conf);
|
||||
conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
|
||||
NN1_ADDRESS);
|
||||
conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
|
||||
NN2_ADDRESS);
|
||||
|
||||
Collection<InetSocketAddress> nnAddresses = DFSUtil
|
||||
.getNNServiceRpcAddresses(conf);
|
||||
assertEquals(2, nnAddresses.size());
|
||||
Iterator<InetSocketAddress> iterator = nnAddresses.iterator();
|
||||
assertEquals(2, nameserviceIds.size());
|
||||
InetSocketAddress addr = iterator.next();
|
||||
assertEquals("localhost", addr.getHostName());
|
||||
assertEquals(9000, addr.getPort());
|
||||
addr = iterator.next();
|
||||
assertEquals("localhost", addr.getHostName());
|
||||
assertEquals(9001, addr.getPort());
|
||||
|
||||
|
||||
// Test - can look up nameservice ID from service address
|
||||
InetSocketAddress testAddress1 = NetUtils.createSocketAddr(NN1_ADDRESS);
|
||||
String nameserviceId = DFSUtil.getNameServiceIdFromAddress(
|
||||
conf, testAddress1,
|
||||
DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
|
||||
assertEquals("nn1", nameserviceId);
|
||||
InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
|
||||
nameserviceId = DFSUtil.getNameServiceIdFromAddress(
|
||||
conf, testAddress2,
|
||||
DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
|
||||
assertEquals("nn2", nameserviceId);
|
||||
InetSocketAddress testAddress3 = NetUtils.createSocketAddr(NN3_ADDRESS);
|
||||
nameserviceId = DFSUtil.getNameServiceIdFromAddress(
|
||||
conf, testAddress3,
|
||||
DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
|
||||
assertNull(nameserviceId);
|
||||
checkNameServiceId(conf, NN1_ADDRESS, "nn1");
|
||||
checkNameServiceId(conf, NN2_ADDRESS, "nn2");
|
||||
checkNameServiceId(conf, NN3_ADDRESS, null);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
public void checkNameServiceId(Configuration conf, String addr,
|
||||
String expectedNameServiceId) {
|
||||
InetSocketAddress s = NetUtils.createSocketAddr(addr);
|
||||
String nameserviceId = DFSUtil.getNameServiceIdFromAddress(conf, s,
|
||||
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
|
||||
assertEquals(expectedNameServiceId, nameserviceId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Test for
|
||||
* {@link DFSUtil#isDefaultNamenodeAddress(Configuration, InetSocketAddress, String...)}
|
||||
*/
|
||||
|
@ -157,27 +217,25 @@ public class TestDFSUtil {
|
|||
HdfsConfiguration conf = new HdfsConfiguration();
|
||||
final String DEFAULT_ADDRESS = "localhost:9000";
|
||||
final String NN2_ADDRESS = "localhost:9001";
|
||||
conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
|
||||
|
||||
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
|
||||
|
||||
InetSocketAddress testAddress1 = NetUtils.createSocketAddr(DEFAULT_ADDRESS);
|
||||
boolean isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress1,
|
||||
DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
|
||||
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
|
||||
assertTrue(isDefault);
|
||||
InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
|
||||
isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress2,
|
||||
DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
|
||||
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
|
||||
assertFalse(isDefault);
|
||||
}
|
||||
|
||||
|
||||
/** Tests to ensure default namenode is used as fallback */
|
||||
@Test
|
||||
public void testDefaultNamenode() throws IOException {
|
||||
HdfsConfiguration conf = new HdfsConfiguration();
|
||||
final String hdfs_default = "hdfs://localhost:9999/";
|
||||
conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, hdfs_default);
|
||||
// If DFSConfigKeys.DFS_FEDERATION_NAMESERVICES is not set, verify that
|
||||
conf.set(FS_DEFAULT_NAME_KEY, hdfs_default);
|
||||
// If DFS_FEDERATION_NAMESERVICES is not set, verify that
|
||||
// default namenode address is returned.
|
||||
List<InetSocketAddress> addrList = DFSUtil.getNNServiceRpcAddresses(conf);
|
||||
assertEquals(1, addrList.size());
|
||||
|
@ -191,26 +249,26 @@ public class TestDFSUtil {
|
|||
@Test
|
||||
public void testConfModification() throws IOException {
|
||||
final HdfsConfiguration conf = new HdfsConfiguration();
|
||||
conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1");
|
||||
conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
|
||||
final String nameserviceId = DFSUtil.getNameServiceId(conf);
|
||||
|
||||
conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
|
||||
conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
|
||||
final String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
|
||||
|
||||
// Set the nameservice specific keys with nameserviceId in the config key
|
||||
for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
|
||||
// Note: value is same as the key
|
||||
conf.set(DFSUtil.getNameServiceIdKey(key, nameserviceId), key);
|
||||
}
|
||||
|
||||
|
||||
// Initialize generic keys from specific keys
|
||||
NameNode.initializeGenericKeys(conf);
|
||||
|
||||
NameNode.initializeGenericKeys(conf, nameserviceId);
|
||||
|
||||
// Retrieve the keys without nameserviceId and Ensure generic keys are set
|
||||
// to the correct value
|
||||
for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
|
||||
assertEquals(key, conf.get(key));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Tests for empty configuration, an exception is thrown from
|
||||
* {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
|
||||
|
@ -238,16 +296,16 @@ public class TestDFSUtil {
|
|||
} catch (IOException expected) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testGetServerInfo(){
|
||||
public void testGetServerInfo() {
|
||||
HdfsConfiguration conf = new HdfsConfiguration();
|
||||
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
|
||||
UserGroupInformation.setConfiguration(conf);
|
||||
String httpsport = DFSUtil.getInfoServer(null, conf, true);
|
||||
Assert.assertEquals("0.0.0.0:50470", httpsport);
|
||||
assertEquals("0.0.0.0:50470", httpsport);
|
||||
String httpport = DFSUtil.getInfoServer(null, conf, false);
|
||||
Assert.assertEquals("0.0.0.0:50070", httpport);
|
||||
assertEquals("0.0.0.0:50070", httpport);
|
||||
}
|
||||
|
||||
}
|
|
@ -17,6 +17,10 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import java.io.OutputStream;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
|
||||
|
@ -24,17 +28,15 @@ import org.apache.hadoop.conf.Configuration;
|
|||
import org.apache.hadoop.fs.ContentSummary;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
|
||||
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
|
||||
import org.apache.hadoop.hdfs.tools.DFSAdmin;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
|
||||
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
|
||||
|
||||
import org.junit.Test;
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
/** A class for testing quota-related commands */
|
||||
public class TestQuota {
|
||||
|
@ -841,6 +843,14 @@ public class TestQuota {
|
|||
DFSAdmin admin = new DFSAdmin(conf);
|
||||
|
||||
try {
|
||||
|
||||
//Test for deafult NameSpace Quota
|
||||
long nsQuota = FSImageTestUtil.getNSQuota(cluster.getNameNode()
|
||||
.getNamesystem());
|
||||
assertTrue(
|
||||
"Default namespace quota expected as long max. But the value is :"
|
||||
+ nsQuota, nsQuota == Long.MAX_VALUE);
|
||||
|
||||
Path dir = new Path("/test");
|
||||
boolean exceededQuota = false;
|
||||
ContentSummary c;
|
||||
|
|
|
@ -23,12 +23,12 @@ package org.apache.hadoop.hdfs.security;
|
|||
import java.io.ByteArrayInputStream;
|
||||
import java.io.DataInputStream;
|
||||
import java.io.IOException;
|
||||
import java.net.URI;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
|
||||
import junit.framework.Assert;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.commons.logging.impl.Log4JLogger;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
|
@ -38,12 +38,16 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
|
|||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
|
||||
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
|
||||
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.security.AccessControlException;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.log4j.Level;
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
|
@ -56,12 +60,13 @@ public class TestDelegationToken {
|
|||
@Before
|
||||
public void setUp() throws Exception {
|
||||
config = new HdfsConfiguration();
|
||||
config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
|
||||
config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
|
||||
config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
|
||||
config.set("hadoop.security.auth_to_local",
|
||||
"RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
|
||||
FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
|
||||
cluster = new MiniDFSCluster.Builder(config).build();
|
||||
cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
|
||||
cluster.waitActive();
|
||||
dtSecretManager = NameNodeAdapter.getDtSecretManager(
|
||||
cluster.getNamesystem());
|
||||
|
@ -153,6 +158,31 @@ public class TestDelegationToken {
|
|||
dtSecretManager.renewToken(token, "JobTracker");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDelegationTokenWebHdfsApi() throws Exception {
|
||||
((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
|
||||
final String uri = WebHdfsFileSystem.SCHEME + "://"
|
||||
+ config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
|
||||
//get file system as JobTracker
|
||||
final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
|
||||
"JobTracker", new String[]{"user"});
|
||||
final WebHdfsFileSystem webhdfs = ugi.doAs(
|
||||
new PrivilegedExceptionAction<WebHdfsFileSystem>() {
|
||||
@Override
|
||||
public WebHdfsFileSystem run() throws Exception {
|
||||
return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
|
||||
}
|
||||
});
|
||||
|
||||
final Token<DelegationTokenIdentifier> token = webhdfs.getDelegationToken("JobTracker");
|
||||
DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
|
||||
byte[] tokenId = token.getIdentifier();
|
||||
identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
|
||||
LOG.info("A valid token should have non-null password, and should be renewed successfully");
|
||||
Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
|
||||
dtSecretManager.renewToken(token, "JobTracker");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDelegationTokenWithDoAs() throws Exception {
|
||||
final DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
|
||||
|
|
|
@ -18,31 +18,34 @@
|
|||
|
||||
package org.apache.hadoop.hdfs.server.blockmanagement;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.Host2NodesMap;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
public class TestHost2NodesMap extends TestCase {
|
||||
static private Host2NodesMap map = new Host2NodesMap();
|
||||
private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
|
||||
public class TestHost2NodesMap {
|
||||
private Host2NodesMap map = new Host2NodesMap();
|
||||
private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
|
||||
new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
|
||||
new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
|
||||
new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
|
||||
new DatanodeDescriptor(new DatanodeID("h3:5030"), "/d1/r2"),
|
||||
};
|
||||
private final static DatanodeDescriptor NULL_NODE = null;
|
||||
private final static DatanodeDescriptor NODE =
|
||||
new DatanodeDescriptor(new DatanodeID("h3:5040"), "/d1/r4");
|
||||
private final DatanodeDescriptor NULL_NODE = null;
|
||||
private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3:5040"),
|
||||
"/d1/r4");
|
||||
|
||||
static {
|
||||
@Before
|
||||
public void setup() {
|
||||
for(DatanodeDescriptor node:dataNodes) {
|
||||
map.add(node);
|
||||
}
|
||||
map.add(NULL_NODE);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testContains() throws Exception {
|
||||
for(int i=0; i<dataNodes.length; i++) {
|
||||
assertTrue(map.contains(dataNodes[i]));
|
||||
|
@ -51,6 +54,7 @@ public class TestHost2NodesMap extends TestCase {
|
|||
assertFalse(map.contains(NODE));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetDatanodeByHost() throws Exception {
|
||||
assertTrue(map.getDatanodeByHost("h1")==dataNodes[0]);
|
||||
assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
|
||||
|
@ -59,6 +63,7 @@ public class TestHost2NodesMap extends TestCase {
|
|||
assertTrue(null==map.getDatanodeByHost("h4"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetDatanodeByName() throws Exception {
|
||||
assertTrue(map.getDatanodeByName("h1:5020")==dataNodes[0]);
|
||||
assertTrue(map.getDatanodeByName("h1:5030")==null);
|
||||
|
@ -71,6 +76,7 @@ public class TestHost2NodesMap extends TestCase {
|
|||
assertTrue(map.getDatanodeByName(null)==null);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRemove() throws Exception {
|
||||
assertFalse(map.remove(NODE));
|
||||
|
||||
|
|
|
@ -96,7 +96,8 @@ public class TestMulitipleNNDataBlockScanner {
|
|||
|
||||
String bpidToShutdown = cluster.getNamesystem(2).getBlockPoolId();
|
||||
for (int i = 0; i < 2; i++) {
|
||||
String nsId = DFSUtil.getNameServiceId(cluster.getConfiguration(i));
|
||||
String nsId = DFSUtil.getNamenodeNameServiceId(cluster
|
||||
.getConfiguration(i));
|
||||
namenodesBuilder.append(nsId);
|
||||
namenodesBuilder.append(",");
|
||||
}
|
||||
|
@ -116,7 +117,7 @@ public class TestMulitipleNNDataBlockScanner {
|
|||
LOG.info(ex.getMessage());
|
||||
}
|
||||
|
||||
namenodesBuilder.append(DFSUtil.getNameServiceId(cluster
|
||||
namenodesBuilder.append(DFSUtil.getNamenodeNameServiceId(cluster
|
||||
.getConfiguration(2)));
|
||||
conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, namenodesBuilder
|
||||
.toString());
|
||||
|
|
|
@ -17,21 +17,24 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.server.datanode;
|
||||
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import static org.junit.Assert.*;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
/**
|
||||
* Unit test for ReplicasMap class
|
||||
*/
|
||||
public class TestReplicasMap {
|
||||
private static final ReplicasMap map = new ReplicasMap(TestReplicasMap.class);
|
||||
private static final String bpid = "BP-TEST";
|
||||
private static final Block block = new Block(1234, 1234, 1234);
|
||||
private final ReplicasMap map = new ReplicasMap(TestReplicasMap.class);
|
||||
private final String bpid = "BP-TEST";
|
||||
private final Block block = new Block(1234, 1234, 1234);
|
||||
|
||||
@BeforeClass
|
||||
public static void setup() {
|
||||
@Before
|
||||
public void setup() {
|
||||
map.add(bpid, new FinalizedReplica(block, null, null));
|
||||
}
|
||||
|
||||
|
|
|
@ -412,4 +412,11 @@ public abstract class FSImageTestUtil {
|
|||
public static FSImage getFSImage(NameNode node) {
|
||||
return node.getFSImage();
|
||||
}
|
||||
|
||||
/**
|
||||
* get NameSpace quota.
|
||||
*/
|
||||
public static long getNSQuota(FSNamesystem ns) {
|
||||
return ns.dir.rootDir.getNsQuota();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,290 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.server.namenode;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
|
||||
import org.junit.Test;
|
||||
|
||||
public class TestProcessCorruptBlocks {
|
||||
/**
|
||||
* The corrupt block has to be removed when the number of valid replicas
|
||||
* matches replication factor for the file. In this test, the above condition is
|
||||
* tested by reducing the replication factor
|
||||
* The test strategy :
|
||||
* Bring up Cluster with 3 DataNodes
|
||||
* Create a file of replication factor 3
|
||||
* Corrupt one replica of a block of the file
|
||||
* Verify that there are still 2 good replicas and 1 corrupt replica
|
||||
* (corrupt replica should not be removed since number of good
|
||||
* replicas (2) is less than replication factor (3))
|
||||
* Set the replication factor to 2
|
||||
* Verify that the corrupt replica is removed.
|
||||
* (corrupt replica should be removed since the number of good
|
||||
* replicas (2) is equal to replication factor (2))
|
||||
*/
|
||||
@Test
|
||||
public void testWhenDecreasingReplication() throws IOException {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
|
||||
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
|
||||
FileSystem fs = cluster.getFileSystem();
|
||||
final FSNamesystem namesystem = cluster.getNamesystem();
|
||||
|
||||
try {
|
||||
final Path fileName = new Path("/foo1");
|
||||
DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
|
||||
DFSTestUtil.waitReplication(fs, fileName, (short) 3);
|
||||
|
||||
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
|
||||
corruptBlock(cluster, fs, fileName, 0, block);
|
||||
|
||||
DFSTestUtil.waitReplication(fs, fileName, (short) 2);
|
||||
|
||||
assertEquals(2, countReplicas(namesystem, block).liveReplicas());
|
||||
assertEquals(1, countReplicas(namesystem, block).corruptReplicas());
|
||||
|
||||
namesystem.setReplication(fileName.toString(), (short) 2);
|
||||
|
||||
// wait for 3 seconds so that all block reports are processed.
|
||||
try {
|
||||
Thread.sleep(3000);
|
||||
} catch (InterruptedException ignored) {
|
||||
}
|
||||
|
||||
assertEquals(2, countReplicas(namesystem, block).liveReplicas());
|
||||
assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
|
||||
|
||||
} finally {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The corrupt block has to be removed when the number of valid replicas
|
||||
* matches replication factor for the file. In this test, the above
|
||||
* condition is achieved by increasing the number of good replicas by
|
||||
* replicating on a new Datanode.
|
||||
* The test strategy :
|
||||
* Bring up Cluster with 3 DataNodes
|
||||
* Create a file of replication factor 3
|
||||
* Corrupt one replica of a block of the file
|
||||
* Verify that there are still 2 good replicas and 1 corrupt replica
|
||||
* (corrupt replica should not be removed since number of good replicas
|
||||
* (2) is less than replication factor (3))
|
||||
* Start a new data node
|
||||
* Verify that a new replica is created and the corrupt replica is
|
||||
* removed.
|
||||
*
|
||||
*/
|
||||
@Test
|
||||
public void testByAddingAnExtraDataNode() throws IOException {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
|
||||
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
|
||||
FileSystem fs = cluster.getFileSystem();
|
||||
final FSNamesystem namesystem = cluster.getNamesystem();
|
||||
DataNodeProperties dnPropsFourth = cluster.stopDataNode(3);
|
||||
|
||||
try {
|
||||
final Path fileName = new Path("/foo1");
|
||||
DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
|
||||
DFSTestUtil.waitReplication(fs, fileName, (short) 3);
|
||||
|
||||
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
|
||||
corruptBlock(cluster, fs, fileName, 0, block);
|
||||
|
||||
DFSTestUtil.waitReplication(fs, fileName, (short) 2);
|
||||
|
||||
assertEquals(2, countReplicas(namesystem, block).liveReplicas());
|
||||
assertEquals(1, countReplicas(namesystem, block).corruptReplicas());
|
||||
|
||||
cluster.restartDataNode(dnPropsFourth);
|
||||
|
||||
DFSTestUtil.waitReplication(fs, fileName, (short) 3);
|
||||
|
||||
assertEquals(3, countReplicas(namesystem, block).liveReplicas());
|
||||
assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
|
||||
} finally {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The corrupt block has to be removed when the number of valid replicas
|
||||
* matches replication factor for the file. The above condition should hold
|
||||
* true as long as there is one good replica. This test verifies that.
|
||||
*
|
||||
* The test strategy :
|
||||
* Bring up Cluster with 2 DataNodes
|
||||
* Create a file of replication factor 2
|
||||
* Corrupt one replica of a block of the file
|
||||
* Verify that there is one good replica and 1 corrupt replica
|
||||
* (corrupt replica should not be removed since number of good
|
||||
* replicas (1) is less than replication factor (2)).
|
||||
* Set the replication factor to 1
|
||||
* Verify that the corrupt replica is removed.
|
||||
* (corrupt replica should be removed since number of good
|
||||
* replicas (1) is equal to replication factor (1))
|
||||
*/
|
||||
@Test
|
||||
public void testWithReplicationFactorAsOne() throws IOException {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
|
||||
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
|
||||
FileSystem fs = cluster.getFileSystem();
|
||||
final FSNamesystem namesystem = cluster.getNamesystem();
|
||||
|
||||
try {
|
||||
final Path fileName = new Path("/foo1");
|
||||
DFSTestUtil.createFile(fs, fileName, 2, (short) 2, 0L);
|
||||
DFSTestUtil.waitReplication(fs, fileName, (short) 2);
|
||||
|
||||
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
|
||||
corruptBlock(cluster, fs, fileName, 0, block);
|
||||
|
||||
DFSTestUtil.waitReplication(fs, fileName, (short) 1);
|
||||
|
||||
assertEquals(1, countReplicas(namesystem, block).liveReplicas());
|
||||
assertEquals(1, countReplicas(namesystem, block).corruptReplicas());
|
||||
|
||||
namesystem.setReplication(fileName.toString(), (short) 1);
|
||||
|
||||
// wait for 3 seconds so that all block reports are processed.
|
||||
try {
|
||||
Thread.sleep(3000);
|
||||
} catch (InterruptedException ignored) {
|
||||
}
|
||||
|
||||
assertEquals(1, countReplicas(namesystem, block).liveReplicas());
|
||||
assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
|
||||
|
||||
} finally {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* None of the blocks can be removed if all blocks are corrupt.
|
||||
*
|
||||
* The test strategy :
|
||||
* Bring up Cluster with 3 DataNodes
|
||||
* Create a file of replication factor 3
|
||||
* Corrupt all three replicas
|
||||
* Verify that all replicas are corrupt and 3 replicas are present.
|
||||
* Set the replication factor to 1
|
||||
* Verify that all replicas are corrupt and 3 replicas are present.
|
||||
*/
|
||||
@Test
|
||||
public void testWithAllCorruptReplicas() throws IOException {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
|
||||
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
|
||||
FileSystem fs = cluster.getFileSystem();
|
||||
final FSNamesystem namesystem = cluster.getNamesystem();
|
||||
|
||||
try {
|
||||
final Path fileName = new Path("/foo1");
|
||||
DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
|
||||
DFSTestUtil.waitReplication(fs, fileName, (short) 3);
|
||||
|
||||
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
|
||||
corruptBlock(cluster, fs, fileName, 0, block);
|
||||
|
||||
corruptBlock(cluster, fs, fileName, 1, block);
|
||||
|
||||
corruptBlock(cluster, fs, fileName, 2, block);
|
||||
|
||||
// wait for 3 seconds so that all block reports are processed.
|
||||
try {
|
||||
Thread.sleep(3000);
|
||||
} catch (InterruptedException ignored) {
|
||||
}
|
||||
|
||||
assertEquals(0, countReplicas(namesystem, block).liveReplicas());
|
||||
assertEquals(3, countReplicas(namesystem, block).corruptReplicas());
|
||||
|
||||
namesystem.setReplication(fileName.toString(), (short) 1);
|
||||
|
||||
// wait for 3 seconds so that all block reports are processed.
|
||||
try {
|
||||
Thread.sleep(3000);
|
||||
} catch (InterruptedException ignored) {
|
||||
}
|
||||
|
||||
assertEquals(0, countReplicas(namesystem, block).liveReplicas());
|
||||
assertEquals(3, countReplicas(namesystem, block).corruptReplicas());
|
||||
|
||||
} finally {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
private static NumberReplicas countReplicas(final FSNamesystem namesystem, ExtendedBlock block) {
|
||||
return namesystem.getBlockManager().countNodes(block.getLocalBlock());
|
||||
}
|
||||
|
||||
private void corruptBlock(MiniDFSCluster cluster, FileSystem fs, final Path fileName,
|
||||
int dnIndex, ExtendedBlock block) throws IOException {
|
||||
// corrupt the block on datanode dnIndex
|
||||
// the indexes change once the nodes are restarted.
|
||||
// But the data directory will not change
|
||||
assertTrue(MiniDFSCluster.corruptReplica(dnIndex, block));
|
||||
|
||||
DataNodeProperties dnProps = cluster.stopDataNode(0);
|
||||
|
||||
// Each datanode has multiple data dirs, check each
|
||||
for (int dirIndex = 0; dirIndex < 2; dirIndex++) {
|
||||
final String bpid = cluster.getNamesystem().getBlockPoolId();
|
||||
File storageDir = MiniDFSCluster.getStorageDir(dnIndex, dirIndex);
|
||||
File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
|
||||
File scanLogFile = new File(dataDir, "dncp_block_verification.log.curr");
|
||||
if (scanLogFile.exists()) {
|
||||
// wait for one minute for deletion to succeed;
|
||||
for (int i = 0; !scanLogFile.delete(); i++) {
|
||||
assertTrue("Could not delete log file in one minute", i < 60);
|
||||
try {
|
||||
Thread.sleep(1000);
|
||||
} catch (InterruptedException ignored) {
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// restart the datanode so the corrupt replica will be detected
|
||||
cluster.restartDataNode(dnProps);
|
||||
}
|
||||
}
|
|
@ -18,17 +18,23 @@
|
|||
|
||||
package org.apache.hadoop.hdfs.web;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.net.HttpURLConnection;
|
||||
import java.net.URI;
|
||||
import java.net.URL;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.BlockLocation;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.FileSystemContractBaseTest;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
|
||||
import org.apache.hadoop.security.AccessControlException;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
|
||||
|
@ -114,4 +120,42 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
|
|||
// also okay for HDFS.
|
||||
}
|
||||
}
|
||||
|
||||
public void testGetFileBlockLocations() throws IOException {
|
||||
final String f = "/test/testGetFileBlockLocations";
|
||||
createFile(path(f));
|
||||
final BlockLocation[] computed = fs.getFileBlockLocations(new Path(f), 0L, 1L);
|
||||
final BlockLocation[] expected = cluster.getFileSystem().getFileBlockLocations(
|
||||
new Path(f), 0L, 1L);
|
||||
assertEquals(expected.length, computed.length);
|
||||
for(int i = 0; i < computed.length; i++) {
|
||||
assertEquals(expected[i].toString(), computed[i].toString());
|
||||
}
|
||||
}
|
||||
|
||||
public void testCaseInsensitive() throws IOException {
|
||||
final Path p = new Path("/test/testCaseInsensitive");
|
||||
final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
|
||||
final PutOpParam.Op op = PutOpParam.Op.MKDIRS;
|
||||
|
||||
//replace query with mix case letters
|
||||
final URL url = webhdfs.toUrl(op, p);
|
||||
WebHdfsFileSystem.LOG.info("url = " + url);
|
||||
final URL replaced = new URL(url.toString().replace(op.toQueryString(),
|
||||
"Op=mkDIrs"));
|
||||
WebHdfsFileSystem.LOG.info("replaced = " + replaced);
|
||||
|
||||
//connect with the replaced URL.
|
||||
final HttpURLConnection conn = (HttpURLConnection)replaced.openConnection();
|
||||
conn.setRequestMethod(op.getType().toString());
|
||||
conn.connect();
|
||||
final BufferedReader in = new BufferedReader(new InputStreamReader(
|
||||
conn.getInputStream()));
|
||||
for(String line; (line = in.readLine()) != null; ) {
|
||||
WebHdfsFileSystem.LOG.info("> " + line);
|
||||
}
|
||||
|
||||
//check if the command succeeds.
|
||||
assertTrue(fs.getFileStatus(p).isDirectory());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -29,6 +29,8 @@ Trunk (unreleased changes)
|
|||
findBugs, correct links to findBugs artifacts and no links to the
|
||||
artifacts when there are no warnings. (Tom White via vinodkv).
|
||||
|
||||
MAPREDUCE-3081. Fix vaidya startup script. (gkesavan via suhas).
|
||||
|
||||
Release 0.23.0 - Unreleased
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
@ -70,6 +72,9 @@ Release 0.23.0 - Unreleased
|
|||
MAPREDUCE-2037. Capture intermediate progress, CPU and memory usage for
|
||||
tasks. (Dick King via acmurthy)
|
||||
|
||||
MAPREDUCE-2930. Added the ability to be able to generate graphs from the
|
||||
state-machine definitions. (Binglin Chang via vinodkv)
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
MAPREDUCE-2187. Reporter sends progress during sort/merge. (Anupam Seth via
|
||||
|
@ -307,6 +312,15 @@ Release 0.23.0 - Unreleased
|
|||
MAPREDUCE-2726. Added job-file to the AM and JobHistoryServer web
|
||||
interfaces. (Jeffrey Naisbitt via vinodkv)
|
||||
|
||||
MAPREDUCE-3055. Simplified ApplicationAttemptId passing to
|
||||
ApplicationMaster via environment variable. (vinodkv)
|
||||
|
||||
MAPREDUCE-3092. Removed a special comparator for JobIDs in JobHistory as
|
||||
JobIDs are already comparable. (Devaraj K via vinodkv)
|
||||
|
||||
MAPREDUCE-3099. Add docs for setting up a single node MRv2 cluster.
|
||||
(mahadev)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
MAPREDUCE-2026. Make JobTracker.getJobCounters() and
|
||||
|
@ -318,6 +332,9 @@ Release 0.23.0 - Unreleased
|
|||
|
||||
MAPREDUCE-901. Efficient framework counters. (llu via acmurthy)
|
||||
|
||||
MAPREDUCE-2880. Improve classpath-construction for mapreduce AM and
|
||||
containers. (Arun C Murthy via vinodkv)
|
||||
|
||||
BUG FIXES
|
||||
|
||||
MAPREDUCE-2603. Disable High-Ram emulation in system tests.
|
||||
|
@ -1370,6 +1387,91 @@ Release 0.23.0 - Unreleased
|
|||
YarnClientProtocolProvider and ensured MiniMRYarnCluster sets JobHistory
|
||||
configuration for tests. (acmurthy)
|
||||
|
||||
MAPREDUCE-3018. Fixed -file option for streaming. (mahadev via acmurthy)
|
||||
|
||||
MAPREDUCE-3036. Fixed metrics for reserved resources in CS. (Robert Evans
|
||||
via acmurthy)
|
||||
|
||||
MAPREDUCE-2998. Fixed a bug in TaskAttemptImpl which caused it to fork
|
||||
bin/mapred too many times. (vinodkv via acmurthy)
|
||||
|
||||
MAPREDUCE-3023. Fixed clients to display queue state correctly. (Ravi
|
||||
Prakash via acmurthy)
|
||||
|
||||
MAPREDUCE-2970. Fixed NPEs in corner cases with different configurations
|
||||
for mapreduce.framework.name. (Venu Gopala Rao via vinodkv)
|
||||
|
||||
MAPREDUCE-3062. Fixed default RMAdmin address. (Chris Riccomini
|
||||
via acmurthy)
|
||||
|
||||
MAPREDUCE-3066. Fixed default ResourceTracker address for the NodeManager.
|
||||
(Chris Riccomini via acmurthy)
|
||||
|
||||
MAPREDUCE-3044. Pipes jobs stuck without making progress. (mahadev)
|
||||
|
||||
MAPREDUCE-2754. Fixed MR AM stdout, stderr and syslog to redirect to
|
||||
correct log-files. (Ravi Teja Ch N V via vinodkv)
|
||||
|
||||
MAPREDUCE-3073. Fixed build issues in MR1. (mahadev via acmurthy)
|
||||
|
||||
MAPREDUCE-2691. Increase threadpool size for launching containers in
|
||||
MapReduce ApplicationMaster. (vinodkv via acmurthy)
|
||||
|
||||
|
||||
MAPREDUCE-2990. Fixed display of NodeHealthStatus. (Subroto Sanyal via
|
||||
acmurthy)
|
||||
|
||||
MAPREDUCE-3053. Better diagnostic message for unknown methods in ProtoBuf
|
||||
RPCs. (vinodkv via acmurthy)
|
||||
|
||||
MAPREDUCE-2952. Fixed ResourceManager/MR-client to consume diagnostics
|
||||
for AM failures in a couple of corner cases. (Arun C Murthy via vinodkv)
|
||||
|
||||
MAPREDUCE-3064. 27 unit test failures with Invalid
|
||||
"mapreduce.jobtracker.address" configuration value for
|
||||
JobTracker: "local" (Venu Gopala Rao via mahadev)
|
||||
|
||||
MAPREDUCE-3090. Fix MR AM to use ApplicationAttemptId rather than
|
||||
(ApplicationId, startCount) consistently. (acmurthy)
|
||||
|
||||
MAPREDUCE-2646. Fixed AMRMProtocol to return containers based on
|
||||
priority. (Sharad Agarwal and Arun C Murthy via vinodkv)
|
||||
|
||||
MAPREDUCE-3031. Proper handling of killed containers to prevent stuck
|
||||
containers/AMs on an external kill signal. (Siddharth Seth via vinodkv)
|
||||
|
||||
MAPREDUCE-2984. Better error message for displaying completed containers.
|
||||
(Devaraj K via acmurthy)
|
||||
|
||||
MAPREDUCE-3071. app master configuration web UI link under the Job menu
|
||||
opens up application menu. (thomas graves via mahadev)
|
||||
|
||||
MAPREDUCE-3067. Ensure exit-code is set correctly for containers. (Hitesh
|
||||
Shah via acmurthy)
|
||||
|
||||
MAPREDUCE-2999. Fix YARN webapp framework to properly filter servlet
|
||||
paths. (Thomas Graves via vinodkv)
|
||||
|
||||
MAPREDUCE-3095. fairscheduler ivy including wrong version for hdfs.
|
||||
(John George via mahadev)
|
||||
|
||||
MAPREDUCE-3054. Unable to kill submitted jobs. (mahadev)
|
||||
|
||||
MAPREDUCE-3021. Change base urls for RM web-ui. (Thomas Graves via
|
||||
acmurthy)
|
||||
|
||||
MAPREDUCE-3041. Fixed ClientRMProtocol to provide min/max resource
|
||||
capabilities along-with new ApplicationId for application submission.
|
||||
(Hitesh Shah via acmurthy)
|
||||
|
||||
MAPREDUCE-2843. Fixed the node-table to be completely displayed and making
|
||||
node entries on RM UI to be sortable. (Abhijit Suresh Shingate via vinodkv)
|
||||
|
||||
MAPREDUCE-3110. Fixed TestRPC failure. (vinodkv)
|
||||
|
||||
MAPREDUCE-3078. Ensure MapReduce AM reports progress correctly for
|
||||
displaying on the RM Web-UI. (vinodkv via acmurthy)
|
||||
|
||||
Release 0.22.0 - Unreleased
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
|
|
@ -55,6 +55,12 @@
|
|||
<artifactId>hadoop-yarn-server-resourcemanager</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-yarn-server-resourcemanager</artifactId>
|
||||
<type>test-jar</type>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-mapreduce-client-shuffle</artifactId>
|
||||
|
@ -113,4 +119,41 @@
|
|||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
<profiles>
|
||||
<profile>
|
||||
<id>visualize</id>
|
||||
<activation>
|
||||
<activeByDefault>false</activeByDefault>
|
||||
</activation>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.codehaus.mojo</groupId>
|
||||
<artifactId>exec-maven-plugin</artifactId>
|
||||
<version>1.2</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<phase>compile</phase>
|
||||
<goals>
|
||||
<goal>java</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<classpathScope>test</classpathScope>
|
||||
<mainClass>org.apache.hadoop.yarn.util.VisualizeStateMachine</mainClass>
|
||||
<arguments>
|
||||
<argument>MapReduce</argument>
|
||||
<argument>org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl,
|
||||
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskImpl,
|
||||
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl</argument>
|
||||
<argument>MapReduce.gv</argument>
|
||||
</arguments>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
</profile>
|
||||
</profiles>
|
||||
</project>
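A hedged usage note for the new "visualize" profile above: since the exec goal is bound to the compile phase and the profile is off by default, generating the MapReduce.gv state-machine graph should amount to running, from this module's directory (the working directory is an assumption):

mvn compile -Pvisualize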
|
||||
|
|
|
@ -18,27 +18,27 @@
|
|||
|
||||
package org.apache.hadoop.mapred;
|
||||
|
||||
import java.io.File;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Vector;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.mapred.TaskLog.LogName;
|
||||
import org.apache.hadoop.mapreduce.ID;
|
||||
import org.apache.hadoop.mapreduce.MRJobConfig;
|
||||
import org.apache.hadoop.mapreduce.v2.util.MRApps;
|
||||
import org.apache.hadoop.yarn.api.ApplicationConstants;
|
||||
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
|
||||
public class MapReduceChildJVM {
|
||||
private static final String SYSTEM_PATH_SEPARATOR =
|
||||
System.getProperty("path.separator");
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(MapReduceChildJVM.class);
|
||||
|
||||
private static File getTaskLogFile(String logDir, LogName filter) {
|
||||
return new File(logDir, filter.toString());
|
||||
private static String getTaskLogFile(LogName filter) {
|
||||
return ApplicationConstants.LOG_DIR_EXPANSION_VAR + Path.SEPARATOR +
|
||||
filter.toString();
|
||||
}
|
||||
|
||||
private static String getChildEnv(JobConf jobConf, boolean isMap) {
|
||||
|
@@ -50,32 +50,53 @@ public class MapReduceChildJVM {
        jobConf.get(jobConf.MAPRED_TASK_ENV));
  }

  public static void setVMEnv(Map<String, String> env,
      List<String> classPaths, String pwd, String containerLogDir,
      String nmLdLibraryPath, Task task, CharSequence applicationTokensFile) {
  private static String getChildLogLevel(JobConf conf, boolean isMap) {
    if (isMap) {
      return conf.get(
          MRJobConfig.MAP_LOG_LEVEL,
          JobConf.DEFAULT_LOG_LEVEL.toString()
          );
    } else {
      return conf.get(
          MRJobConfig.REDUCE_LOG_LEVEL,
          JobConf.DEFAULT_LOG_LEVEL.toString()
          );
    }
  }

  public static void setVMEnv(Map<String, String> environment,
      Task task) {

    JobConf conf = task.conf;

    // Add classpath.
    CharSequence cp = env.get("CLASSPATH");
    String classpath = StringUtils.join(SYSTEM_PATH_SEPARATOR, classPaths);
    if (null == cp) {
      env.put("CLASSPATH", classpath);
    } else {
      env.put("CLASSPATH", classpath + SYSTEM_PATH_SEPARATOR + cp);
    }
    // Shell
    environment.put(
        Environment.SHELL.name(),
        conf.get(
            MRJobConfig.MAPRED_ADMIN_USER_SHELL,
            MRJobConfig.DEFAULT_SHELL)
        );

    // Add pwd to LD_LIBRARY_PATH, add this before adding anything else
    MRApps.addToEnvironment(
        environment,
        Environment.LD_LIBRARY_PATH.name(),
        Environment.PWD.$());

    /////// Environmental variable LD_LIBRARY_PATH
    StringBuilder ldLibraryPath = new StringBuilder();
    // Add the env variables passed by the user & admin
    String mapredChildEnv = getChildEnv(conf, task.isMapTask());
    MRApps.setEnvFromInputString(environment, mapredChildEnv);
    MRApps.setEnvFromInputString(
        environment,
        conf.get(
            MRJobConfig.MAPRED_ADMIN_USER_ENV,
            MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV)
        );

    ldLibraryPath.append(nmLdLibraryPath);
    ldLibraryPath.append(SYSTEM_PATH_SEPARATOR);
    ldLibraryPath.append(pwd);
    env.put("LD_LIBRARY_PATH", ldLibraryPath.toString());
    /////// Environmental variable LD_LIBRARY_PATH

    // for the child of task jvm, set hadoop.root.logger
    env.put("HADOOP_ROOT_LOGGER", "DEBUG,CLA"); // TODO: Debug
    // Set logging level
    environment.put(
        "HADOOP_ROOT_LOGGER",
        getChildLogLevel(conf, task.isMapTask()) + ",CLA");

    // TODO: The following is useful for instance in streaming tasks. Should be
    // set in ApplicationMaster's env by the RM.
@ -89,76 +110,69 @@ public class MapReduceChildJVM {
|
|||
// properties.
|
||||
long logSize = TaskLog.getTaskLogLength(conf);
|
||||
Vector<String> logProps = new Vector<String>(4);
|
||||
setupLog4jProperties(logProps, logSize, containerLogDir);
|
||||
setupLog4jProperties(logProps, logSize);
|
||||
Iterator<String> it = logProps.iterator();
|
||||
StringBuffer buffer = new StringBuffer();
|
||||
while (it.hasNext()) {
|
||||
buffer.append(" " + it.next());
|
||||
}
|
||||
hadoopClientOpts = hadoopClientOpts + buffer.toString();
|
||||
|
||||
env.put("HADOOP_CLIENT_OPTS", hadoopClientOpts);
|
||||
environment.put("HADOOP_CLIENT_OPTS", hadoopClientOpts);
|
||||
|
||||
// add the env variables passed by the user
|
||||
String mapredChildEnv = getChildEnv(conf, task.isMapTask());
|
||||
if (mapredChildEnv != null && mapredChildEnv.length() > 0) {
|
||||
String childEnvs[] = mapredChildEnv.split(",");
|
||||
for (String cEnv : childEnvs) {
|
||||
String[] parts = cEnv.split("="); // split on '='
|
||||
String value = (String) env.get(parts[0]);
|
||||
if (value != null) {
|
||||
// replace $env with the child's env constructed by tt's
|
||||
// example LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp
|
||||
value = parts[1].replace("$" + parts[0], value);
|
||||
} else {
|
||||
// this key is not configured by the tt for the child .. get it
|
||||
// from the tt's env
|
||||
// example PATH=$PATH:/tmp
|
||||
value = System.getenv(parts[0]); // Get from NM?
|
||||
if (value != null) {
|
||||
// the env key is present in the tt's env
|
||||
value = parts[1].replace("$" + parts[0], value);
|
||||
} else {
|
||||
// the env key is note present anywhere .. simply set it
|
||||
// example X=$X:/tmp or X=/tmp
|
||||
value = parts[1].replace("$" + parts[0], "");
|
||||
}
|
||||
}
|
||||
env.put(parts[0], value);
|
||||
}
|
||||
}
|
||||
|
||||
//This should not be set here (If an OS check is requied. moved to ContainerLuanch)
|
||||
// env.put("JVM_PID", "`echo $$`");
|
||||
|
||||
env.put(Constants.STDOUT_LOGFILE_ENV,
|
||||
getTaskLogFile(containerLogDir, TaskLog.LogName.STDOUT).toString());
|
||||
env.put(Constants.STDERR_LOGFILE_ENV,
|
||||
getTaskLogFile(containerLogDir, TaskLog.LogName.STDERR).toString());
|
||||
// Add stdout/stderr env
|
||||
environment.put(
|
||||
MRJobConfig.STDOUT_LOGFILE_ENV,
|
||||
getTaskLogFile(TaskLog.LogName.STDOUT)
|
||||
);
|
||||
environment.put(
|
||||
MRJobConfig.STDERR_LOGFILE_ENV,
|
||||
getTaskLogFile(TaskLog.LogName.STDERR)
|
||||
);
|
||||
}
|
||||
|
||||
private static String getChildJavaOpts(JobConf jobConf, boolean isMapTask) {
|
||||
String userClasspath = "";
|
||||
String adminClasspath = "";
|
||||
if (isMapTask) {
|
||||
return jobConf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, jobConf.get(
|
||||
JobConf.MAPRED_TASK_JAVA_OPTS,
|
||||
JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS));
|
||||
userClasspath =
|
||||
jobConf.get(
|
||||
JobConf.MAPRED_MAP_TASK_JAVA_OPTS,
|
||||
jobConf.get(
|
||||
JobConf.MAPRED_TASK_JAVA_OPTS,
|
||||
JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS)
|
||||
);
|
||||
adminClasspath =
|
||||
jobConf.get(
|
||||
MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS,
|
||||
MRJobConfig.DEFAULT_MAPRED_ADMIN_JAVA_OPTS);
|
||||
} else {
|
||||
userClasspath =
|
||||
jobConf.get(
|
||||
JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS,
|
||||
jobConf.get(
|
||||
JobConf.MAPRED_TASK_JAVA_OPTS,
|
||||
JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS)
|
||||
);
|
||||
adminClasspath =
|
||||
jobConf.get(
|
||||
MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS,
|
||||
MRJobConfig.DEFAULT_MAPRED_ADMIN_JAVA_OPTS);
|
||||
}
|
||||
return jobConf
|
||||
.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, jobConf.get(
|
||||
JobConf.MAPRED_TASK_JAVA_OPTS,
|
||||
JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS));
|
||||
|
||||
// Add admin classpath first so it can be overridden by user.
|
||||
return adminClasspath + " " + userClasspath;
|
||||
}
|
||||
|
||||
private static void setupLog4jProperties(Vector<String> vargs,
|
||||
long logSize, String containerLogDir) {
|
||||
long logSize) {
|
||||
vargs.add("-Dlog4j.configuration=container-log4j.properties");
|
||||
vargs.add("-Dhadoop.yarn.mr.containerLogDir=" + containerLogDir);
|
||||
vargs.add("-Dhadoop.yarn.mr.totalLogFileSize=" + logSize);
|
||||
vargs.add("-D" + MRJobConfig.TASK_LOG_DIR + "=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
|
||||
vargs.add("-D" + MRJobConfig.TASK_LOG_SIZE + "=" + logSize);
|
||||
}
|
||||
|
||||
public static List<String> getVMCommand(
|
||||
InetSocketAddress taskAttemptListenerAddr, Task task, String javaHome,
|
||||
String workDir, String logDir, String childTmpDir, ID jvmID) {
|
||||
InetSocketAddress taskAttemptListenerAddr, Task task,
|
||||
ID jvmID) {
|
||||
|
||||
TaskAttemptID attemptID = task.getTaskID();
|
||||
JobConf conf = task.conf;
|
||||
|
@ -166,7 +180,7 @@ public class MapReduceChildJVM {
|
|||
Vector<String> vargs = new Vector<String>(8);
|
||||
|
||||
vargs.add("exec");
|
||||
vargs.add(javaHome + "/bin/java");
|
||||
vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
|
||||
|
||||
// Add child (task) java-vm options.
|
||||
//
|
||||
|
@@ -199,44 +213,26 @@ public class MapReduceChildJVM {
    String javaOpts = getChildJavaOpts(conf, task.isMapTask());
    javaOpts = javaOpts.replace("@taskid@", attemptID.toString());
    String [] javaOptsSplit = javaOpts.split(" ");

    // Add java.library.path; necessary for loading native libraries.
    //
    // 1. We add the 'cwd' of the task to it's java.library.path to help
    //    users distribute native libraries via the DistributedCache.
    // 2. The user can also specify extra paths to be added to the
    //    java.library.path via mapred.{map|reduce}.child.java.opts.
    //
    String libraryPath = workDir;
    boolean hasUserLDPath = false;
    for(int i=0; i<javaOptsSplit.length ;i++) {
      if(javaOptsSplit[i].startsWith("-Djava.library.path=")) {
        // TODO: Does the above take care of escaped space chars
        javaOptsSplit[i] += SYSTEM_PATH_SEPARATOR + libraryPath;
        hasUserLDPath = true;
        break;
      }
    }
    if(!hasUserLDPath) {
      vargs.add("-Djava.library.path=" + libraryPath);
    }
    for (int i = 0; i < javaOptsSplit.length; i++) {
      vargs.add(javaOptsSplit[i]);
    }

    if (childTmpDir != null) {
      vargs.add("-Djava.io.tmpdir=" + childTmpDir);
    }
    String childTmpDir = Environment.PWD.$() + Path.SEPARATOR + "tmp";
    vargs.add("-Djava.io.tmpdir=" + childTmpDir);

    // Setup the log4j prop
    long logSize = TaskLog.getTaskLogLength(conf);
    setupLog4jProperties(vargs, logSize, logDir);
    setupLog4jProperties(vargs, logSize);

    if (conf.getProfileEnabled()) {
      if (conf.getProfileTaskRange(task.isMapTask()
          ).isIncluded(task.getPartition())) {
        File prof = getTaskLogFile(logDir, TaskLog.LogName.PROFILE);
        vargs.add(String.format(conf.getProfileParams(), prof.toString()));
        vargs.add(
            String.format(
                conf.getProfileParams(),
                getTaskLogFile(TaskLog.LogName.PROFILE)
                )
            );
      }
    }
@@ -249,8 +245,8 @@ public class MapReduceChildJVM {

    // Finally add the jvmID
    vargs.add(String.valueOf(jvmID.getId()));
    vargs.add("1>" + getTaskLogFile(logDir, TaskLog.LogName.STDERR));
    vargs.add("2>" + getTaskLogFile(logDir, TaskLog.LogName.STDOUT));
    vargs.add("1>" + getTaskLogFile(TaskLog.LogName.STDOUT));
    vargs.add("2>" + getTaskLogFile(TaskLog.LogName.STDERR));

    // Final commmand
    StringBuilder mergedCommand = new StringBuilder();
@ -47,7 +47,6 @@ import org.apache.hadoop.mapreduce.filecache.DistributedCache;
|
|||
import org.apache.hadoop.mapreduce.security.TokenCache;
|
||||
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
|
||||
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
|
||||
import org.apache.hadoop.mapreduce.v2.MRConstants;
|
||||
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
|
||||
import org.apache.hadoop.metrics2.source.JvmMetrics;
|
||||
import org.apache.hadoop.security.Credentials;
|
||||
|
@ -71,7 +70,7 @@ class YarnChild {
|
|||
LOG.debug("Child starting");
|
||||
|
||||
final JobConf defaultConf = new JobConf();
|
||||
defaultConf.addResource(MRConstants.JOB_CONF_FILE);
|
||||
defaultConf.addResource(MRJobConfig.JOB_CONF_FILE);
|
||||
UserGroupInformation.setConfiguration(defaultConf);
|
||||
|
||||
String host = args[0];
|
||||
|
@ -238,7 +237,7 @@ class YarnChild {
|
|||
|
||||
private static JobConf configureTask(Task task, Credentials credentials,
|
||||
Token<JobTokenIdentifier> jt) throws IOException {
|
||||
final JobConf job = new JobConf(MRConstants.JOB_CONF_FILE);
|
||||
final JobConf job = new JobConf(MRJobConfig.JOB_CONF_FILE);
|
||||
job.setCredentials(credentials);
|
||||
// set tcp nodelay
|
||||
job.setBoolean("ipc.client.tcpnodelay", true);
|
||||
|
@ -260,7 +259,7 @@ class YarnChild {
|
|||
|
||||
// Overwrite the localized task jobconf which is linked to in the current
|
||||
// work-dir.
|
||||
Path localTaskFile = new Path(Constants.JOBFILE);
|
||||
Path localTaskFile = new Path(MRJobConfig.JOB_CONF_FILE);
|
||||
writeLocalJobFile(localTaskFile, job);
|
||||
task.setJobFile(localTaskFile.toString());
|
||||
task.setConf(job);
|
||||
|
|
|
@ -39,7 +39,6 @@ import org.apache.hadoop.mapreduce.MRJobConfig;
|
|||
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
|
||||
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
|
||||
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
|
||||
import org.apache.hadoop.mapreduce.v2.MRConstants;
|
||||
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
|
||||
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
|
||||
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
|
||||
|
@ -78,6 +77,7 @@ import org.apache.hadoop.security.token.TokenIdentifier;
|
|||
import org.apache.hadoop.yarn.Clock;
|
||||
import org.apache.hadoop.yarn.SystemClock;
|
||||
import org.apache.hadoop.yarn.YarnException;
|
||||
import org.apache.hadoop.yarn.api.ApplicationConstants;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationId;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
|
@ -88,6 +88,7 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
|
|||
import org.apache.hadoop.yarn.service.AbstractService;
|
||||
import org.apache.hadoop.yarn.service.CompositeService;
|
||||
import org.apache.hadoop.yarn.service.Service;
|
||||
import org.apache.hadoop.yarn.util.ConverterUtils;
|
||||
|
||||
/**
|
||||
* The Map-Reduce Application Master.
|
||||
|
@ -114,8 +115,6 @@ public class MRAppMaster extends CompositeService {
|
|||
private Clock clock;
|
||||
private final long startTime = System.currentTimeMillis();
|
||||
private String appName;
|
||||
private final int startCount;
|
||||
private final ApplicationId appID;
|
||||
private final ApplicationAttemptId appAttemptID;
|
||||
protected final MRAppMetrics metrics;
|
||||
private Set<TaskId> completedTasksFromPreviousRun;
|
||||
|
@@ -133,21 +132,16 @@ public class MRAppMaster extends CompositeService {

  private Job job;

  public MRAppMaster(ApplicationId applicationId, int startCount) {
    this(applicationId, new SystemClock(), startCount);
  public MRAppMaster(ApplicationAttemptId applicationAttemptId) {
    this(applicationAttemptId, new SystemClock());
  }

  public MRAppMaster(ApplicationId applicationId, Clock clock, int startCount) {
  public MRAppMaster(ApplicationAttemptId applicationAttemptId, Clock clock) {
    super(MRAppMaster.class.getName());
    this.clock = clock;
    this.appID = applicationId;
    this.appAttemptID = RecordFactoryProvider.getRecordFactory(null)
        .newRecordInstance(ApplicationAttemptId.class);
    this.appAttemptID.setApplicationId(appID);
    this.appAttemptID.setAttemptId(startCount);
    this.startCount = startCount;
    this.appAttemptID = applicationAttemptId;
    this.metrics = MRAppMetrics.create();
    LOG.info("Created MRAppMaster for application " + applicationId);
    LOG.info("Created MRAppMaster for application " + applicationAttemptId);
  }

  @Override
@ -159,9 +153,9 @@ public class MRAppMaster extends CompositeService {
|
|||
appName = conf.get(MRJobConfig.JOB_NAME, "<missing app name>");
|
||||
|
||||
if (conf.getBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, false)
|
||||
&& startCount > 1) {
|
||||
&& appAttemptID.getAttemptId() > 1) {
|
||||
LOG.info("Recovery is enabled. Will try to recover from previous life.");
|
||||
Recovery recoveryServ = new RecoveryService(appID, clock, startCount);
|
||||
Recovery recoveryServ = new RecoveryService(appAttemptID, clock);
|
||||
addIfService(recoveryServ);
|
||||
dispatcher = recoveryServ.getDispatcher();
|
||||
clock = recoveryServ.getClock();
|
||||
|
@ -243,10 +237,10 @@ public class MRAppMaster extends CompositeService {
|
|||
// Read the file-system tokens from the localized tokens-file.
|
||||
Path jobSubmitDir =
|
||||
FileContext.getLocalFSFileContext().makeQualified(
|
||||
new Path(new File(MRConstants.JOB_SUBMIT_DIR)
|
||||
new Path(new File(MRJobConfig.JOB_SUBMIT_DIR)
|
||||
.getAbsolutePath()));
|
||||
Path jobTokenFile =
|
||||
new Path(jobSubmitDir, MRConstants.APPLICATION_TOKENS_FILE);
|
||||
new Path(jobSubmitDir, MRJobConfig.APPLICATION_TOKENS_FILE);
|
||||
fsTokens.addAll(Credentials.readTokenStorageFile(jobTokenFile, conf));
|
||||
LOG.info("jobSubmitDir=" + jobSubmitDir + " jobTokenFile="
|
||||
+ jobTokenFile);
|
||||
|
@ -264,8 +258,8 @@ public class MRAppMaster extends CompositeService {
|
|||
// ////////// End of obtaining the tokens needed by the job. //////////
|
||||
|
||||
// create single job
|
||||
Job newJob = new JobImpl(appID, conf, dispatcher.getEventHandler(),
|
||||
taskAttemptListener, jobTokenSecretManager, fsTokens, clock, startCount,
|
||||
Job newJob = new JobImpl(appAttemptID, conf, dispatcher.getEventHandler(),
|
||||
taskAttemptListener, jobTokenSecretManager, fsTokens, clock,
|
||||
completedTasksFromPreviousRun, metrics, currentUser.getUserName());
|
||||
((RunningAppContext) context).jobs.put(newJob.getID(), newJob);
|
||||
|
||||
|
@ -376,11 +370,11 @@ public class MRAppMaster extends CompositeService {
|
|||
}
|
||||
|
||||
public ApplicationId getAppID() {
|
||||
return appID;
|
||||
return appAttemptID.getApplicationId();
|
||||
}
|
||||
|
||||
public int getStartCount() {
|
||||
return startCount;
|
||||
return appAttemptID.getAttemptId();
|
||||
}
|
||||
|
||||
public AppContext getContext() {
|
||||
|
@ -505,7 +499,7 @@ public class MRAppMaster extends CompositeService {
|
|||
|
||||
@Override
|
||||
public ApplicationId getApplicationID() {
|
||||
return appID;
|
||||
return appAttemptID.getApplicationId();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -555,9 +549,9 @@ public class MRAppMaster extends CompositeService {
|
|||
// It's more test friendly to put it here.
|
||||
DefaultMetricsSystem.initialize("MRAppMaster");
|
||||
|
||||
/** create a job event for job intialization */
|
||||
// create a job event for job intialization
|
||||
JobEvent initJobEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT);
|
||||
/** send init to the job (this does NOT trigger job execution) */
|
||||
// Send init to the job (this does NOT trigger job execution)
|
||||
// This is a synchronous call, not an event through dispatcher. We want
|
||||
// job-init to be done completely here.
|
||||
jobEventDispatcher.handle(initJobEvent);
|
||||
|
@@ -648,17 +642,21 @@ public class MRAppMaster extends CompositeService {

  public static void main(String[] args) {
    try {
      //Configuration.addDefaultResource("job.xml");
      ApplicationId applicationId = RecordFactoryProvider
          .getRecordFactory(null).newRecordInstance(ApplicationId.class);
      applicationId.setClusterTimestamp(Long.valueOf(args[0]));
      applicationId.setId(Integer.valueOf(args[1]));
      int failCount = Integer.valueOf(args[2]);
      MRAppMaster appMaster = new MRAppMaster(applicationId, failCount);
      String applicationAttemptIdStr = System
          .getenv(ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV);
      if (applicationAttemptIdStr == null) {
        String msg = ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV
            + " is null";
        LOG.error(msg);
        throw new IOException(msg);
      }
      ApplicationAttemptId applicationAttemptId = ConverterUtils
          .toApplicationAttemptId(applicationAttemptIdStr);
      MRAppMaster appMaster = new MRAppMaster(applicationAttemptId);
      Runtime.getRuntime().addShutdownHook(
          new CompositeServiceShutdownHook(appMaster));
      YarnConfiguration conf = new YarnConfiguration(new JobConf());
      conf.addResource(new Path(MRConstants.JOB_CONF_FILE));
      conf.addResource(new Path(MRJobConfig.JOB_CONF_FILE));
      conf.set(MRJobConfig.USER_NAME,
          System.getProperty("user.name"));
      UserGroupInformation.setConfiguration(conf);
@ -149,7 +149,7 @@ public class MRClientService extends AbstractService
|
|||
+ ":" + server.getPort());
|
||||
LOG.info("Instantiated MRClientService at " + this.bindAddress);
|
||||
try {
|
||||
webApp = WebApps.$for("yarn", AppContext.class, appContext).with(conf).
|
||||
webApp = WebApps.$for("mapreduce", AppContext.class, appContext).with(conf).
|
||||
start(new AMWebApp());
|
||||
} catch (Exception e) {
|
||||
LOG.error("Webapps failed to start. Ignoring for now:", e);
|
||||
|
|
|
@ -64,7 +64,6 @@ import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
|
|||
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;
|
||||
import org.apache.hadoop.mapreduce.task.JobContextImpl;
|
||||
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
|
||||
import org.apache.hadoop.mapreduce.v2.MRConstants;
|
||||
import org.apache.hadoop.mapreduce.v2.api.records.Counter;
|
||||
import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
|
||||
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
|
||||
|
@ -93,6 +92,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
|
|||
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
|
||||
import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
|
||||
import org.apache.hadoop.mapreduce.v2.util.MRApps;
|
||||
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
|
||||
import org.apache.hadoop.security.Credentials;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.authorize.AccessControlList;
|
||||
|
@ -101,6 +101,7 @@ import org.apache.hadoop.util.ReflectionUtils;
|
|||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.apache.hadoop.yarn.Clock;
|
||||
import org.apache.hadoop.yarn.YarnException;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationId;
|
||||
import org.apache.hadoop.yarn.event.EventHandler;
|
||||
import org.apache.hadoop.yarn.factories.RecordFactory;
|
||||
|
@ -129,11 +130,11 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
|
|||
RecordFactoryProvider.getRecordFactory(null);
|
||||
|
||||
//final fields
|
||||
private final ApplicationAttemptId applicationAttemptId;
|
||||
private final Clock clock;
|
||||
private final JobACLsManager aclsManager;
|
||||
private final String username;
|
||||
private final Map<JobACL, AccessControlList> jobACLs;
|
||||
private final int startCount;
|
||||
private final Set<TaskId> completedTasksFromPreviousRun;
|
||||
private final Lock readLock;
|
||||
private final Lock writeLock;
|
||||
|
@ -365,26 +366,26 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
|
|||
private Token<JobTokenIdentifier> jobToken;
|
||||
private JobTokenSecretManager jobTokenSecretManager;
|
||||
|
||||
public JobImpl(ApplicationId appID, Configuration conf,
|
||||
public JobImpl(ApplicationAttemptId applicationAttemptId, Configuration conf,
|
||||
EventHandler eventHandler, TaskAttemptListener taskAttemptListener,
|
||||
JobTokenSecretManager jobTokenSecretManager,
|
||||
Credentials fsTokenCredentials, Clock clock, int startCount,
|
||||
Credentials fsTokenCredentials, Clock clock,
|
||||
Set<TaskId> completedTasksFromPreviousRun, MRAppMetrics metrics,
|
||||
String userName) {
|
||||
|
||||
this.applicationAttemptId = applicationAttemptId;
|
||||
this.jobId = recordFactory.newRecordInstance(JobId.class);
|
||||
this.jobName = conf.get(JobContext.JOB_NAME, "<missing job name>");
|
||||
this.conf = conf;
|
||||
this.metrics = metrics;
|
||||
this.clock = clock;
|
||||
this.completedTasksFromPreviousRun = completedTasksFromPreviousRun;
|
||||
this.startCount = startCount;
|
||||
this.userName = userName;
|
||||
jobId.setAppId(appID);
|
||||
jobId.setId(appID.getId());
|
||||
ApplicationId applicationId = applicationAttemptId.getApplicationId();
|
||||
jobId.setAppId(applicationId);
|
||||
jobId.setId(applicationId.getId());
|
||||
oldJobId = TypeConverter.fromYarn(jobId);
|
||||
LOG.info("Job created" +
|
||||
" appId=" + appID +
|
||||
" appId=" + applicationId +
|
||||
" jobId=" + jobId +
|
||||
" oldJobId=" + oldJobId);
|
||||
|
||||
|
@ -584,25 +585,17 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
|
|||
public JobReport getReport() {
|
||||
readLock.lock();
|
||||
try {
|
||||
JobReport report = recordFactory.newRecordInstance(JobReport.class);
|
||||
report.setJobId(jobId);
|
||||
report.setJobState(getState());
|
||||
|
||||
// TODO - Fix to correctly setup report and to check state
|
||||
if (report.getJobState() == JobState.NEW) {
|
||||
return report;
|
||||
}
|
||||
|
||||
report.setStartTime(startTime);
|
||||
report.setFinishTime(finishTime);
|
||||
report.setSetupProgress(setupProgress);
|
||||
report.setCleanupProgress(cleanupProgress);
|
||||
report.setMapProgress(computeProgress(mapTasks));
|
||||
report.setReduceProgress(computeProgress(reduceTasks));
|
||||
report.setJobName(jobName);
|
||||
report.setUser(username);
|
||||
JobState state = getState();
|
||||
|
||||
return report;
|
||||
if (getState() == JobState.NEW) {
|
||||
return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
|
||||
startTime, finishTime, setupProgress, 0.0f,
|
||||
0.0f, cleanupProgress);
|
||||
}
|
||||
|
||||
return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
|
||||
startTime, finishTime, setupProgress, computeProgress(mapTasks),
|
||||
computeProgress(reduceTasks), cleanupProgress);
|
||||
} finally {
|
||||
readLock.unlock();
|
||||
}
|
||||
|
@ -1007,7 +1000,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
|
|||
FileSystem.get(job.conf).makeQualified(
|
||||
new Path(path, oldJobIDString));
|
||||
job.remoteJobConfFile =
|
||||
new Path(job.remoteJobSubmitDir, MRConstants.JOB_CONF_FILE);
|
||||
new Path(job.remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);
|
||||
|
||||
// Prepare the TaskAttemptListener server for authentication of Containers
|
||||
// TaskAttemptListener gets the information via jobTokenSecretManager.
|
||||
|
@ -1033,7 +1026,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
|
|||
|
||||
Path remoteJobTokenFile =
|
||||
new Path(job.remoteJobSubmitDir,
|
||||
MRConstants.APPLICATION_TOKENS_FILE);
|
||||
MRJobConfig.APPLICATION_TOKENS_FILE);
|
||||
tokenStorage.writeTokenStorageFile(remoteJobTokenFile, job.conf);
|
||||
LOG.info("Writing back the job-token file on the remote file system:"
|
||||
+ remoteJobTokenFile.toString());
|
||||
|
@ -1078,7 +1071,8 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
|
|||
job.conf, splits[i],
|
||||
job.taskAttemptListener,
|
||||
job.committer, job.jobToken, job.fsTokens.getAllTokens(),
|
||||
job.clock, job.completedTasksFromPreviousRun, job.startCount,
|
||||
job.clock, job.completedTasksFromPreviousRun,
|
||||
job.applicationAttemptId.getAttemptId(),
|
||||
job.metrics);
|
||||
job.addTask(task);
|
||||
}
|
||||
|
@ -1095,7 +1089,9 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
|
|||
job.conf, job.numMapTasks,
|
||||
job.taskAttemptListener, job.committer, job.jobToken,
|
||||
job.fsTokens.getAllTokens(), job.clock,
|
||||
job.completedTasksFromPreviousRun, job.startCount, job.metrics);
|
||||
job.completedTasksFromPreviousRun,
|
||||
job.applicationAttemptId.getAttemptId(),
|
||||
job.metrics);
|
||||
job.addTask(task);
|
||||
}
|
||||
LOG.info("Number of reduces for job " + job.jobId + " = "
|
||||
|
|
|
@ -21,7 +21,6 @@ package org.apache.hadoop.mapreduce.v2.app.job.impl;
|
|||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.URI;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
|
@ -62,7 +61,6 @@ import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptStartedEvent;
|
|||
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletionEvent;
|
||||
import org.apache.hadoop.mapreduce.security.TokenCache;
|
||||
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
|
||||
import org.apache.hadoop.mapreduce.v2.MRConstants;
|
||||
import org.apache.hadoop.mapreduce.v2.api.records.Counter;
|
||||
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
|
||||
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
|
||||
|
@ -103,6 +101,7 @@ import org.apache.hadoop.security.token.TokenIdentifier;
|
|||
import org.apache.hadoop.yarn.Clock;
|
||||
import org.apache.hadoop.yarn.YarnException;
|
||||
import org.apache.hadoop.yarn.api.ApplicationConstants;
|
||||
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerId;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerToken;
|
||||
|
@ -117,7 +116,6 @@ import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
|
|||
import org.apache.hadoop.yarn.state.SingleArcTransition;
|
||||
import org.apache.hadoop.yarn.state.StateMachine;
|
||||
import org.apache.hadoop.yarn.state.StateMachineFactory;
|
||||
import org.apache.hadoop.yarn.util.BuilderUtils;
|
||||
import org.apache.hadoop.yarn.util.ConverterUtils;
|
||||
import org.apache.hadoop.yarn.util.RackResolver;
|
||||
|
||||
|
@ -153,7 +151,7 @@ public abstract class TaskAttemptImpl implements
|
|||
private Token<JobTokenIdentifier> jobToken;
|
||||
private static AtomicBoolean initialClasspathFlag = new AtomicBoolean();
|
||||
private static String initialClasspath = null;
|
||||
private final Object classpathLock = new Object();
|
||||
private static final Object classpathLock = new Object();
|
||||
private long launchTime;
|
||||
private long finishTime;
|
||||
private WrappedProgressSplitsBlock progressSplitBlock;
|
||||
|
@ -518,8 +516,8 @@ public abstract class TaskAttemptImpl implements
|
|||
return initialClasspath;
|
||||
}
|
||||
Map<String, String> env = new HashMap<String, String>();
|
||||
MRApps.setInitialClasspath(env);
|
||||
initialClasspath = env.get(MRApps.CLASSPATH);
|
||||
MRApps.setClasspath(env);
|
||||
initialClasspath = env.get(Environment.CLASSPATH.name());
|
||||
initialClasspathFlag.set(true);
|
||||
return initialClasspath;
|
||||
}
|
||||
|
@ -531,16 +529,18 @@ public abstract class TaskAttemptImpl implements
|
|||
*/
|
||||
private ContainerLaunchContext createContainerLaunchContext() {
|
||||
|
||||
ContainerLaunchContext container =
|
||||
recordFactory.newRecordInstance(ContainerLaunchContext.class);
|
||||
|
||||
// Application resources
|
||||
Map<String, LocalResource> localResources =
|
||||
new HashMap<String, LocalResource>();
|
||||
|
||||
// Application environment
|
||||
Map<String, String> environment = new HashMap<String, String>();
|
||||
|
||||
|
||||
// Service data
|
||||
Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
|
||||
|
||||
// Tokens
|
||||
ByteBuffer tokens = ByteBuffer.wrap(new byte[]{});
|
||||
try {
|
||||
FileSystem remoteFS = FileSystem.get(conf);
|
||||
|
||||
|
@ -550,7 +550,7 @@ public abstract class TaskAttemptImpl implements
|
|||
MRJobConfig.JAR))).makeQualified(remoteFS.getUri(),
|
||||
remoteFS.getWorkingDirectory());
|
||||
localResources.put(
|
||||
MRConstants.JOB_JAR,
|
||||
MRJobConfig.JOB_JAR,
|
||||
createLocalResource(remoteFS, recordFactory, remoteJobJar,
|
||||
LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
|
||||
LOG.info("The job-jar file on the remote FS is "
|
||||
|
@ -570,9 +570,9 @@ public abstract class TaskAttemptImpl implements
|
|||
Path remoteJobSubmitDir =
|
||||
new Path(path, oldJobId.toString());
|
||||
Path remoteJobConfPath =
|
||||
new Path(remoteJobSubmitDir, MRConstants.JOB_CONF_FILE);
|
||||
new Path(remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);
|
||||
localResources.put(
|
||||
MRConstants.JOB_CONF_FILE,
|
||||
MRJobConfig.JOB_CONF_FILE,
|
||||
createLocalResource(remoteFS, recordFactory, remoteJobConfPath,
|
||||
LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
|
||||
LOG.info("The job-conf file on the remote FS is "
|
||||
|
@ -580,12 +580,8 @@ public abstract class TaskAttemptImpl implements
|
|||
// //////////// End of JobConf setup
|
||||
|
||||
// Setup DistributedCache
|
||||
MRApps.setupDistributedCache(conf, localResources, environment);
|
||||
MRApps.setupDistributedCache(conf, localResources);
|
||||
|
||||
// Set local-resources and environment
|
||||
container.setLocalResources(localResources);
|
||||
container.setEnvironment(environment);
|
||||
|
||||
// Setup up tokens
|
||||
Credentials taskCredentials = new Credentials();
|
||||
|
||||
|
@ -606,52 +602,43 @@ public abstract class TaskAttemptImpl implements
|
|||
LOG.info("Size of containertokens_dob is "
|
||||
+ taskCredentials.numberOfTokens());
|
||||
taskCredentials.writeTokenStorageToStream(containerTokens_dob);
|
||||
container.setContainerTokens(
|
||||
tokens =
|
||||
ByteBuffer.wrap(containerTokens_dob.getData(), 0,
|
||||
containerTokens_dob.getLength()));
|
||||
containerTokens_dob.getLength());
|
||||
|
||||
// Add shuffle token
|
||||
LOG.info("Putting shuffle token in serviceData");
|
||||
Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
|
||||
serviceData.put(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID,
|
||||
ShuffleHandler.serializeServiceData(jobToken));
|
||||
container.setServiceData(serviceData);
|
||||
|
||||
MRApps.addToClassPath(container.getEnvironment(), getInitialClasspath());
|
||||
MRApps.addToEnvironment(
|
||||
environment,
|
||||
Environment.CLASSPATH.name(),
|
||||
getInitialClasspath());
|
||||
} catch (IOException e) {
|
||||
throw new YarnException(e);
|
||||
}
|
||||
|
||||
// Setup environment
|
||||
MapReduceChildJVM.setVMEnv(environment, remoteTask);
|
||||
|
||||
// Set up the launch command
|
||||
List<String> commands = MapReduceChildJVM.getVMCommand(
|
||||
taskAttemptListener.getAddress(), remoteTask,
|
||||
jvmID);
|
||||
|
||||
container.setContainerId(containerID);
|
||||
container.setUser(conf.get(MRJobConfig.USER_NAME)); // TODO: Fix
|
||||
|
||||
File workDir = new File("$PWD"); // Will be expanded by the shell.
|
||||
String containerLogDir =
|
||||
new File(ApplicationConstants.LOG_DIR_EXPANSION_VAR).toString();
|
||||
String childTmpDir = new File(workDir, "tmp").toString();
|
||||
String javaHome = "${JAVA_HOME}"; // Will be expanded by the shell.
|
||||
String nmLdLibraryPath = "{LD_LIBRARY_PATH}"; // Expanded by the shell?
|
||||
List<String> classPaths = new ArrayList<String>();
|
||||
|
||||
String localizedApplicationTokensFile =
|
||||
new File(workDir, MRConstants.APPLICATION_TOKENS_FILE).toString();
|
||||
classPaths.add(MRConstants.JOB_JAR);
|
||||
classPaths.add(MRConstants.YARN_MAPREDUCE_APP_JAR_PATH);
|
||||
classPaths.add(workDir.toString()); // TODO
|
||||
|
||||
// Construct the actual Container
|
||||
container.setCommands(MapReduceChildJVM.getVMCommand(
|
||||
taskAttemptListener.getAddress(), remoteTask, javaHome,
|
||||
workDir.toString(), containerLogDir, childTmpDir, jvmID));
|
||||
|
||||
MapReduceChildJVM.setVMEnv(container.getEnvironment(), classPaths,
|
||||
workDir.toString(), containerLogDir, nmLdLibraryPath, remoteTask,
|
||||
localizedApplicationTokensFile);
|
||||
|
||||
// Construct the actual Container
|
||||
ContainerLaunchContext container =
|
||||
recordFactory.newRecordInstance(ContainerLaunchContext.class);
|
||||
container.setContainerId(containerID);
|
||||
container.setUser(conf.get(MRJobConfig.USER_NAME));
|
||||
container.setResource(assignedCapability);
|
||||
container.setLocalResources(localResources);
|
||||
container.setEnvironment(environment);
|
||||
container.setCommands(commands);
|
||||
container.setServiceData(serviceData);
|
||||
container.setContainerTokens(tokens);
|
||||
|
||||
return container;
|
||||
}
|
||||
|
||||
|
|
|
@ -73,6 +73,8 @@ public class ContainerLauncherImpl extends AbstractService implements
|
|||
|
||||
private AppContext context;
|
||||
private ThreadPoolExecutor launcherPool;
|
||||
private static final int INITIAL_POOL_SIZE = 10;
|
||||
private int limitOnPoolSize;
|
||||
private Thread eventHandlingThread;
|
||||
private BlockingQueue<ContainerLauncherEvent> eventQueue =
|
||||
new LinkedBlockingQueue<ContainerLauncherEvent>();
|
||||
|
@ -96,16 +98,17 @@ public class ContainerLauncherImpl extends AbstractService implements
|
|||
YarnConfiguration.YARN_SECURITY_INFO,
|
||||
ContainerManagerSecurityInfo.class, SecurityInfo.class);
|
||||
this.recordFactory = RecordFactoryProvider.getRecordFactory(conf);
|
||||
this.limitOnPoolSize = conf.getInt(
|
||||
MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT,
|
||||
MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT);
|
||||
super.init(myLocalConfig);
|
||||
}
|
||||
|
||||
public void start() {
|
||||
launcherPool =
|
||||
new ThreadPoolExecutor(getConfig().getInt(
|
||||
MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT, 10),
|
||||
Integer.MAX_VALUE, 1, TimeUnit.HOURS,
|
||||
new LinkedBlockingQueue<Runnable>());
|
||||
launcherPool.prestartAllCoreThreads(); // Wait for work.
|
||||
// Start with a default core-pool size of 10 and change it dynamically.
|
||||
launcherPool = new ThreadPoolExecutor(INITIAL_POOL_SIZE,
|
||||
Integer.MAX_VALUE, 1, TimeUnit.HOURS,
|
||||
new LinkedBlockingQueue<Runnable>());
|
||||
eventHandlingThread = new Thread(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
|
@@ -117,6 +120,26 @@ public class ContainerLauncherImpl extends AbstractService implements
          LOG.error("Returning, interrupted : " + e);
          return;
        }

        int poolSize = launcherPool.getCorePoolSize();

        // See if we need up the pool size only if haven't reached the
        // maximum limit yet.
        if (poolSize != limitOnPoolSize) {

          // nodes where containers will run at *this* point of time. This is
          // *not* the cluster size and doesn't need to be.
          int numNodes = ugiMap.size();
          int idealPoolSize = Math.min(limitOnPoolSize, numNodes);

          if (poolSize <= idealPoolSize) {
            // Bump up the pool size to idealPoolSize+INITIAL_POOL_SIZE, the
            // later is just a buffer so we are not always increasing the
            // pool-size
            launcherPool.setCorePoolSize(idealPoolSize + INITIAL_POOL_SIZE);
          }
        }

        // the events from the queue are handled in parallel
        // using a thread pool
        launcherPool.execute(new EventProcessor(event));
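The sizing logic added above grows the launcher pool lazily, bounded by the configured limit, instead of pre-starting every core thread. A minimal standalone sketch of that pattern, with illustrative class and method names (this is not the patched class itself):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class GrowablePoolSketch {
  private static final int INITIAL_POOL_SIZE = 10;
  private final int limitOnPoolSize;
  // Unbounded queue; the core size is what actually bounds concurrency here.
  private final ThreadPoolExecutor pool = new ThreadPoolExecutor(
      INITIAL_POOL_SIZE, Integer.MAX_VALUE, 1, TimeUnit.HOURS,
      new LinkedBlockingQueue<Runnable>());

  public GrowablePoolSketch(int limitOnPoolSize) {
    this.limitOnPoolSize = limitOnPoolSize;
  }

  public void launch(Runnable work, int numKnownNodes) {
    int poolSize = pool.getCorePoolSize();
    if (poolSize != limitOnPoolSize) {
      // Size toward the number of nodes we currently talk to, capped by the limit.
      int idealPoolSize = Math.min(limitOnPoolSize, numKnownNodes);
      if (poolSize <= idealPoolSize) {
        // Keep a small buffer above the ideal size so we do not resize constantly.
        pool.setCorePoolSize(idealPoolSize + INITIAL_POOL_SIZE);
      }
    }
    pool.execute(work);
  }
}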
@ -18,6 +18,7 @@
|
|||
|
||||
package org.apache.hadoop.mapreduce.v2.app.local;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
|
@ -30,15 +31,19 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
|
|||
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
|
||||
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
|
||||
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
|
||||
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
|
||||
import org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
|
||||
import org.apache.hadoop.yarn.api.records.AMResponse;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationId;
|
||||
import org.apache.hadoop.yarn.api.records.Container;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerId;
|
||||
import org.apache.hadoop.yarn.api.records.NodeId;
|
||||
import org.apache.hadoop.yarn.api.records.ResourceRequest;
|
||||
import org.apache.hadoop.yarn.event.EventHandler;
|
||||
import org.apache.hadoop.yarn.factories.RecordFactory;
|
||||
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
|
||||
import org.apache.hadoop.yarn.util.BuilderUtils;
|
||||
import org.apache.hadoop.yarn.util.Records;
|
||||
|
||||
/**
|
||||
|
@ -65,6 +70,20 @@ public class LocalContainerAllocator extends RMCommunicator
|
|||
this.appID = context.getApplicationID();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected synchronized void heartbeat() throws Exception {
|
||||
AllocateRequest allocateRequest = BuilderUtils.newAllocateRequest(
|
||||
this.applicationAttemptId, this.lastResponseID, super
|
||||
.getApplicationProgress(), new ArrayList<ResourceRequest>(),
|
||||
new ArrayList<ContainerId>());
|
||||
AllocateResponse allocateResponse = scheduler.allocate(allocateRequest);
|
||||
AMResponse response = allocateResponse.getAMResponse();
|
||||
if (response.getReboot()) {
|
||||
// TODO
|
||||
LOG.info("Event from RM: shutting down Application Master");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handle(ContainerAllocatorEvent event) {
|
||||
if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) {
|
||||
|
|
|
@ -58,7 +58,7 @@ import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleaner;
|
|||
import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleanupEvent;
|
||||
import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
|
||||
import org.apache.hadoop.yarn.Clock;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationId;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
|
||||
import org.apache.hadoop.yarn.api.records.Container;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerId;
|
||||
import org.apache.hadoop.yarn.api.records.NodeId;
|
||||
|
@ -92,10 +92,9 @@ public class RecoveryService extends CompositeService implements Recovery {
|
|||
|
||||
private static final Log LOG = LogFactory.getLog(RecoveryService.class);
|
||||
|
||||
private final ApplicationId appID;
|
||||
private final ApplicationAttemptId applicationAttemptId;
|
||||
private final Dispatcher dispatcher;
|
||||
private final ControlledClock clock;
|
||||
private final int startCount;
|
||||
|
||||
private JobInfo jobInfo = null;
|
||||
private final Map<TaskId, TaskInfo> completedTasks =
|
||||
|
@ -106,10 +105,10 @@ public class RecoveryService extends CompositeService implements Recovery {
|
|||
|
||||
private volatile boolean recoveryMode = false;
|
||||
|
||||
public RecoveryService(ApplicationId appID, Clock clock, int startCount) {
|
||||
public RecoveryService(ApplicationAttemptId applicationAttemptId,
|
||||
Clock clock) {
|
||||
super("RecoveringDispatcher");
|
||||
this.appID = appID;
|
||||
this.startCount = startCount;
|
||||
this.applicationAttemptId = applicationAttemptId;
|
||||
this.dispatcher = new RecoveryDispatcher();
|
||||
this.clock = new ControlledClock(clock);
|
||||
addService((Service) dispatcher);
|
||||
|
@ -152,7 +151,8 @@ public class RecoveryService extends CompositeService implements Recovery {
|
|||
|
||||
private void parse() throws IOException {
|
||||
// TODO: parse history file based on startCount
|
||||
String jobName = TypeConverter.fromYarn(appID).toString();
|
||||
String jobName =
|
||||
TypeConverter.fromYarn(applicationAttemptId.getApplicationId()).toString();
|
||||
String jobhistoryDir = JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(getConfig());
|
||||
FSDataInputStream in = null;
|
||||
Path historyFile = null;
|
||||
|
@ -160,8 +160,9 @@ public class RecoveryService extends CompositeService implements Recovery {
|
|||
new Path(jobhistoryDir));
|
||||
FileContext fc = FileContext.getFileContext(histDirPath.toUri(),
|
||||
getConfig());
|
||||
//read the previous history file
|
||||
historyFile = fc.makeQualified(JobHistoryUtils.getStagingJobHistoryFile(
|
||||
histDirPath, jobName, startCount - 1)); //read the previous history file
|
||||
histDirPath, jobName, (applicationAttemptId.getAttemptId() - 1)));
|
||||
in = fc.open(historyFile);
|
||||
JobHistoryParser parser = new JobHistoryParser(in);
|
||||
jobInfo = parser.parse();
|
||||
|
|
|
@ -20,7 +20,6 @@ package org.apache.hadoop.mapreduce.v2.app.rm;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.security.PrivilegedAction;
|
||||
import java.util.ArrayList;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
@ -29,6 +28,7 @@ import org.apache.hadoop.mapreduce.JobID;
|
|||
import org.apache.hadoop.mapreduce.MRJobConfig;
|
||||
import org.apache.hadoop.mapreduce.TypeConverter;
|
||||
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
|
||||
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
|
||||
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
|
||||
import org.apache.hadoop.mapreduce.v2.app.AppContext;
|
||||
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
|
||||
|
@ -42,17 +42,12 @@ import org.apache.hadoop.security.token.TokenIdentifier;
|
|||
import org.apache.hadoop.yarn.YarnException;
|
||||
import org.apache.hadoop.yarn.api.AMRMProtocol;
|
||||
import org.apache.hadoop.yarn.api.ApplicationConstants;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
|
||||
import org.apache.hadoop.yarn.api.records.AMResponse;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationId;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerId;
|
||||
import org.apache.hadoop.yarn.api.records.Resource;
|
||||
import org.apache.hadoop.yarn.api.records.ResourceRequest;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.event.EventHandler;
|
||||
import org.apache.hadoop.yarn.factories.RecordFactory;
|
||||
|
@ -64,7 +59,7 @@ import org.apache.hadoop.yarn.service.AbstractService;
|
|||
/**
|
||||
* Registers/unregisters to RM and sends heartbeats to RM.
|
||||
*/
|
||||
public class RMCommunicator extends AbstractService {
|
||||
public abstract class RMCommunicator extends AbstractService {
|
||||
private static final Log LOG = LogFactory.getLog(RMContainerAllocator.class);
|
||||
private int rmPollInterval;//millis
|
||||
protected ApplicationId applicationId;
|
||||
|
@ -74,7 +69,7 @@ public class RMCommunicator extends AbstractService {
|
|||
protected EventHandler eventHandler;
|
||||
protected AMRMProtocol scheduler;
|
||||
private final ClientService clientService;
|
||||
private int lastResponseID;
|
||||
protected int lastResponseID;
|
||||
private Resource minContainerCapability;
|
||||
private Resource maxContainerCapability;
|
||||
|
||||
|
@@ -121,6 +116,34 @@ public class RMCommunicator extends AbstractService {
    return job;
  }

  /**
   * Get the appProgress. Can be used only after this component is started.
   * @return the appProgress.
   */
  protected float getApplicationProgress() {
    // For now just a single job. In future when we have a DAG, we need an
    // aggregate progress.
    JobReport report = this.job.getReport();
    float setupWeight = 0.05f;
    float cleanupWeight = 0.05f;
    float mapWeight = 0.0f;
    float reduceWeight = 0.0f;
    int numMaps = this.job.getTotalMaps();
    int numReduces = this.job.getTotalReduces();
    if (numMaps == 0 && numReduces == 0) {
    } else if (numMaps == 0) {
      reduceWeight = 0.9f;
    } else if (numReduces == 0) {
      mapWeight = 0.9f;
    } else {
      mapWeight = reduceWeight = 0.45f;
    }
    return (report.getSetupProgress() * setupWeight
        + report.getCleanupProgress() * cleanupWeight
        + report.getMapProgress() * mapWeight + report.getReduceProgress()
        * reduceWeight);
  }

  protected void register() {
    //Register
    String host =
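The new getApplicationProgress above is a fixed weighting of the job's phases: 5% setup, 5% cleanup, and the remaining 90% split between map and reduce depending on which phases exist. A small self-contained restatement of that arithmetic, assuming all progress values lie in [0, 1] (the method below is an illustration, not part of the patch):

// 5% setup + 5% cleanup; the remaining 90% goes to map/reduce.
public static float weightedProgress(float setup, float cleanup,
    float map, float reduce, int numMaps, int numReduces) {
  float mapWeight = 0.0f;
  float reduceWeight = 0.0f;
  if (numMaps == 0 && numReduces == 0) {
    // No tasks at all: only setup and cleanup contribute.
  } else if (numMaps == 0) {
    reduceWeight = 0.9f;
  } else if (numReduces == 0) {
    mapWeight = 0.9f;
  } else {
    mapWeight = reduceWeight = 0.45f;
  }
  // e.g. weightedProgress(1f, 0f, 0.5f, 0.2f, 10, 2)
  //      = 0.05 + 0 + 0.225 + 0.09 = 0.365
  return 0.05f * setup + 0.05f * cleanup
      + mapWeight * map + reduceWeight * reduce;
}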
@ -262,18 +285,5 @@ public class RMCommunicator extends AbstractService {
|
|||
});
|
||||
}
|
||||
|
||||
protected synchronized void heartbeat() throws Exception {
|
||||
AllocateRequest allocateRequest =
|
||||
recordFactory.newRecordInstance(AllocateRequest.class);
|
||||
allocateRequest.setApplicationAttemptId(applicationAttemptId);
|
||||
allocateRequest.setResponseId(lastResponseID);
|
||||
allocateRequest.addAllAsks(new ArrayList<ResourceRequest>());
|
||||
allocateRequest.addAllReleases(new ArrayList<ContainerId>());
|
||||
AllocateResponse allocateResponse = scheduler.allocate(allocateRequest);
|
||||
AMResponse response = allocateResponse.getAMResponse();
|
||||
if (response.getReboot()) {
|
||||
LOG.info("Event from RM: shutting down Application Master");
|
||||
}
|
||||
}
|
||||
|
||||
protected abstract void heartbeat() throws Exception;
|
||||
}
|
||||
|
|
|
@@ -586,37 +586,21 @@ public class RMContainerAllocator extends RMContainerRequestor
    private ContainerRequest assign(Container allocated) {
      ContainerRequest assigned = null;

      if (mapResourceReqt != reduceResourceReqt) {
        //assign based on size
        LOG.info("Assigning based on container size");
        if (allocated.getResource().getMemory() == mapResourceReqt) {
          assigned = assignToFailedMap(allocated);
          if (assigned == null) {
            assigned = assignToMap(allocated);
          }
        } else if (allocated.getResource().getMemory() == reduceResourceReqt) {
          assigned = assignToReduce(allocated);
        }

        return assigned;
      }

      //container can be given to either map or reduce
      //assign based on priority

      //try to assign to earlierFailedMaps if present
      assigned = assignToFailedMap(allocated);

      //Assign to reduces before assigning to maps ?
      if (assigned == null) {
      Priority priority = allocated.getPriority();
      if (PRIORITY_FAST_FAIL_MAP.equals(priority)) {
        LOG.info("Assigning container " + allocated + " to fast fail map");
        assigned = assignToFailedMap(allocated);
      } else if (PRIORITY_REDUCE.equals(priority)) {
        LOG.info("Assigning container " + allocated + " to reduce");
        assigned = assignToReduce(allocated);
      }

      //try to assign to maps if present
      if (assigned == null) {
      } else if (PRIORITY_MAP.equals(priority)) {
        LOG.info("Assigning container " + allocated + " to map");
        assigned = assignToMap(allocated);
      } else {
        LOG.warn("Container allocated at unwanted priority: " + priority +
            ". Returning to RM...");
      }

      return assigned;
    }
@ -43,6 +43,7 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
|
|||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.factories.RecordFactory;
|
||||
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
|
||||
import org.apache.hadoop.yarn.util.BuilderUtils;
|
||||
|
||||
/**
|
||||
* Keeps the data structures to send container requests to RM.
|
||||
|
@ -107,15 +108,11 @@ public abstract class RMContainerRequestor extends RMCommunicator {
|
|||
LOG.info("maxTaskFailuresPerNode is " + maxTaskFailuresPerNode);
|
||||
}
|
||||
|
||||
protected abstract void heartbeat() throws Exception;
|
||||
|
||||
protected AMResponse makeRemoteRequest() throws YarnRemoteException {
|
||||
AllocateRequest allocateRequest = recordFactory
|
||||
.newRecordInstance(AllocateRequest.class);
|
||||
allocateRequest.setApplicationAttemptId(applicationAttemptId);
|
||||
allocateRequest.setResponseId(lastResponseID);
|
||||
allocateRequest.addAllAsks(new ArrayList<ResourceRequest>(ask));
|
||||
allocateRequest.addAllReleases(new ArrayList<ContainerId>(release));
|
||||
AllocateRequest allocateRequest = BuilderUtils.newAllocateRequest(
|
||||
applicationAttemptId, lastResponseID, super.getApplicationProgress(),
|
||||
new ArrayList<ResourceRequest>(ask), new ArrayList<ContainerId>(
|
||||
release));
|
||||
AllocateResponse allocateResponse = scheduler.allocate(allocateRequest);
|
||||
AMResponse response = allocateResponse.getAMResponse();
|
||||
lastResponseID = response.getResponseId();
|
||||
|
|
|
@ -35,7 +35,6 @@ import org.apache.commons.logging.Log;
|
|||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.mapreduce.MRJobConfig;
|
||||
import org.apache.hadoop.mapreduce.v2.MRConstants;
|
||||
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
|
||||
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
|
||||
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
|
||||
|
@ -87,7 +86,7 @@ public class DefaultSpeculator extends AbstractService implements
|
|||
private final ConcurrentMap<JobId, AtomicInteger> reduceContainerNeeds
|
||||
= new ConcurrentHashMap<JobId, AtomicInteger>();
|
||||
|
||||
private final Set<TaskId> mayHaveSpeculated = new HashSet();
|
||||
private final Set<TaskId> mayHaveSpeculated = new HashSet<TaskId>();
|
||||
|
||||
private final Configuration conf;
|
||||
private AppContext context;
|
||||
|
|
|
@ -44,6 +44,7 @@ public class JobConfPage extends AppView {
|
|||
set(TITLE, jobID.isEmpty() ? "Bad request: missing job ID"
|
||||
: join("Configuration for MapReduce Job ", $(JOB_ID)));
|
||||
commonPreHead(html);
|
||||
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:2}");
|
||||
set(DATATABLES_ID, "conf");
|
||||
set(initID(DATATABLES, "conf"), confTableInit());
|
||||
set(postInitID(DATATABLES, "conf"), confPostTableInit());
|
||||
|
|
|
@ -38,9 +38,9 @@ public class NavBlock extends HtmlBlock {
|
|||
div("#nav").
|
||||
h3("Cluster").
|
||||
ul().
|
||||
li().a(url(rmweb, prefix(), "cluster"), "About")._().
|
||||
li().a(url(rmweb, prefix(), "apps"), "Applications")._().
|
||||
li().a(url(rmweb, prefix(), "scheduler"), "Scheduler")._()._().
|
||||
li().a(url(rmweb, "cluster", "cluster"), "About")._().
|
||||
li().a(url(rmweb, "cluster", "apps"), "Applications")._().
|
||||
li().a(url(rmweb, "cluster", "scheduler"), "Scheduler")._()._().
|
||||
h3("Application").
|
||||
ul().
|
||||
li().a(url("app/info"), "About")._().
|
||||
|
|
|
@ -85,7 +85,7 @@ public class TaskPage extends AppView {
|
|||
if (containerId != null) {
|
||||
String containerIdStr = ConverterUtils.toString(containerId);
|
||||
nodeTd._(" ").
|
||||
a(".logslink", url("http://", nodeHttpAddr, "yarn", "containerlogs",
|
||||
a(".logslink", url("http://", nodeHttpAddr, "node", "containerlogs",
|
||||
containerIdStr), "logs");
|
||||
}
|
||||
nodeTd._().
|
||||
|
|
|
@ -66,6 +66,7 @@ import org.apache.hadoop.security.Credentials;
|
|||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.yarn.Clock;
|
||||
import org.apache.hadoop.yarn.YarnException;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationId;
|
||||
import org.apache.hadoop.yarn.api.records.Container;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerId;
|
||||
|
@ -91,7 +92,7 @@ public class MRApp extends MRAppMaster {
|
|||
private File testWorkDir;
|
||||
private Path testAbsPath;
|
||||
|
||||
private final RecordFactory recordFactory =
|
||||
private static final RecordFactory recordFactory =
|
||||
RecordFactoryProvider.getRecordFactory(null);
|
||||
|
||||
//if true, tasks complete automatically as soon as they are launched
|
||||
|
@ -100,7 +101,7 @@ public class MRApp extends MRAppMaster {
|
|||
static ApplicationId applicationId;
|
||||
|
||||
static {
|
||||
applicationId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class);
|
||||
applicationId = recordFactory.newRecordInstance(ApplicationId.class);
|
||||
applicationId.setClusterTimestamp(0);
|
||||
applicationId.setId(0);
|
||||
}
|
||||
|
@ -108,9 +109,19 @@ public class MRApp extends MRAppMaster {
|
|||
public MRApp(int maps, int reduces, boolean autoComplete, String testName, boolean cleanOnStart) {
|
||||
this(maps, reduces, autoComplete, testName, cleanOnStart, 1);
|
||||
}
|
||||
|
||||
private static ApplicationAttemptId getApplicationAttemptId(
|
||||
ApplicationId applicationId, int startCount) {
|
||||
ApplicationAttemptId applicationAttemptId =
|
||||
recordFactory.newRecordInstance(ApplicationAttemptId.class);
|
||||
applicationAttemptId.setApplicationId(applicationId);
|
||||
applicationAttemptId.setAttemptId(startCount);
|
||||
return applicationAttemptId;
|
||||
}
|
||||
|
||||
public MRApp(int maps, int reduces, boolean autoComplete, String testName, boolean cleanOnStart, int startCount) {
|
||||
super(applicationId, startCount);
|
||||
public MRApp(int maps, int reduces, boolean autoComplete, String testName,
|
||||
boolean cleanOnStart, int startCount) {
|
||||
super(getApplicationAttemptId(applicationId, startCount));
|
||||
this.testWorkDir = new File("target", testName);
|
||||
testAbsPath = new Path(testWorkDir.getAbsolutePath());
|
||||
LOG.info("PathUsed: " + testAbsPath);
|
||||
|
@ -391,11 +402,12 @@ public class MRApp extends MRAppMaster {
|
|||
return localStateMachine;
|
||||
}
|
||||
|
||||
public TestJob(Configuration conf, ApplicationId appID,
|
||||
public TestJob(Configuration conf, ApplicationId applicationId,
|
||||
EventHandler eventHandler, TaskAttemptListener taskAttemptListener,
|
||||
Clock clock, String user) {
|
||||
super(appID, conf, eventHandler, taskAttemptListener,
|
||||
new JobTokenSecretManager(), new Credentials(), clock, getStartCount(),
|
||||
super(getApplicationAttemptId(applicationId, getStartCount()),
|
||||
conf, eventHandler, taskAttemptListener,
|
||||
new JobTokenSecretManager(), new Credentials(), clock,
|
||||
getCompletedTaskFromPreviousRun(), metrics, user);
|
||||
|
||||
// This "this leak" is okay because the retained pointer is in an
|
||||
|
|
File diff suppressed because it is too large
|
@@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationState;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

@@ -280,16 +281,28 @@ public class TypeConverter {
  }

  public static org.apache.hadoop.mapred.JobStatus fromYarn(
      JobReport jobreport, String jobFile, String trackingUrl) {
      JobReport jobreport, String jobFile) {
    JobPriority jobPriority = JobPriority.NORMAL;
    return new org.apache.hadoop.mapred.JobStatus(fromYarn(jobreport.getJobId()),
        jobreport.getSetupProgress(), jobreport.getMapProgress(),
        jobreport.getReduceProgress(), jobreport.getCleanupProgress(),
        fromYarn(jobreport.getJobState()),
        jobPriority, jobreport.getUser(), jobreport.getJobName(),
        jobFile, trackingUrl);
    org.apache.hadoop.mapred.JobStatus jobStatus =
        new org.apache.hadoop.mapred.JobStatus(fromYarn(jobreport.getJobId()),
            jobreport.getSetupProgress(), jobreport.getMapProgress(),
            jobreport.getReduceProgress(), jobreport.getCleanupProgress(),
            fromYarn(jobreport.getJobState()),
            jobPriority, jobreport.getUser(), jobreport.getJobName(),
            jobFile, jobreport.getTrackingUrl());
    jobStatus.setFailureInfo(jobreport.getDiagnostics());
    return jobStatus;
  }

  public static org.apache.hadoop.mapreduce.QueueState fromYarn(
      QueueState state) {
    org.apache.hadoop.mapreduce.QueueState qState =
        org.apache.hadoop.mapreduce.QueueState.getState(
            state.toString().toLowerCase());
    return qState;
  }

  public static int fromYarn(JobState state) {
    switch (state) {
    case NEW:

@@ -412,6 +425,7 @@ public class TypeConverter {
    );
    jobStatus.setSchedulingInfo(trackingUrl); // Set AM tracking url
    jobStatus.setStartTime(application.getStartTime());
    jobStatus.setFailureInfo(application.getDiagnostics());
    return jobStatus;
  }

@@ -431,9 +445,9 @@ public class TypeConverter {

  public static QueueInfo fromYarn(org.apache.hadoop.yarn.api.records.QueueInfo
      queueInfo, Configuration conf) {
    return new QueueInfo(queueInfo.getQueueName(),
        queueInfo.toString(), QueueState.RUNNING,
        TypeConverter.fromYarnApps(queueInfo.getApplications(), conf));
    return new QueueInfo(queueInfo.getQueueName(), queueInfo.toString(),
        fromYarn(queueInfo.getQueueState()), TypeConverter.fromYarnApps(
            queueInfo.getApplications(), conf));
  }

  public static QueueInfo[] fromYarnQueueInfo(
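For readers of the TypeConverter hunks above, a minimal usage sketch of the reshaped overload: the tracking URL and diagnostics now travel inside the JobReport, so callers pass only the report and the job file. The wrapper class is hypothetical; the TypeConverter call itself is the one changed in this commit.

    import org.apache.hadoop.mapred.JobStatus;
    import org.apache.hadoop.mapreduce.TypeConverter;
    import org.apache.hadoop.mapreduce.v2.api.records.JobReport;

    public class StatusConversionSketch {
      static JobStatus toClientStatus(JobReport report, String jobFile) {
        // No separate trackingUrl argument any more: fromYarn reads the URL off the
        // report and surfaces the report diagnostics as the status failure info.
        return TypeConverter.fromYarn(report, jobFile);
      }
    }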
@@ -1,50 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.mapreduce.v2;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface MRConstants {
  // This should be the directory where splits file gets localized on the node
  // running ApplicationMaster.
  public static final String JOB_SUBMIT_DIR = "jobSubmitDir";

  // This should be the name of the localized job-configuration file on the node
  // running ApplicationMaster and Task
  public static final String JOB_CONF_FILE = "job.xml";
  // This should be the name of the localized job-jar file on the node running
  // individual containers/tasks.
  public static final String JOB_JAR = "job.jar";

  public static final String HADOOP_MAPREDUCE_CLIENT_APP_JAR_NAME =
      "hadoop-mapreduce-client-app-0.24.0-SNAPSHOT.jar";

  public static final String YARN_MAPREDUCE_APP_JAR_PATH =
      "$YARN_HOME/modules/" + HADOOP_MAPREDUCE_CLIENT_APP_JAR_NAME;

  // The token file for the application. Should contain tokens for access to
  // remote file system and may optionally contain application specific tokens.
  // For now, generated by the AppManagers and used by NodeManagers and the
  // Containers.
  public static final String APPLICATION_TOKENS_FILE = "appTokens";
}
@@ -29,6 +29,8 @@ public interface JobReport {
  public abstract long getFinishTime();
  public abstract String getUser();
  public abstract String getJobName();
  public abstract String getTrackingUrl();
  public abstract String getDiagnostics();

  public abstract void setJobId(JobId jobId);
  public abstract void setJobState(JobState jobState);

@@ -40,4 +42,6 @@ public interface JobReport {
  public abstract void setFinishTime(long finishTime);
  public abstract void setUser(String user);
  public abstract void setJobName(String jobName);
  public abstract void setTrackingUrl(String trackingUrl);
  public abstract void setDiagnostics(String diagnostics);
}
@@ -206,6 +206,30 @@ public class JobReportPBImpl extends ProtoBase<JobReportProto> implements JobRep
    builder.setJobName((jobName));
  }

  @Override
  public String getTrackingUrl() {
    JobReportProtoOrBuilder p = viaProto ? proto : builder;
    return (p.getTrackingUrl());
  }

  @Override
  public void setTrackingUrl(String trackingUrl) {
    maybeInitBuilder();
    builder.setTrackingUrl(trackingUrl);
  }

  @Override
  public String getDiagnostics() {
    JobReportProtoOrBuilder p = viaProto ? proto : builder;
    return p.getDiagnostics();
  }

  @Override
  public void setDiagnostics(String diagnostics) {
    maybeInitBuilder();
    builder.setDiagnostics(diagnostics);
  }

  private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
    return new JobIdPBImpl(p);
  }
@@ -489,7 +489,7 @@ public class JobHistoryUtils {
      sb.append(address.getHostName());
    }
    sb.append(":").append(address.getPort());
    sb.append("/yarn/job/"); // TODO This will change when the history server
    sb.append("/jobhistory/job/"); // TODO This will change when the history server
                                   // understands apps.
    // TODO Use JobId toString once UI stops using _id_id
    sb.append("job_").append(appId.getClusterTimestamp());
@@ -39,14 +39,14 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.v2.MRConstants;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;

@@ -167,7 +167,7 @@ public class MRApps extends Apps {
    return TaskAttemptStateUI.valueOf(attemptStateStr);
  }

  public static void setInitialClasspath(
  private static void setMRFrameworkClasspath(
      Map<String, String> environment) throws IOException {
    InputStream classpathFileStream = null;
    BufferedReader reader = null;

@@ -182,30 +182,17 @@ public class MRApps extends Apps {
      reader = new BufferedReader(new InputStreamReader(classpathFileStream));
      String cp = reader.readLine();
      if (cp != null) {
        addToClassPath(environment, cp.trim());
        addToEnvironment(environment, Environment.CLASSPATH.name(), cp.trim());
      }
      // Put the file itself on classpath for tasks.
      addToClassPath(environment,
      addToEnvironment(
          environment,
          Environment.CLASSPATH.name(),
          thisClassLoader.getResource(mrAppGeneratedClasspathFile).getFile());

      // If runtime env is different.
      if (System.getenv().get("YARN_HOME") != null) {
        ShellCommandExecutor exec =
            new ShellCommandExecutor(new String[] {
                System.getenv().get("YARN_HOME") + "/bin/yarn",
                "classpath" });
        exec.execute();
        addToClassPath(environment, exec.getOutput().trim());
      }

      // Get yarn mapreduce-app classpath
      if (System.getenv().get("HADOOP_MAPRED_HOME")!= null) {
        ShellCommandExecutor exec =
            new ShellCommandExecutor(new String[] {
                System.getenv().get("HADOOP_MAPRED_HOME") + "/bin/mapred",
                "classpath" });
        exec.execute();
        addToClassPath(environment, exec.getOutput().trim());
      // Add standard Hadoop classes
      for (String c : ApplicationConstants.APPLICATION_CLASSPATH) {
        addToEnvironment(environment, Environment.CLASSPATH.name(), c);
      }
    } finally {
      if (classpathFileStream != null) {

@@ -217,20 +204,35 @@ public class MRApps extends Apps {
      }
      // TODO: Remove duplicates.
    }

  private static final String SYSTEM_PATH_SEPARATOR =
      System.getProperty("path.separator");

  public static void addToClassPath(
      Map<String, String> environment, String fileName) {
    String classpath = environment.get(CLASSPATH);
    if (classpath == null) {
      classpath = fileName;
  public static void addToEnvironment(
      Map<String, String> environment,
      String variable, String value) {
    String val = environment.get(variable);
    if (val == null) {
      val = value;
    } else {
      classpath = classpath + ":" + fileName;
      val = val + SYSTEM_PATH_SEPARATOR + value;
    }
    environment.put(CLASSPATH, classpath);
    environment.put(variable, val);
  }

  public static final String CLASSPATH = "CLASSPATH";

  public static void setClasspath(Map<String, String> environment)
      throws IOException {
    MRApps.addToEnvironment(
        environment,
        Environment.CLASSPATH.name(),
        MRJobConfig.JOB_JAR);
    MRApps.addToEnvironment(
        environment,
        Environment.CLASSPATH.name(),
        Environment.PWD.$() + Path.SEPARATOR + "*");
    MRApps.setMRFrameworkClasspath(environment);
  }

  private static final String STAGING_CONSTANT = ".staging";
  public static Path getStagingAreaDir(Configuration conf, String user) {
    return new Path(
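To make the classpath refactoring above concrete, a small sketch of the appending rule addToEnvironment introduces: repeated values for one variable are joined with the platform's "path.separator" rather than a hard-coded ':'. The class name and sample entries are illustrative only.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.mapreduce.v2.util.MRApps;

    public class ClasspathEnvSketch {
      public static void main(String[] args) {
        Map<String, String> env = new HashMap<String, String>();
        MRApps.addToEnvironment(env, "CLASSPATH", "job.jar");  // first entry stored as-is
        MRApps.addToEnvironment(env, "CLASSPATH", "$PWD/*");   // later entries appended
        // Prints job.jar:$PWD/* on Linux and job.jar;$PWD/* on Windows.
        System.out.println(env.get("CLASSPATH"));
      }
    }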
@@ -241,7 +243,7 @@ public class MRApps extends Apps {
  public static String getJobFile(Configuration conf, String user,
      org.apache.hadoop.mapreduce.JobID jobId) {
    Path jobFile = new Path(MRApps.getStagingAreaDir(conf, user),
        jobId.toString() + Path.SEPARATOR + MRConstants.JOB_CONF_FILE);
        jobId.toString() + Path.SEPARATOR + MRJobConfig.JOB_CONF_FILE);
    return jobFile.toString();
  }

@@ -260,12 +262,11 @@ public class MRApps extends Apps {

  public static void setupDistributedCache(
      Configuration conf,
      Map<String, LocalResource> localResources,
      Map<String, String> env)
      Map<String, LocalResource> localResources)
      throws IOException {

    // Cache archives
    parseDistributedCacheArtifacts(conf, localResources, env,
    parseDistributedCacheArtifacts(conf, localResources,
        LocalResourceType.ARCHIVE,
        DistributedCache.getCacheArchives(conf),
        parseTimeStamps(DistributedCache.getArchiveTimestamps(conf)),

@@ -275,7 +276,7 @@ public class MRApps extends Apps {

    // Cache files
    parseDistributedCacheArtifacts(conf,
        localResources, env,
        localResources,
        LocalResourceType.FILE,
        DistributedCache.getCacheFiles(conf),
        parseTimeStamps(DistributedCache.getFileTimestamps(conf)),

@@ -290,7 +291,6 @@ public class MRApps extends Apps {
  private static void parseDistributedCacheArtifacts(
      Configuration conf,
      Map<String, LocalResource> localResources,
      Map<String, String> env,
      LocalResourceType type,
      URI[] uris, long[] timestamps, long[] sizes, boolean visibilities[],
      Path[] pathsToPutOnClasspath) throws IOException {

@@ -339,9 +339,6 @@ public class MRApps extends Apps {
            : LocalResourceVisibility.PRIVATE,
            sizes[i], timestamps[i])
        );
        if (classPaths.containsKey(u.getPath())) {
          MRApps.addToClassPath(env, linkName);
        }
      }
    }
  }

@@ -358,6 +355,42 @@ public class MRApps extends Apps {
    }
    return result;
  }

  public static void setEnvFromInputString(Map<String, String> env,
      String envString) {
    if (envString != null && envString.length() > 0) {
      String childEnvs[] = envString.split(",");
      for (String cEnv : childEnvs) {
        String[] parts = cEnv.split("="); // split on '='
        String value = env.get(parts[0]);

        if (value != null) {
          // Replace $env with the child's env constructed by NM's
          // For example: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp
          value = parts[1].replace("$" + parts[0], value);
        } else {
          // example PATH=$PATH:/tmp
          value = System.getenv(parts[0]);
          if (value != null) {
            // the env key is present in the tt's env
            value = parts[1].replace("$" + parts[0], value);
          } else {
            // check for simple variable substitution
            // for e.g. ROOT=$HOME
            String envValue = System.getenv(parts[1].substring(1));
            if (envValue != null) {
              value = envValue;
            } else {
              // the env key is not present anywhere .. simply set it
              // example X=$X:/tmp or X=/tmp
              value = parts[1].replace("$" + parts[0], "");
            }
          }
        }
        addToEnvironment(env, parts[0], value);
      }
    }
  }
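A hedged illustration of the new setEnvFromInputString above: the input is a comma-separated list of VAR=VALUE pairs, and $VAR references are expanded first from the supplied map, then from the parent process environment, and otherwise dropped. The class name, input string, and expected values are examples, not part of the commit.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.mapreduce.v2.util.MRApps;

    public class ChildEnvSketch {
      public static void main(String[] args) {
        Map<String, String> env = new HashMap<String, String>();
        MRApps.setEnvFromInputString(env, "MY_TOOL_HOME=/opt/mytool,MY_OPTS=$MY_OPTS:-Xmx200m");
        // MY_TOOL_HOME -> "/opt/mytool"; MY_OPTS -> ":-Xmx200m" here, assuming $MY_OPTS
        // is defined neither in the map nor in the surrounding environment.
        System.out.println(env);
      }
    }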
@@ -19,27 +19,25 @@
package org.apache.hadoop.mapreduce.v2.util;

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.util.Records;

public class MRBuilderUtils {

  private static final RecordFactory recordFactory = RecordFactoryProvider
      .getRecordFactory(null);

  public static JobId newJobId(ApplicationId appId, int id) {
    JobId jobId = recordFactory.newRecordInstance(JobId.class);
    JobId jobId = Records.newRecord(JobId.class);
    jobId.setAppId(appId);
    jobId.setId(id);
    return jobId;
  }

  public static TaskId newTaskId(JobId jobId, int id, TaskType taskType) {
    TaskId taskId = recordFactory.newRecordInstance(TaskId.class);
    TaskId taskId = Records.newRecord(TaskId.class);
    taskId.setJobId(jobId);
    taskId.setId(id);
    taskId.setTaskType(taskType);

@@ -48,9 +46,27 @@ public class MRBuilderUtils {

  public static TaskAttemptId newTaskAttemptId(TaskId taskId, int attemptId) {
    TaskAttemptId taskAttemptId =
        recordFactory.newRecordInstance(TaskAttemptId.class);
        Records.newRecord(TaskAttemptId.class);
    taskAttemptId.setTaskId(taskId);
    taskAttemptId.setId(attemptId);
    return taskAttemptId;
  }

  public static JobReport newJobReport(JobId jobId, String jobName,
      String userName, JobState state, long startTime, long finishTime,
      float setupProgress, float mapProgress, float reduceProgress,
      float cleanupProgress) {
    JobReport report = Records.newRecord(JobReport.class);
    report.setJobId(jobId);
    report.setJobName(jobName);
    report.setUser(userName);
    report.setJobState(state);
    report.setStartTime(startTime);
    report.setFinishTime(finishTime);
    report.setSetupProgress(setupProgress);
    report.setCleanupProgress(cleanupProgress);
    report.setMapProgress(mapProgress);
    report.setReduceProgress(reduceProgress);
    return report;
  }
}
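A sketch of how the new MRBuilderUtils.newJobReport above composes with the JobReport fields added earlier in this diff; all concrete values (names, progress figures, URL) are placeholders.

    import org.apache.hadoop.mapreduce.v2.api.records.JobId;
    import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
    import org.apache.hadoop.mapreduce.v2.api.records.JobState;
    import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.util.Records;

    public class JobReportSketch {
      static JobReport sampleReport() {
        ApplicationId appId = Records.newRecord(ApplicationId.class);
        appId.setClusterTimestamp(0);
        appId.setId(1);
        JobId jobId = MRBuilderUtils.newJobId(appId, 1);
        // Argument order follows the newJobReport signature in the hunk above:
        // start/finish time, then setup, map, reduce, cleanup progress.
        JobReport report = MRBuilderUtils.newJobReport(jobId, "wordcount", "alice",
            JobState.RUNNING, 0L, 0L, 1.0f, 0.42f, 0.0f, 0.0f);
        // trackingUrl and diagnostics are the fields this commit adds to JobReport.
        report.setTrackingUrl("http://am-host:8088/");
        report.setDiagnostics("");
        return report;
      }
    }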
@@ -143,6 +143,8 @@ message JobReportProto {
  optional int64 finish_time = 8;
  optional string user = 9;
  optional string jobName = 10;
  optional string trackingUrl = 11;
  optional string diagnostics = 12;
}

enum TaskAttemptCompletionEventStatusProto {
@@ -19,11 +19,14 @@ package org.apache.hadoop.mapreduce;

import junit.framework.Assert;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationState;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationReportPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.QueueInfoPBImpl;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.junit.Test;

@@ -67,4 +70,14 @@ public class TestTypeConverter {
    Assert.assertEquals("jobId set incorrectly", 6789, status.getJobID().getId());
    Assert.assertEquals("state set incorrectly", JobStatus.State.KILLED, status.getState());
  }

  @Test
  public void testFromYarnQueueInfo() {
    org.apache.hadoop.yarn.api.records.QueueInfo queueInfo = new QueueInfoPBImpl();
    queueInfo.setQueueState(org.apache.hadoop.yarn.api.records.QueueState.STOPPED);
    org.apache.hadoop.mapreduce.QueueInfo returned =
        TypeConverter.fromYarn(queueInfo, new Configuration());
    Assert.assertEquals("queueInfo translation didn't work.",
        returned.getState().toString(), queueInfo.getQueueState().toString().toLowerCase());
  }
}
@@ -25,7 +25,6 @@ import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.MRConstants;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.records.ApplicationId;

@@ -115,7 +114,8 @@ public class TestMRApps {
  @Test public void testGetJobFileWithUser() {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, "/my/path/to/staging");
    String jobFile = MRApps.getJobFile(conf, "dummy-user", new JobID("dummy-job", 12345));
    String jobFile = MRApps.getJobFile(conf, "dummy-user",
        new JobID("dummy-job", 12345));
    assertNotNull("getJobFile results in null.", jobFile);
    assertEquals("jobFile with specified user is not as expected.",
        "/my/path/to/staging/dummy-user/.staging/job_dummy-job_12345/job.xml", jobFile);
@@ -41,6 +41,7 @@ import org.apache.hadoop.mapred.IFile.Reader;
import org.apache.hadoop.mapred.IFile.Writer;
import org.apache.hadoop.mapred.Merger.Segment;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;

/**

@@ -560,7 +561,7 @@ public class BackupStore<K,V> {

  private Writer<K,V> createSpillFile() throws IOException {
    Path tmp =
        new Path(Constants.OUTPUT + "/backup_" + tid.getId() + "_"
        new Path(MRJobConfig.OUTPUT + "/backup_" + tid.getId() + "_"
            + (spillNumber++) + ".out");

    LOG.info("Created file: " + tmp);
@@ -348,6 +348,7 @@ public class JobConf extends Configuration {
   */
  public static final Level DEFAULT_LOG_LEVEL = Level.INFO;


  /**
   * Construct a map/reduce job configuration.
   */
@@ -321,6 +321,10 @@ public class JobStatus extends org.apache.hadoop.mapreduce.JobStatus {
    super.setJobACLs(acls);
  }

  public synchronized void setFailureInfo(String failureInfo) {
    super.setFailureInfo(failureInfo);
  }

  /**
   * Set the priority of the job, defaulting to NORMAL.
   * @param jp new job priority
@@ -17,11 +17,16 @@
 */
package org.apache.hadoop.mapred;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;

/*******************************
 * Some handy constants
 *
 *******************************/
interface MRConstants {
@Private
@Unstable
public interface MRConstants {
  //
  // Timeouts, constants
  //

@@ -53,5 +58,6 @@ interface MRConstants {
   */
  public static final String FOR_REDUCE_TASK = "for-reduce-task";

  /** Used in MRv1, mostly in TaskTracker code **/
  public static final String WORKDIR = "work";
}
@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;

/**
 * Manipulate the working area for the transient store for maps and reduces.

@@ -54,7 +55,7 @@ public class MROutputFiles extends MapOutputFile {
  @Override
  public Path getOutputFile()
      throws IOException {
    return lDirAlloc.getLocalPathToRead(Constants.OUTPUT + Path.SEPARATOR
    return lDirAlloc.getLocalPathToRead(MRJobConfig.OUTPUT + Path.SEPARATOR
        + MAP_OUTPUT_FILENAME_STRING, getConf());
  }

@@ -68,7 +69,7 @@ public class MROutputFiles extends MapOutputFile {
  @Override
  public Path getOutputFileForWrite(long size)
      throws IOException {
    return lDirAlloc.getLocalPathForWrite(Constants.OUTPUT + Path.SEPARATOR
    return lDirAlloc.getLocalPathForWrite(MRJobConfig.OUTPUT + Path.SEPARATOR
        + MAP_OUTPUT_FILENAME_STRING, size, getConf());
  }

@@ -89,7 +90,7 @@ public class MROutputFiles extends MapOutputFile {
  @Override
  public Path getOutputIndexFile()
      throws IOException {
    return lDirAlloc.getLocalPathToRead(Constants.OUTPUT + Path.SEPARATOR
    return lDirAlloc.getLocalPathToRead(MRJobConfig.OUTPUT + Path.SEPARATOR
        + MAP_OUTPUT_FILENAME_STRING + MAP_OUTPUT_INDEX_SUFFIX_STRING,
        getConf());
  }

@@ -104,7 +105,7 @@ public class MROutputFiles extends MapOutputFile {
  @Override
  public Path getOutputIndexFileForWrite(long size)
      throws IOException {
    return lDirAlloc.getLocalPathForWrite(Constants.OUTPUT + Path.SEPARATOR
    return lDirAlloc.getLocalPathForWrite(MRJobConfig.OUTPUT + Path.SEPARATOR
        + MAP_OUTPUT_FILENAME_STRING + MAP_OUTPUT_INDEX_SUFFIX_STRING,
        size, getConf());
  }

@@ -128,7 +129,7 @@ public class MROutputFiles extends MapOutputFile {
  @Override
  public Path getSpillFile(int spillNumber)
      throws IOException {
    return lDirAlloc.getLocalPathToRead(Constants.OUTPUT + "/spill"
    return lDirAlloc.getLocalPathToRead(MRJobConfig.OUTPUT + "/spill"
        + spillNumber + ".out", getConf());
  }

@@ -143,7 +144,7 @@ public class MROutputFiles extends MapOutputFile {
  @Override
  public Path getSpillFileForWrite(int spillNumber, long size)
      throws IOException {
    return lDirAlloc.getLocalPathForWrite(Constants.OUTPUT + "/spill"
    return lDirAlloc.getLocalPathForWrite(MRJobConfig.OUTPUT + "/spill"
        + spillNumber + ".out", size, getConf());
  }

@@ -157,7 +158,7 @@ public class MROutputFiles extends MapOutputFile {
  @Override
  public Path getSpillIndexFile(int spillNumber)
      throws IOException {
    return lDirAlloc.getLocalPathToRead(Constants.OUTPUT + "/spill"
    return lDirAlloc.getLocalPathToRead(MRJobConfig.OUTPUT + "/spill"
        + spillNumber + ".out.index", getConf());
  }

@@ -172,7 +173,7 @@ public class MROutputFiles extends MapOutputFile {
  @Override
  public Path getSpillIndexFileForWrite(int spillNumber, long size)
      throws IOException {
    return lDirAlloc.getLocalPathForWrite(Constants.OUTPUT + "/spill"
    return lDirAlloc.getLocalPathForWrite(MRJobConfig.OUTPUT + "/spill"
        + spillNumber + ".out.index", size, getConf());
  }

@@ -187,7 +188,7 @@ public class MROutputFiles extends MapOutputFile {
  public Path getInputFile(int mapId)
      throws IOException {
    return lDirAlloc.getLocalPathToRead(String.format(
        REDUCE_INPUT_FILE_FORMAT_STRING, Constants.OUTPUT, Integer
        REDUCE_INPUT_FILE_FORMAT_STRING, MRJobConfig.OUTPUT, Integer
            .valueOf(mapId)), getConf());
  }

@@ -204,7 +205,7 @@ public class MROutputFiles extends MapOutputFile {
      long size)
      throws IOException {
    return lDirAlloc.getLocalPathForWrite(String.format(
        REDUCE_INPUT_FILE_FORMAT_STRING, Constants.OUTPUT, mapId.getId()),
        REDUCE_INPUT_FILE_FORMAT_STRING, MRJobConfig.OUTPUT, mapId.getId()),
        size, getConf());
  }

@@ -212,7 +213,7 @@ public class MROutputFiles extends MapOutputFile {
  @Override
  public void removeAll()
      throws IOException {
    ((JobConf)getConf()).deleteLocalFiles(Constants.OUTPUT);
    ((JobConf)getConf()).deleteLocalFiles(MRJobConfig.OUTPUT);
  }

  @Override
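The MROutputFiles hunks above all swap Constants.OUTPUT for MRJobConfig.OUTPUT as the prefix handed to the LocalDirAllocator. Below is a minimal sketch of that path construction, under the assumption that the allocator is keyed on MRConfig.LOCAL_DIR as in the class above; the wrapper class is illustrative.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.LocalDirAllocator;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.MRConfig;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class SpillPathSketch {
      static Path spillFileForWrite(Configuration conf, int spillNumber, long size)
          throws IOException {
        // The allocator picks one of the configured local directories with enough space.
        LocalDirAllocator lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
        // Intermediate files now live under the MRJobConfig.OUTPUT prefix.
        return lDirAlloc.getLocalPathForWrite(
            MRJobConfig.OUTPUT + "/spill" + spillNumber + ".out", size, conf);
      }
    }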
@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SecureIOUtils;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.util.ProcessTree;
import org.apache.hadoop.util.Shell;
import org.apache.log4j.Appender;

@@ -75,10 +76,18 @@ public class TaskLog {
      }
    }
  }

  public static String getMRv2LogDir() {
    return System.getProperty(MRJobConfig.TASK_LOG_DIR);
  }

  public static File getTaskLogFile(TaskAttemptID taskid, boolean isCleanup,
      LogName filter) {
    return new File(getAttemptDir(taskid, isCleanup), filter.toString());
    if (getMRv2LogDir() != null) {
      return new File(getMRv2LogDir(), filter.toString());
    } else {
      return new File(getAttemptDir(taskid, isCleanup), filter.toString());
    }
  }

  static File getRealTaskLogFileLocation(TaskAttemptID taskid,
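The TaskLog change above makes log-file resolution prefer an MRv2-provided directory (published through the MRJobConfig.TASK_LOG_DIR system property) and fall back to the classic per-attempt directory. A small stand-alone sketch of that branching; the helper and its parameters are hypothetical.

    import java.io.File;

    public class LogDirFallbackSketch {
      // Mirrors the if/else added to TaskLog.getTaskLogFile above.
      static File resolveLogFile(String mrv2LogDir, File attemptDir, String logName) {
        return (mrv2LogDir != null)
            ? new File(mrv2LogDir, logName)   // MRv2 container: NodeManager-provided log dir
            : new File(attemptDir, logName);  // fallback: classic per-attempt log dir
      }
    }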
Some files were not shown because too many files have changed in this diff.