Reverting the previous trunk merge, since it brought in unintended changes in addition to the intended ones

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1177127 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2011-09-29 00:33:34 +00:00
parent 122113922f
commit 9992cae541
228 changed files with 2294 additions and 7365 deletions

View File

@ -598,8 +598,8 @@ runTests () {
echo ""
echo ""
echo "$MVN clean install test -Pnative -D${PROJECT_NAME}PatchProcess"
$MVN clean install test -Pnative -D${PROJECT_NAME}PatchProcess
echo "$MVN clean test -Pnative -D${PROJECT_NAME}PatchProcess"
$MVN clean test -Pnative -D${PROJECT_NAME}PatchProcess
if [[ $? != 0 ]] ; then
### Find and format names of failed tests
failed_tests=`find . -name 'TEST*.xml' | xargs $GREP -l -E "<failure|<error" | sed -e "s|.*target/surefire-reports/TEST-| |g" | sed -e "s|\.xml||g"`

View File

@ -2,12 +2,6 @@ Hadoop Change Log
Trunk (unreleased changes)
INCOMPATIBLE CHANGES
HADOOP-7542. Change Configuration XML format to 1.1 to add support for
serializing additional characters. This requires XML 1.1
support in the XML parser (Christopher Egner via harsh)
IMPROVEMENTS
HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm)
@ -20,11 +14,6 @@ Trunk (unreleased changes)
HADOOP-7635. RetryInvocationHandler should release underlying resources on
close (atm)
HADOOP-7668. Add a NetUtils method that can tell if an InetAddress
belongs to local host. (suresh)
HADOOP-7687 Make getProtocolSignature public (sanjay)
BUGS
HADOOP-7606. Upgrade Jackson to version 1.7.1 to match the version required
@ -34,16 +23,6 @@ Trunk (unreleased changes)
HADOOP-7641. Add Apache License to template config files (Eric Yang via atm)
HADOOP-7621. alfredo config should be in a file not readable by users
(Alejandro Abdelnur via atm)
HADOOP-7669 Fix newly introduced release audit warning.
(Uma Maheswara Rao G via stevel)
HADOOP-6220. HttpServer wraps InterruptedExceptions by IOExceptions if interrupted
in startup (stevel)
Release 0.23.0 - Unreleased
INCOMPATIBLE CHANGES
@ -308,6 +287,9 @@ Release 0.23.0 - Unreleased
HADOOP-7430. Improve error message when moving to trash fails due to
quota issue. (Ravi Prakash via mattf)
HADOOP-7457. Remove out-of-date Chinese language documentation.
(Jakob Homan via eli)
HADOOP-7444. Add Checksum API to verify and calculate checksums "in bulk"
(todd)
@ -406,13 +388,6 @@ Release 0.23.0 - Unreleased
HADOOP-7599. Script improvements to setup a secure Hadoop cluster
(Eric Yang via ddas)
HADOOP-7639. Enhance HttpServer to allow passing path-specs for filtering,
so that servers like the Yarn WebApp can have the paths served by their own
injected servlets filtered. (Thomas Graves via vinodkv)
HADOOP-7575. Enhanced LocalDirAllocator to support fully-qualified
paths. (Jonathan Eagles via vinodkv)
OPTIMIZATIONS
HADOOP-7333. Performance improvement in PureJavaCrc32. (Eric Caspole
@ -423,9 +398,6 @@ Release 0.23.0 - Unreleased
BUG FIXES
HADOOP-7630. hadoop-metrics2.properties should have a property *.period
set to a default value for metrics. (Eric Yang via mattf)
HADOOP-7327. FileSystem.listStatus() throws NullPointerException instead of
IOException upon access permission failure. (mattf)
@ -631,9 +603,6 @@ Release 0.23.0 - Unreleased
HADOOP-7631. Fixes a config problem to do with running streaming jobs
(Eric Yang via ddas)
HADOOP-7662. Fixed logs servlet to use the pathspec '/*' instead of '/'
for correct filtering. (Thomas Graves via vinodkv)
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
@ -1149,11 +1118,6 @@ Release 0.22.0 - Unreleased
HADOOP-7568. SequenceFile should not print into stdout.
(Plamen Jeliazkov via shv)
HADOOP-7663. Fix TestHDFSTrash failure. (Mayank Bansal via shv)
HADOOP-7457. Remove out-of-date Chinese language documentation.
(Jakob Homan via eli)
Release 0.21.1 - Unreleased
IMPROVEMENTS

View File

@ -82,12 +82,10 @@
<code>36000</code>.
</p>
<p><code>hadoop.http.authentication.signature.secret.file</code>: The signature secret
file for signing the authentication tokens. If not set a random secret is generated at
<p><code>hadoop.http.authentication.signature.secret</code>: The signature secret for
signing the authentication tokens. If not set a random secret is generated at
startup time. The same secret should be used for all nodes in the cluster, JobTracker,
NameNode, DataNode and TaskTracker. The default value is
<code>${user.home}/hadoop-http-auth-signature-secret</code>.
IMPORTANT: This file should be readable only by the Unix user running the daemons.
NameNode, DataNode and TaskTracker. The default value is a <code>hadoop</code> value.
</p>
<p><code>hadoop.http.authentication.cookie.domain</code>: The domain to use for the HTTP

View File

@ -1632,10 +1632,6 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
try {
doc =
DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument();
// Allow a broader set of control characters to appear in job confs.
// cf https://issues.apache.org/jira/browse/MAPREDUCE-109
doc.setXmlVersion( "1.1" );
} catch (ParserConfigurationException pe) {
throw new IOException(pe);
}
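
The three deleted lines above are what let control characters in a Configuration survive round-tripping: the document is declared as XML 1.1 before it is written out. A minimal standalone sketch of that idea, using only standard JAXP/DOM APIs (the class and method names below are made up for illustration):

import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.w3c.dom.Document;
import org.w3c.dom.Element;

public class Xml11ConfSketch {
  public static Document newConfDocument() throws ParserConfigurationException {
    Document doc =
        DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument();
    // Declare XML 1.1 so control characters (e.g. U+0001) can be emitted as
    // character references; XML 1.0 rejects them even in escaped form.
    doc.setXmlVersion("1.1");
    Element root = doc.createElement("configuration");
    doc.appendChild(root);
    return doc;
  }
}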

View File

@ -264,15 +264,9 @@ public class LocalDirAllocator {
Path tmpDir = new Path(localDirs[i]);
if(localFS.mkdirs(tmpDir)|| localFS.exists(tmpDir)) {
try {
File tmpFile = tmpDir.isAbsolute()
? new File(localFS.makeQualified(tmpDir).toUri())
: new File(localDirs[i]);
DiskChecker.checkDir(tmpFile);
dirs.add(tmpFile.getPath());
dfList.add(new DF(tmpFile, 30000));
DiskChecker.checkDir(new File(localDirs[i]));
dirs.add(localDirs[i]);
dfList.add(new DF(new File(localDirs[i]), 30000));
} catch (DiskErrorException de) {
LOG.warn( localDirs[i] + " is not writable\n", de);
}
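
The block removed here qualified each configured directory through the local FileSystem before disk-checking it, so that absolute paths and file: URIs resolve to the intended java.io.File. A sketch of that resolution step (the helper class and method name are invented; the Hadoop calls are the ones visible in the removed lines):

import java.io.File;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;

class LocalDirResolveSketch {
  // Resolve a configured dir string to a File: qualify absolute paths via the
  // local FileSystem, keep relative ones as plain working-dir-relative paths.
  static File resolveAndCheck(LocalFileSystem localFS, String dir) throws DiskErrorException {
    Path tmpDir = new Path(dir);
    File tmpFile = tmpDir.isAbsolute()
        ? new File(localFS.makeQualified(tmpDir).toUri())
        : new File(dir);
    DiskChecker.checkDir(tmpFile);  // throws DiskErrorException if the dir is not usable
    return tmpFile;
  }
}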

View File

@ -20,7 +20,6 @@ package org.apache.hadoop.http;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.InterruptedIOException;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.URL;
@ -126,29 +125,6 @@ public class HttpServer implements FilterContainer {
this(name, bindAddress, port, findPort, conf, null, connector);
}
/**
* Create a status server on the given port. Allows you to specify the
* path specifications that this server will be serving so that they will be
* added to the filters properly.
*
* @param name The name of the server
* @param bindAddress The address for this server
* @param port The port to use on the server
* @param findPort whether the server should start at the given port and
* increment by 1 until it finds a free port.
* @param conf Configuration
* @param pathSpecs Path specifications that this httpserver will be serving.
* These will be added to any filters.
*/
public HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, String[] pathSpecs) throws IOException {
this(name, bindAddress, port, findPort, conf, null, null);
for (String path : pathSpecs) {
LOG.info("adding path spec: " + path);
addFilterPathMapping(path, webAppContext);
}
}
/**
* Create a status server on the given port.
* The jsp scripts are taken from src/webapps/<name>.
@ -283,7 +259,7 @@ public class HttpServer implements FilterContainer {
if (logDir != null) {
Context logContext = new Context(parent, "/logs");
logContext.setResourceBase(logDir);
logContext.addServlet(AdminAuthorizedServlet.class, "/*");
logContext.addServlet(AdminAuthorizedServlet.class, "/");
logContext.setDisplayName("logs");
setContextAttributes(logContext, conf);
defaultContexts.put(logContext, true);
@ -684,9 +660,6 @@ public class HttpServer implements FilterContainer {
}
} catch (IOException e) {
throw e;
} catch (InterruptedException e) {
throw (IOException) new InterruptedIOException(
"Interrupted while starting HTTP server").initCause(e);
} catch (Exception e) {
throw new IOException("Problem starting http server", e);
}
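
For context, the pathSpecs constructor deleted above was used roughly as follows: hand the server the path specifications its injected servlets serve, so the configured filters are mapped onto those paths as well. A usage sketch under that assumption (server name, bind address and paths are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer;

public class PathSpecServerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String[] pathSpecs = { "/path", "/path/*" };   // paths served by injected servlets
    HttpServer http = new HttpServer("test", "0.0.0.0", 0, true, conf, pathSpecs);
    http.start();                                  // filters now also cover /path and /path/*
    try {
      System.out.println("listening on port " + http.getPort());
    } finally {
      http.stop();
    }
  }
}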

View File

@ -199,7 +199,7 @@ public class ProtocolSignature implements Writable {
* @param protocol protocol
* @return the server's protocol signature
*/
public static ProtocolSignature getProtocolSignature(
static ProtocolSignature getProtocolSignature(
int clientMethodsHashCode,
long serverVersion,
Class<? extends VersionedProtocol> protocol) {

View File

@ -516,25 +516,4 @@ public class NetUtils {
} catch (UnknownHostException ignore) { }
return addr;
}
/**
* Given an InetAddress, checks to see if the address is a local address, by
* comparing the address with all the interfaces on the node.
* @param addr address to check if it is local node's address
* @return true if the address corresponds to the local node
*/
public static boolean isLocalAddress(InetAddress addr) {
// Check if the address is any local or loop back
boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress();
// Check if the address is defined on any interface
if (!local) {
try {
local = NetworkInterface.getByInetAddress(addr) != null;
} catch (SocketException e) {
local = false;
}
}
return local;
}
}
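
A short usage sketch of the helper being removed, assuming nothing beyond java.net and the method above:

import java.net.InetAddress;
import org.apache.hadoop.net.NetUtils;

public class LocalAddressSketch {
  public static void main(String[] args) throws Exception {
    InetAddress addr = InetAddress.getByName(args.length > 0 ? args[0] : "127.0.0.1");
    // True for wildcard/loopback addresses and for addresses bound to a local interface.
    System.out.println(addr + " is local? " + NetUtils.isLocalAddress(addr));
  }
}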

View File

@ -22,9 +22,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.HashMap;
import java.util.Map;
@ -43,9 +40,7 @@ import java.util.Map;
*/
public class AuthenticationFilterInitializer extends FilterInitializer {
static final String PREFIX = "hadoop.http.authentication.";
static final String SIGNATURE_SECRET_FILE = AuthenticationFilter.SIGNATURE_SECRET + ".file";
private static final String PREFIX = "hadoop.http.authentication.";
/**
* Initializes Alfredo AuthenticationFilter.
@ -72,25 +67,6 @@ public class AuthenticationFilterInitializer extends FilterInitializer {
}
}
String signatureSecretFile = filterConfig.get(SIGNATURE_SECRET_FILE);
if (signatureSecretFile == null) {
throw new RuntimeException("Undefined property: " + SIGNATURE_SECRET_FILE);
}
try {
StringBuilder secret = new StringBuilder();
Reader reader = new FileReader(signatureSecretFile);
int c = reader.read();
while (c > -1) {
secret.append((char)c);
c = reader.read();
}
reader.close();
filterConfig.put(AuthenticationFilter.SIGNATURE_SECRET, secret.toString());
} catch (IOException ex) {
throw new RuntimeException("Could not read HTTP signature secret file: " + signatureSecretFile);
}
container.addFilter("authentication",
AuthenticationFilter.class.getName(),
filterConfig);
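
The deleted block reads the secret file character by character with a bare FileReader and no finally around close(). A restructured sketch of the same read using try-with-resources; this is a cleanup suggestion, not code from the patch:

import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;

class SignatureSecretSketch {
  // Read the whole signature secret file into a String, closing the reader
  // even if the read fails.
  static String readSecret(String signatureSecretFile) throws IOException {
    StringBuilder secret = new StringBuilder();
    try (Reader reader = new FileReader(signatureSecretFile)) {
      int c;
      while ((c = reader.read()) > -1) {
        secret.append((char) c);
      }
    }
    return secret.toString();
  }
}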

View File

@ -475,10 +475,7 @@ else
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/taskcontroller.cfg ${HADOOP_CONF_DIR}/taskcontroller.cfg
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/hadoop-metrics2.properties ${HADOOP_CONF_DIR}/hadoop-metrics2.properties
if [ ! -e ${HADOOP_CONF_DIR}/capacity-scheduler.xml ]; then
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/capacity-scheduler.xml ${HADOOP_CONF_DIR}/capacity-scheduler.xml
fi
if [ ! -e ${HADOOP_CONF_DIR}/hadoop-metrics2.properties ]; then
cp ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/hadoop-metrics2.properties ${HADOOP_CONF_DIR}/hadoop-metrics2.properties
template_generator ${HADOOP_PREFIX}/share/hadoop/templates/conf/capacity-scheduler.xml ${HADOOP_CONF_DIR}/capacity-scheduler.xml
fi
if [ ! -e ${HADOOP_CONF_DIR}/log4j.properties ]; then
cp ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/log4j.properties ${HADOOP_CONF_DIR}/log4j.properties

View File

@ -1,20 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# syntax: [prefix].[source|sink|jmx].[instance].[options]
# See package.html for org.apache.hadoop.metrics2 for details
*.period=60

View File

@ -144,26 +144,6 @@
</description>
</property>
<property>
<name>dfs.web.authentication.kerberos.principal</name>
<value>HTTP/_HOST@${local.realm}</value>
<description>
The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
HTTP SPNEGO specification.
</description>
</property>
<property>
<name>dfs.web.authentication.kerberos.keytab</name>
<value>/etc/security/keytabs/nn.service.keytab</value>
<description>
The Kerberos keytab file with the credentials for the
HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
</description>
</property>
<property>
<name>dfs.namenode.keytab.file</name>
<value>/etc/security/keytabs/nn.service.keytab</value>

View File

@ -1,213 +0,0 @@
# Copyright 2011 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Define some default values that can be overridden by system properties
hadoop.root.logger=INFO,console
hadoop.log.dir=.
hadoop.log.file=hadoop.log
#
# Job Summary Appender
#
# Use following logger to send summary to separate file defined by
# hadoop.mapreduce.jobsummary.log.file rolled daily:
# hadoop.mapreduce.jobsummary.logger=INFO,JSA
#
hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
# Define the root logger to the system property "hadoop.root.logger".
log4j.rootLogger=${hadoop.root.logger}, EventCounter
# Logging Threshold
log4j.threshold=ALL
#
# Daily Rolling File Appender
#
log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
# Rollover at midnight
log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
# 30-day backup
#log4j.appender.DRFA.MaxBackupIndex=30
log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
# Pattern format: Date LogLevel LoggerName LogMessage
log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
# Debugging Pattern format
#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
#
# console
# Add "console" to rootlogger above if you want to use this
#
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
#
# TaskLog Appender
#
#Default values
hadoop.tasklog.taskid=null
hadoop.tasklog.iscleanup=false
hadoop.tasklog.noKeepSplits=4
hadoop.tasklog.totalLogFileSize=100
hadoop.tasklog.purgeLogSplits=true
hadoop.tasklog.logsRetainHours=12
log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
#
#Security appender
#
hadoop.security.log.file=SecurityAuth.audit
log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
#new logger
# Define some default values that can be overridden by system properties
hadoop.security.logger=INFO,console
log4j.category.SecurityLogger=${hadoop.security.logger}
# hdfs audit logging
hdfs.audit.logger=INFO,console
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
# mapred audit logging
mapred.audit.logger=INFO,console
log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
#
# Rolling File Appender
#
#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
# Logfile size and 30-day backups
#log4j.appender.RFA.MaxFileSize=1MB
#log4j.appender.RFA.MaxBackupIndex=30
#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
#
# FSNamesystem Audit logging
# All audit events are logged at INFO level
#
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=WARN
# Custom Logging levels
#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
# Jets3t library
log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
#
# Event Counter Appender
# Sends counts of logging messages at different severity levels to Hadoop Metrics.
#
log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
#
# Job Summary Appender
#
log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
log4j.appender.JSA.DatePattern=.yyyy-MM-dd
log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
#
# MapReduce Audit Log Appender
#
# Set the MapReduce audit log filename
#hadoop.mapreduce.audit.log.file=hadoop-mapreduce.audit.log
# Appender for AuditLogger.
# Requires the following system properties to be set
# - hadoop.log.dir (Hadoop Log directory)
# - hadoop.mapreduce.audit.log.file (MapReduce audit log filename)
#log4j.logger.org.apache.hadoop.mapred.AuditLogger=INFO,MRAUDIT
#log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
#log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
#log4j.appender.MRAUDIT.File=${hadoop.log.dir}/${hadoop.mapreduce.audit.log.file}
#log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
#log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
#log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
#
# Yarn ResourceManager Application Summary Log
#
# Set the ResourceManager summary log filename
#yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
# Set the ResourceManager summary log level and appender
#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
# Appender for ResourceManager Application Summary Log - rolled daily
# Requires the following properties to be set
# - hadoop.log.dir (Hadoop Log directory)
# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
#log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
#log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
#log4j.appender.RMSUMMARY=org.apache.log4j.DailyRollingFileAppender
#log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
#log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
#log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
#log4j.appender.RMSUMMARY.DatePattern=.yyyy-MM-dd

View File

@ -808,8 +808,8 @@
</property>
<property>
<name>hadoop.http.authentication.signature.secret.file</name>
<value>${user.home}/hadoop-http-auth-signature-secret</value>
<name>hadoop.http.authentication.signature.secret</name>
<value>hadoop</value>
<description>
The signature secret for signing the authentication tokens.
If not set a random secret is generated at startup time.

View File

@ -58,7 +58,7 @@ public class TestConfiguration extends TestCase {
}
private void startConfig() throws IOException{
out.write("<?xml version=\"1.1\"?>\n");
out.write("<?xml version=\"1.0\"?>\n");
out.write("<configuration>\n");
}
@ -221,18 +221,6 @@ public class TestConfiguration extends TestCase {
assertEquals("this contains a comment", conf.get("my.comment"));
}
public void testControlAInValue() throws IOException {
out = new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("my.char", "&#1;");
appendProperty("my.string", "some&#1;string");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
assertEquals("\u0001", conf.get("my.char"));
assertEquals("some\u0001string", conf.get("my.string"));
}
public void testTrim() throws IOException {
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
@ -310,7 +298,7 @@ public class TestConfiguration extends TestCase {
conf.writeXml(baos);
String result = baos.toString();
assertTrue("Result has proper header", result.startsWith(
"<?xml version=\"1.1\" encoding=\"UTF-8\" standalone=\"no\"?><configuration>"));
"<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?><configuration>"));
assertTrue("Result has proper footer", result.endsWith("</configuration>"));
}

View File

@ -20,18 +20,11 @@ package org.apache.hadoop.fs;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import org.junit.Test;
import static org.junit.Assert.*;
import junit.framework.TestCase;
/** This test verifies that LocalDirAllocator works correctly;
* Every test case uses different buffer dirs to
@ -40,15 +33,20 @@ import static org.junit.Assert.*;
* a directory can be created in a read-only directory
* which breaks this test.
*/
@RunWith(Parameterized.class)
public class TestLocalDirAllocator {
public class TestLocalDirAllocator extends TestCase {
final static private Configuration conf = new Configuration();
final static private String BUFFER_DIR_ROOT = "build/test/temp";
final static private String ABSOLUTE_DIR_ROOT;
final static private String QUALIFIED_DIR_ROOT;
final static private Path BUFFER_PATH_ROOT = new Path(BUFFER_DIR_ROOT);
final static private File BUFFER_ROOT = new File(BUFFER_DIR_ROOT);
final static private String CONTEXT = "fs.client.buffer.dir";
final static private String BUFFER_DIR[] = new String[] {
BUFFER_DIR_ROOT+"/tmp0", BUFFER_DIR_ROOT+"/tmp1", BUFFER_DIR_ROOT+"/tmp2",
BUFFER_DIR_ROOT+"/tmp3", BUFFER_DIR_ROOT+"/tmp4", BUFFER_DIR_ROOT+"/tmp5",
BUFFER_DIR_ROOT+"/tmp6"};
final static private Path BUFFER_PATH[] = new Path[] {
new Path(BUFFER_DIR[0]), new Path(BUFFER_DIR[1]), new Path(BUFFER_DIR[2]),
new Path(BUFFER_DIR[3]), new Path(BUFFER_DIR[4]), new Path(BUFFER_DIR[5]),
new Path(BUFFER_DIR[6])};
final static private String CONTEXT = "dfs.client.buffer.dir";
final static private String FILENAME = "block";
final static private LocalDirAllocator dirAllocator =
new LocalDirAllocator(CONTEXT);
@ -56,12 +54,6 @@ public class TestLocalDirAllocator {
final static private boolean isWindows =
System.getProperty("os.name").startsWith("Windows");
final static int SMALL_FILE_SIZE = 100;
final static private String RELATIVE = "/RELATIVE";
final static private String ABSOLUTE = "/ABSOLUTE";
final static private String QUALIFIED = "/QUALIFIED";
final private String ROOT;
final private String PREFIX;
static {
try {
localFs = FileSystem.getLocal(conf);
@ -71,27 +63,6 @@ public class TestLocalDirAllocator {
e.printStackTrace();
System.exit(-1);
}
ABSOLUTE_DIR_ROOT = new Path(localFs.getWorkingDirectory(),
BUFFER_DIR_ROOT).toUri().getPath();
QUALIFIED_DIR_ROOT = new Path(localFs.getWorkingDirectory(),
BUFFER_DIR_ROOT).toUri().toString();
}
public TestLocalDirAllocator(String root, String prefix) {
ROOT = root;
PREFIX = prefix;
}
@Parameters
public static Collection<Object[]> params() {
Object [][] data = new Object[][] {
{ BUFFER_DIR_ROOT, RELATIVE },
{ ABSOLUTE_DIR_ROOT, ABSOLUTE },
{ QUALIFIED_DIR_ROOT, QUALIFIED }
};
return Arrays.asList(data);
}
private static void rmBufferDirs() throws IOException {
@ -99,41 +70,36 @@ public class TestLocalDirAllocator {
localFs.delete(BUFFER_PATH_ROOT, true));
}
private static void validateTempDirCreation(String dir) throws IOException {
private void validateTempDirCreation(int i) throws IOException {
File result = createTempFile(SMALL_FILE_SIZE);
assertTrue("Checking for " + dir + " in " + result + " - FAILED!",
result.getPath().startsWith(new Path(dir, FILENAME).toUri().getPath()));
assertTrue("Checking for " + BUFFER_DIR[i] + " in " + result + " - FAILED!",
result.getPath().startsWith(new File(BUFFER_DIR[i], FILENAME).getPath()));
}
private static File createTempFile() throws IOException {
return createTempFile(-1);
}
private static File createTempFile(long size) throws IOException {
File result = dirAllocator.createTmpFileForWrite(FILENAME, size, conf);
private File createTempFile() throws IOException {
File result = dirAllocator.createTmpFileForWrite(FILENAME, -1, conf);
result.delete();
return result;
}
private String buildBufferDir(String dir, int i) {
return dir + PREFIX + i;
private File createTempFile(long size) throws IOException {
File result = dirAllocator.createTmpFileForWrite(FILENAME, size, conf);
result.delete();
return result;
}
/** Two buffer dirs. The first dir does not exist & is on a read-only disk;
* The second dir exists & is RW
* @throws Exception
*/
@Test
public void test0() throws Exception {
if (isWindows) return;
String dir0 = buildBufferDir(ROOT, 0);
String dir1 = buildBufferDir(ROOT, 1);
try {
conf.set(CONTEXT, dir0 + "," + dir1);
assertTrue(localFs.mkdirs(new Path(dir1)));
conf.set(CONTEXT, BUFFER_DIR[0]+","+BUFFER_DIR[1]);
assertTrue(localFs.mkdirs(BUFFER_PATH[1]));
BUFFER_ROOT.setReadOnly();
validateTempDirCreation(dir1);
validateTempDirCreation(dir1);
validateTempDirCreation(1);
validateTempDirCreation(1);
} finally {
Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
rmBufferDirs();
@ -144,17 +110,14 @@ public class TestLocalDirAllocator {
* The second dir exists & is RW
* @throws Exception
*/
@Test
public void test1() throws Exception {
if (isWindows) return;
String dir1 = buildBufferDir(ROOT, 1);
String dir2 = buildBufferDir(ROOT, 2);
try {
conf.set(CONTEXT, dir1 + "," + dir2);
assertTrue(localFs.mkdirs(new Path(dir2)));
conf.set(CONTEXT, BUFFER_DIR[1]+","+BUFFER_DIR[2]);
assertTrue(localFs.mkdirs(BUFFER_PATH[2]));
BUFFER_ROOT.setReadOnly();
validateTempDirCreation(dir2);
validateTempDirCreation(dir2);
validateTempDirCreation(2);
validateTempDirCreation(2);
} finally {
Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
rmBufferDirs();
@ -163,13 +126,10 @@ public class TestLocalDirAllocator {
/** Two buffer dirs. Both do not exist but on a RW disk.
* Check if tmp dirs are allocated in a round-robin
*/
@Test
public void test2() throws Exception {
if (isWindows) return;
String dir2 = buildBufferDir(ROOT, 2);
String dir3 = buildBufferDir(ROOT, 3);
try {
conf.set(CONTEXT, dir2 + "," + dir3);
conf.set(CONTEXT, BUFFER_DIR[2]+","+BUFFER_DIR[3]);
// create the first file, and then figure the round-robin sequence
createTempFile(SMALL_FILE_SIZE);
@ -177,9 +137,9 @@ public class TestLocalDirAllocator {
int secondDirIdx = (firstDirIdx == 2) ? 3 : 2;
// check if tmp dirs are allocated in a round-robin manner
validateTempDirCreation(buildBufferDir(ROOT, firstDirIdx));
validateTempDirCreation(buildBufferDir(ROOT, secondDirIdx));
validateTempDirCreation(buildBufferDir(ROOT, firstDirIdx));
validateTempDirCreation(firstDirIdx);
validateTempDirCreation(secondDirIdx);
validateTempDirCreation(firstDirIdx);
} finally {
rmBufferDirs();
}
@ -189,27 +149,23 @@ public class TestLocalDirAllocator {
* Later disk1 becomes read-only.
* @throws Exception
*/
@Test
public void test3() throws Exception {
if (isWindows) return;
String dir3 = buildBufferDir(ROOT, 3);
String dir4 = buildBufferDir(ROOT, 4);
try {
conf.set(CONTEXT, dir3 + "," + dir4);
assertTrue(localFs.mkdirs(new Path(dir3)));
assertTrue(localFs.mkdirs(new Path(dir4)));
conf.set(CONTEXT, BUFFER_DIR[3]+","+BUFFER_DIR[4]);
assertTrue(localFs.mkdirs(BUFFER_PATH[3]));
assertTrue(localFs.mkdirs(BUFFER_PATH[4]));
// Create the first small file
// create the first file with size, and then figure the round-robin sequence
createTempFile(SMALL_FILE_SIZE);
// Determine the round-robin sequence
int nextDirIdx = (dirAllocator.getCurrentDirectoryIndex() == 0) ? 3 : 4;
validateTempDirCreation(buildBufferDir(ROOT, nextDirIdx));
validateTempDirCreation(nextDirIdx);
// change buffer directory 2 to be read only
new File(new Path(dir4).toUri().getPath()).setReadOnly();
validateTempDirCreation(dir3);
validateTempDirCreation(dir3);
new File(BUFFER_DIR[4]).setReadOnly();
validateTempDirCreation(3);
validateTempDirCreation(3);
} finally {
rmBufferDirs();
}
@ -227,25 +183,20 @@ public class TestLocalDirAllocator {
* @throws Exception
*/
static final int TRIALS = 100;
@Test
public void test4() throws Exception {
if (isWindows) return;
String dir5 = buildBufferDir(ROOT, 5);
String dir6 = buildBufferDir(ROOT, 6);
try {
conf.set(CONTEXT, dir5 + "," + dir6);
assertTrue(localFs.mkdirs(new Path(dir5)));
assertTrue(localFs.mkdirs(new Path(dir6)));
conf.set(CONTEXT, BUFFER_DIR[5]+","+BUFFER_DIR[6]);
assertTrue(localFs.mkdirs(BUFFER_PATH[5]));
assertTrue(localFs.mkdirs(BUFFER_PATH[6]));
int inDir5=0, inDir6=0;
for(int i = 0; i < TRIALS; ++i) {
File result = createTempFile();
if(result.getPath().startsWith(
new Path(dir5, FILENAME).toUri().getPath())) {
if(result.getPath().startsWith(new File(BUFFER_DIR[5], FILENAME).getPath())) {
inDir5++;
} else if(result.getPath().startsWith(
new Path(dir6, FILENAME).toUri().getPath())) {
} else if(result.getPath().startsWith(new File(BUFFER_DIR[6], FILENAME).getPath())) {
inDir6++;
}
result.delete();
@ -264,13 +215,10 @@ public class TestLocalDirAllocator {
* directory. With checkAccess true, the directory should not be created.
* @throws Exception
*/
@Test
public void testLocalPathForWriteDirCreation() throws IOException {
String dir0 = buildBufferDir(ROOT, 0);
String dir1 = buildBufferDir(ROOT, 1);
try {
conf.set(CONTEXT, dir0 + "," + dir1);
assertTrue(localFs.mkdirs(new Path(dir1)));
conf.set(CONTEXT, BUFFER_DIR[0] + "," + BUFFER_DIR[1]);
assertTrue(localFs.mkdirs(BUFFER_PATH[1]));
BUFFER_ROOT.setReadOnly();
Path p1 =
dirAllocator.getLocalPathForWrite("p1/x", SMALL_FILE_SIZE, conf);
@ -290,25 +238,4 @@ public class TestLocalDirAllocator {
}
}
/** Test no side effect files are left over. After creating a
* temp file, remove both the temp file and its parent. Verify that
* no files or directories are left over as can happen when File objects
* are mistakenly created from fully qualified path strings.
* @throws IOException
*/
@Test
public void testNoSideEffects() throws IOException {
if (isWindows) return;
String dir = buildBufferDir(ROOT, 0);
try {
conf.set(CONTEXT, dir);
File result = dirAllocator.createTmpFileForWrite(FILENAME, -1, conf);
assertTrue(result.delete());
assertTrue(result.getParentFile().delete());
assertFalse(new File(dir).exists());
} finally {
Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
rmBufferDirs();
}
}
}
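
Outside JUnit, the allocator exercised by these tests is driven roughly like this; a hedged sketch assuming a context property that lists writable local dirs (the property name mirrors the test's CONTEXT, the directories themselves are illustrative):

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;

public class DirAllocatorUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Comma-separated local dirs; the allocator round-robins across the usable ones.
    conf.set("fs.client.buffer.dir", "/tmp/alloc0,/tmp/alloc1");
    LocalDirAllocator allocator = new LocalDirAllocator("fs.client.buffer.dir");
    // Reserve a path for a write of a known size, or create a temp file directly.
    Path p = allocator.getLocalPathForWrite("block", 100, conf);
    File tmp = allocator.createTmpFileForWrite("block", 100, conf);
    System.out.println("path=" + p + ", tmpFile=" + tmp);
  }
}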

View File

@ -486,9 +486,6 @@ public class TestTrash extends TestCase {
conf.set(FS_TRASH_INTERVAL_KEY, "0.2"); // 12 seconds
conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
conf.set(FS_TRASH_CHECKPOINT_INTERVAL_KEY, "0.1"); // 6 seconds
FileSystem fs = FileSystem.getLocal(conf);
conf.set("fs.default.name", fs.getUri().toString());
Trash trash = new Trash(conf);
// Start Emptier in background
@ -496,6 +493,8 @@ public class TestTrash extends TestCase {
Thread emptierThread = new Thread(emptier);
emptierThread.start();
FileSystem fs = FileSystem.getLocal(conf);
conf.set("fs.defaultFS", fs.getUri().toString());
FsShell shell = new FsShell();
shell.setConf(conf);
shell.init();

View File

@ -70,21 +70,6 @@ public class HttpServerFunctionalTest extends Assert {
return createServer(TEST, conf);
}
/**
* Create but do not start the test webapp server. The test webapp dir is
* prepared/checked in advance.
* @param conf the server configuration to use
* @return the server instance
*
* @throws IOException if a problem occurs
* @throws AssertionError if a condition was not met
*/
public static HttpServer createTestServer(Configuration conf,
String[] pathSpecs) throws IOException {
prepareTestWebapp();
return createServer(TEST, conf, pathSpecs);
}
/**
* Prepare the test webapp by creating the directory from the test properties
* fail if the directory cannot be created.
@ -119,18 +104,6 @@ public class HttpServerFunctionalTest extends Assert {
throws IOException {
return new HttpServer(webapp, "0.0.0.0", 0, true, conf);
}
/**
* Create an HttpServer instance for the given webapp
* @param webapp the webapp to work with
* @param conf the configuration to use for the server
* @param pathSpecs the paths specifications the server will service
* @return the server
* @throws IOException if it could not be created
*/
public static HttpServer createServer(String webapp, Configuration conf,
String[] pathSpecs) throws IOException {
return new HttpServer(webapp, "0.0.0.0", 0, true, conf, pathSpecs);
}
/**
* Create and start a server with the test webapp

View File

@ -1,145 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URL;
import java.net.URLConnection;
import java.util.Set;
import java.util.TreeSet;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
public class TestPathFilter extends HttpServerFunctionalTest {
static final Log LOG = LogFactory.getLog(HttpServer.class);
static final Set<String> RECORDS = new TreeSet<String>();
/** A very simple filter that records accessed uri's */
static public class RecordingFilter implements Filter {
private FilterConfig filterConfig = null;
public void init(FilterConfig filterConfig) {
this.filterConfig = filterConfig;
}
public void destroy() {
this.filterConfig = null;
}
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain chain) throws IOException, ServletException {
if (filterConfig == null)
return;
String uri = ((HttpServletRequest)request).getRequestURI();
LOG.info("filtering " + uri);
RECORDS.add(uri);
chain.doFilter(request, response);
}
/** Configuration for RecordingFilter */
static public class Initializer extends FilterInitializer {
public Initializer() {}
public void initFilter(FilterContainer container, Configuration conf) {
container.addFilter("recording", RecordingFilter.class.getName(), null);
}
}
}
/** access a url, ignoring some IOException such as the page does not exist */
static void access(String urlstring) throws IOException {
LOG.warn("access " + urlstring);
URL url = new URL(urlstring);
URLConnection connection = url.openConnection();
connection.connect();
try {
BufferedReader in = new BufferedReader(new InputStreamReader(
connection.getInputStream()));
try {
for(; in.readLine() != null; );
} finally {
in.close();
}
} catch(IOException ioe) {
LOG.warn("urlstring=" + urlstring, ioe);
}
}
@Test
public void testPathSpecFilters() throws Exception {
Configuration conf = new Configuration();
//start a http server with CountingFilter
conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
RecordingFilter.Initializer.class.getName());
String[] pathSpecs = { "/path", "/path/*" };
HttpServer http = createTestServer(conf, pathSpecs);
http.start();
final String baseURL = "/path";
final String baseSlashURL = "/path/";
final String addedURL = "/path/nodes";
final String addedSlashURL = "/path/nodes/";
final String longURL = "/path/nodes/foo/job";
final String rootURL = "/";
final String allURL = "/*";
final String[] filteredUrls = {baseURL, baseSlashURL, addedURL,
addedSlashURL, longURL};
final String[] notFilteredUrls = {rootURL, allURL};
// access the urls and verify our paths specs got added to the
// filters
final String prefix = "http://localhost:" + http.getPort();
try {
for(int i = 0; i < filteredUrls.length; i++) {
access(prefix + filteredUrls[i]);
}
for(int i = 0; i < notFilteredUrls.length; i++) {
access(prefix + notFilteredUrls[i]);
}
} finally {
http.stop();
}
LOG.info("RECORDS = " + RECORDS);
//verify records
for(int i = 0; i < filteredUrls.length; i++) {
assertTrue(RECORDS.remove(filteredUrls[i]));
}
assertTrue(RECORDS.isEmpty());
}
}

View File

@ -18,17 +18,13 @@
package org.apache.hadoop.net;
import org.junit.Test;
import static org.junit.Assert.*;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.Socket;
import java.net.ConnectException;
import java.net.SocketException;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.Enumeration;
import org.apache.hadoop.conf.Configuration;
@ -92,32 +88,4 @@ public class TestNetUtils {
fail("NetUtils.verifyHostnames threw unexpected UnknownHostException");
}
}
/**
* Test for {@link NetUtils#isLocalAddress(java.net.InetAddress)}
*/
@Test
public void testIsLocalAddress() throws Exception {
// Test - local host is local address
assertTrue(NetUtils.isLocalAddress(InetAddress.getLocalHost()));
// Test - all addresses bound network interface is local address
Enumeration<NetworkInterface> interfaces = NetworkInterface
.getNetworkInterfaces();
if (interfaces != null) { // Iterate through all network interfaces
while (interfaces.hasMoreElements()) {
NetworkInterface i = interfaces.nextElement();
Enumeration<InetAddress> addrs = i.getInetAddresses();
if (addrs == null) {
continue;
}
// Iterate through all the addresses of a network interface
while (addrs.hasMoreElements()) {
InetAddress addr = addrs.nextElement();
assertTrue(NetUtils.isLocalAddress(addr));
}
}
}
assertFalse(NetUtils.isLocalAddress(InetAddress.getByName("8.8.8.8")));
}
}

View File

@ -25,29 +25,15 @@ import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import java.io.File;
import java.io.FileWriter;
import java.io.Writer;
import java.util.Map;
public class TestAuthenticationFilter extends TestCase {
@SuppressWarnings("unchecked")
public void testConfiguration() throws Exception {
public void testConfiguration() {
Configuration conf = new Configuration();
conf.set("hadoop.http.authentication.foo", "bar");
File testDir = new File(System.getProperty("test.build.data",
"target/test-dir"));
testDir.mkdirs();
File secretFile = new File(testDir, "http-secret.txt");
Writer writer = new FileWriter(new File(testDir, "http-secret.txt"));
writer.write("hadoop");
writer.close();
conf.set(AuthenticationFilterInitializer.PREFIX +
AuthenticationFilterInitializer.SIGNATURE_SECRET_FILE,
secretFile.getAbsolutePath());
FilterContainer container = Mockito.mock(FilterContainer.class);
Mockito.doAnswer(
new Answer() {

View File

@ -16,9 +16,6 @@ Trunk (unreleased changes)
HDFS-2318. Provide authentication to webhdfs using SPNEGO and delegation
tokens. (szetszwo)
HDFS-2340. Support getFileBlockLocations and getDelegationToken in webhdfs.
(szetszwo)
IMPROVEMENTS
HADOOP-7524 Change RPC to allow multiple protocols including multiple versions of the same protocol (sanjay Radia)
@ -38,18 +35,6 @@ Trunk (unreleased changes)
not use ArrayWritable for writing non-array items. (Uma Maheswara Rao G
via szetszwo)
HDFS-2351 Change Namenode and Datanode to register each of their protocols
separately. (Sanjay Radia)
HDFS-2356. Support case insensitive query parameter names in webhdfs.
(szetszwo)
HDFS-2368. Move SPNEGO conf properties from hdfs-default.xml to
hdfs-site.xml. (szetszwo)
HDFS-2355. Federation: enable using the same configuration file across
all the nodes in the cluster. (suresh)
BUG FIXES
HDFS-2287. TestParallelRead has a small off-by-one bug. (todd)
@ -72,17 +57,6 @@ Trunk (unreleased changes)
IOExceptions of stream closures can mask root exceptions. (Uma Maheswara
Rao G via szetszwo)
HDFS-46. Change default namespace quota of root directory from
Integer.MAX_VALUE to Long.MAX_VALUE. (Uma Maheswara Rao G via szetszwo)
HDFS-2366. Initialize WebHdfsFileSystem.ugi in object construction.
(szetszwo)
HDFS-2373. Commands using webhdfs and hftp print unnecessary debug
info on the console with security enabled. (Arpit Gupta via suresh)
HDFS-2361. hftp is broken, fixed username checks in JspHelper. (jitendra)
Release 0.23.0 - Unreleased
INCOMPATIBLE CHANGES
@ -765,12 +739,6 @@ Release 0.23.0 - Unreleased
HDFS-1217. Change some NameNode methods from public to package private.
(Laxman via szetszwo)
HDFS-2332. Add test for HADOOP-7629 (using an immutable FsPermission
object as an RPC parameter fails). (todd)
HDFS-2363. Move datanodes size printing from FSNamesystem.metasave(..)
to BlockManager. (Uma Maheswara Rao G via szetszwo)
OPTIMIZATIONS
HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
@ -1639,11 +1607,7 @@ Release 0.22.0 - Unreleased
HDFS-2232. Generalize regular expressions in TestHDFSCLI.
(Plamen Jeliazkov via shv)
HDFS-2290. Block with corrupt replica is not getting replicated.
(Benoy Antony via shv)
Release 0.21.1 - Unreleased
HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
HDFS-874. TestHDFSFileContextMainOperations fails on weirdly

View File

@ -244,6 +244,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY = "dfs.corruptfilesreturned.max";
public static final int DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED = 500;
// HA related configuration
public static final String DFS_HA_NAMENODE_IDS_KEY = "dfs.ha.namenode.ids";
public static final String DFS_HA_NAMENODE_IDS_DEFAULT = "";
// property for fsimage compression
public static final String DFS_IMAGE_COMPRESS_KEY = "dfs.image.compress";

View File

@ -38,7 +38,6 @@ import java.util.Random;
import java.util.StringTokenizer;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
@ -64,6 +63,7 @@ import org.apache.hadoop.security.UserGroupInformation;
@InterfaceAudience.Private
public class DFSUtil {
private DFSUtil() { /* Hidden constructor */ }
private static final ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
@Override
protected Random initialValue() {
@ -577,6 +577,17 @@ public class DFSUtil {
}
}
/**
* Returns the configured nameservice Id
*
* @param conf
* Configuration object to lookup the nameserviceId
* @return nameserviceId string from conf
*/
public static String getNameServiceId(Configuration conf) {
return conf.get(DFS_FEDERATION_NAMESERVICE_ID);
}
/** Return used as percentage of capacity */
public static float getPercentUsed(long used, long capacity) {
return capacity <= 0 ? 100 : ((float)used * 100.0f)/(float)capacity;
@ -696,77 +707,4 @@ public class DFSUtil {
// TODO:HA configuration changes pending
return false;
}
/**
* Get name service Id for the {@link NameNode} based on namenode RPC address
* matching the local node address.
*/
public static String getNamenodeNameServiceId(Configuration conf) {
return getNameServiceId(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
}
/**
* Get name service Id for the BackupNode based on backup node RPC address
* matching the local node address.
*/
public static String getBackupNameServiceId(Configuration conf) {
return getNameServiceId(conf, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
}
/**
* Get name service Id for the secondary node based on secondary http address
* matching the local node address.
*/
public static String getSecondaryNameServiceId(Configuration conf) {
return getNameServiceId(conf, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
}
/**
* Get the nameservice Id by matching the {@code addressKey} with the
* the address of the local node.
*
* If {@link DFSConfigKeys#DFS_FEDERATION_NAMESERVICE_ID} is not specifically
* configured, this method determines the nameservice Id by matching the local
node's address with the configured addresses. When a match is found, it
* returns the nameservice Id from the corresponding configuration key.
*
* @param conf Configuration
* @param addressKey configuration key to get the address.
* @return name service Id on success, null on failure.
* @throws HadoopIllegalArgumentException on error
*/
private static String getNameServiceId(Configuration conf, String addressKey) {
String nameserviceId = conf.get(DFS_FEDERATION_NAMESERVICE_ID);
if (nameserviceId != null) {
return nameserviceId;
}
Collection<String> ids = getNameServiceIds(conf);
if (ids == null || ids.size() == 0) {
// Not federation configuration, hence no nameservice Id
return null;
}
// Match the rpc address with that of local address
int found = 0;
for (String id : ids) {
String addr = conf.get(getNameServiceIdKey(addressKey, id));
InetSocketAddress s = NetUtils.createSocketAddr(addr);
if (NetUtils.isLocalAddress(s.getAddress())) {
nameserviceId = id;
found++;
}
}
if (found > 1) { // Only one address must match the local address
throw new HadoopIllegalArgumentException(
"Configuration has multiple RPC addresses that matches "
+ "the local node's address. Please configure the system with "
+ "the parameter " + DFS_FEDERATION_NAMESERVICE_ID);
}
if (found == 0) {
throw new HadoopIllegalArgumentException("Configuration address "
+ addressKey + " is missing in configuration with name service Id");
}
return nameserviceId;
}
}
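
The helpers removed above pick the nameservice Id for the local NameNode by matching each nameservice's configured RPC address against the local interfaces. A hedged usage sketch; the federation key names follow the 0.23-era convention and the addresses are invented, so treat both as assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;

public class NameServiceIdSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Two federated nameservices, each with its own NameNode RPC address.
    conf.set("dfs.federation.nameservices", "ns1,ns2");
    conf.set("dfs.namenode.rpc-address.ns1", "127.0.0.1:8020");   // matches a local interface
    conf.set("dfs.namenode.rpc-address.ns2", "192.0.2.1:8020");   // does not
    // Resolves to "ns1" because its RPC address is local to this node.
    System.out.println(DFSUtil.getNamenodeNameServiceId(conf));
  }
}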

View File

@ -116,26 +116,6 @@ public class DatanodeInfo extends DatanodeID implements Node {
this.hostName = hostName;
}
/** Constructor */
public DatanodeInfo(final String name, final String storageID,
final int infoPort, final int ipcPort,
final long capacity, final long dfsUsed, final long remaining,
final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
final String networkLocation, final String hostName,
final AdminStates adminState) {
super(name, storageID, infoPort, ipcPort);
this.capacity = capacity;
this.dfsUsed = dfsUsed;
this.remaining = remaining;
this.blockPoolUsed = blockPoolUsed;
this.lastUpdate = lastUpdate;
this.xceiverCount = xceiverCount;
this.location = networkLocation;
this.hostName = hostName;
this.adminState = adminState;
}
/** The raw capacity. */
public long getCapacity() { return capacity; }

View File

@ -308,11 +308,6 @@ public class BlockManager {
/** Dump meta data to out. */
public void metaSave(PrintWriter out) {
assert namesystem.hasWriteLock();
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
datanodeManager.fetchDatanodes(live, dead, false);
out.println("Live Datanodes: " + live.size());
out.println("Dead Datanodes: " + dead.size());
//
// Dump contents of neededReplication
//
@ -847,7 +842,7 @@ public class BlockManager {
// Add this replica to corruptReplicas Map
corruptReplicas.addToCorruptReplicasMap(storedBlock, node);
if (countNodes(storedBlock).liveReplicas() >= inode.getReplication()) {
if (countNodes(storedBlock).liveReplicas() > inode.getReplication()) {
// the block is over-replicated so invalidate the replicas immediately
invalidateBlock(storedBlock, node);
} else if (namesystem.isPopulatingReplQueues()) {
@ -872,7 +867,7 @@ public class BlockManager {
// Check how many copies we have of the block. If we have at least one
// copy on a live node, then we can delete it.
int count = countNodes(blk).liveReplicas();
if (count >= 1) {
if (count > 1) {
addToInvalidates(blk, dn);
removeStoredBlock(blk, node);
if(NameNode.stateChangeLog.isDebugEnabled()) {

View File

@ -54,13 +54,11 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
import org.apache.hadoop.hdfs.web.resources.DelegationParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.http.HtmlQuoting;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.token.Token;
@ -70,7 +68,7 @@ import org.apache.hadoop.util.VersionInfo;
public class JspHelper {
public static final String CURRENT_CONF = "current.conf";
final static public String WEB_UGI_PROPERTY_NAME = DFSConfigKeys.DFS_WEB_UGI_KEY;
public static final String DELEGATION_PARAMETER_NAME = DelegationParam.NAME;
public static final String DELEGATION_PARAMETER_NAME = "delegation";
public static final String NAMENODE_ADDRESS = "nnaddr";
static final String SET_DELEGATION = "&" + DELEGATION_PARAMETER_NAME +
"=";
@ -553,8 +551,7 @@ public class JspHelper {
DelegationTokenIdentifier id = new DelegationTokenIdentifier();
id.readFields(in);
ugi = id.getUser();
checkUsername(ugi.getShortUserName(), usernameFromQuery);
checkUsername(ugi.getShortUserName(), user);
checkUsername(ugi.getUserName(), user);
ugi.addToken(token);
ugi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
} else {
@ -563,11 +560,13 @@ public class JspHelper {
"authenticated by filter");
}
ugi = UserGroupInformation.createRemoteUser(user);
checkUsername(ugi.getShortUserName(), usernameFromQuery);
// This is not necessarily true, could have been auth'ed by user-facing
// filter
ugi.setAuthenticationMethod(secureAuthMethod);
}
checkUsername(user, usernameFromQuery);
} else { // Security's not on, pull from url
ugi = usernameFromQuery == null?
getDefaultWebUser(conf) // not specified in request
@ -580,18 +579,10 @@ public class JspHelper {
return ugi;
}
/**
* Expected user name should be a short name.
*/
private static void checkUsername(final String expected, final String name
) throws IOException {
if (name == null) {
return;
}
KerberosName u = new KerberosName(name);
String shortName = u.getShortName();
if (!shortName.equals(expected)) {
throw new IOException("Usernames not matched: name=" + shortName
if (name != null && !name.equals(expected)) {
throw new IOException("Usernames not matched: name=" + name
+ " != expected=" + expected);
}
}

View File

@ -425,7 +425,7 @@ public class DataNode extends Configured
private List<ServicePlugin> plugins;
// For InterDataNodeProtocol
public RPC.Server ipcServer;
public Server ipcServer;
private SecureResources secureResources = null;
private AbstractList<File> dataDirs;
@ -575,15 +575,11 @@ public class DataNode extends Configured
private void initIpcServer(Configuration conf) throws IOException {
InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
conf.get("dfs.datanode.ipc.address"));
// Add all the RPC protocols that the Datanode implements
ipcServer = RPC.getServer(ClientDatanodeProtocol.class, this, ipcAddr.getHostName(),
ipcServer = RPC.getServer(DataNode.class, this, ipcAddr.getHostName(),
ipcAddr.getPort(),
conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY,
DFS_DATANODE_HANDLER_COUNT_DEFAULT),
false, conf, blockPoolTokenSecretManager);
ipcServer.addProtocol(InterDatanodeProtocol.class, this);
// set service-level authorization security policy
if (conf.getBoolean(
CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {

View File

@ -50,7 +50,6 @@ import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.web.ParamFilter;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
@ -67,11 +66,8 @@ import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import com.sun.jersey.spi.container.ResourceFilters;
/** Web-hdfs DataNode implementation. */
@Path("")
@ResourceFilters(ParamFilter.class)
public class DatanodeWebHdfsMethods {
public static final Log LOG = LogFactory.getLog(DatanodeWebHdfsMethods.class);

View File

@ -25,7 +25,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
@ -81,13 +80,13 @@ public class BackupNode extends NameNode {
// Common NameNode methods implementation for backup node.
/////////////////////////////////////////////////////
@Override // NameNode
protected InetSocketAddress getRpcServerAddress(Configuration conf) {
protected InetSocketAddress getRpcServerAddress(Configuration conf) throws IOException {
String addr = conf.get(BN_ADDRESS_NAME_KEY, BN_ADDRESS_DEFAULT);
return NetUtils.createSocketAddr(addr);
}
@Override
protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) {
protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) throws IOException {
String addr = conf.get(BN_SERVICE_RPC_ADDRESS_KEY);
if (addr == null || addr.isEmpty()) {
return null;
@ -135,6 +134,11 @@ public class BackupNode extends NameNode {
CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
NamespaceInfo nsInfo = handshake(conf);
super.initialize(conf);
// Backup node should never do lease recovery,
// therefore lease hard limit should never expire.
namesystem.leaseManager.setLeasePeriod(
HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE);
clusterId = nsInfo.getClusterID();
blockPoolId = nsInfo.getBlockPoolID();
@ -368,9 +372,4 @@ public class BackupNode extends NameNode {
throw new UnsupportedActionException(msg);
}
}
@Override
protected String getNameServiceId(Configuration conf) {
return DFSUtil.getBackupNameServiceId(conf);
}
}

View File

@ -120,7 +120,7 @@ public class FSDirectory implements Closeable {
this.cond = dirLock.writeLock().newCondition();
rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
ns.createFsOwnerPermissions(new FsPermission((short)0755)),
Long.MAX_VALUE, UNKNOWN_DISK_SPACE);
Integer.MAX_VALUE, UNKNOWN_DISK_SPACE);
this.fsImage = fsImage;
int configuredLimit = conf.getInt(
DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);

View File

@ -130,6 +130,7 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.VersionInfo;
import org.mortbay.util.ajax.JSON;
@ -346,30 +347,28 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
dir.imageLoadComplete();
}
void startSecrectManager() throws IOException {
void activateSecretManager() throws IOException {
if (dtSecretManager != null) {
dtSecretManager.startThreads();
}
}
void stopSecretManager() {
if (dtSecretManager != null) {
dtSecretManager.stopThreads();
}
}
/**
* Start services common to both active and standby states
* @throws IOException
* Activate FSNamesystem daemons.
*/
void startCommonServices(Configuration conf) throws IOException {
void activate(Configuration conf) throws IOException {
this.registerMBean(); // register the MBean for the FSNamesystemState
writeLock();
try {
nnResourceChecker = new NameNodeResourceChecker(conf);
checkAvailableResources();
setBlockTotal();
blockManager.activate(conf);
this.lmthread = new Daemon(leaseManager.new Monitor());
lmthread.start();
this.nnrmthread = new Daemon(new NameNodeResourceMonitor());
nnrmthread.start();
} finally {
@ -380,69 +379,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
DefaultMetricsSystem.instance().register(this);
}
/**
* Stop services common to both active and standby states
* @throws IOException
*/
void stopCommonServices() {
writeLock();
try {
if (blockManager != null) blockManager.close();
if (nnrmthread != null) nnrmthread.interrupt();
} finally {
writeUnlock();
}
}
/**
* Start services required in active state
* @throws IOException
*/
void startActiveServices() throws IOException {
LOG.info("Starting services required for active state");
writeLock();
try {
startSecrectManager();
lmthread = new Daemon(leaseManager.new Monitor());
lmthread.start();
} finally {
writeUnlock();
}
}
/**
* Stop services required in active state
* @throws InterruptedException
*/
void stopActiveServices() {
LOG.info("Stopping services started for active state");
writeLock();
try {
stopSecretManager();
if (lmthread != null) {
try {
lmthread.interrupt();
lmthread.join(3000);
} catch (InterruptedException ie) {
LOG.warn("Encountered exception ", ie);
}
lmthread = null;
}
} finally {
writeUnlock();
}
}
/** Start services required in standby state */
void startStandbyServices() {
LOG.info("Starting services required for standby state");
}
/** Stop services required in standby state */
void stopStandbyServices() {
LOG.info("Stopping services started for standby state");
}
public static Collection<URI> getNamespaceDirs(Configuration conf) {
return getStorageDirs(conf, DFS_NAMENODE_NAME_DIR_KEY);
}
@ -566,7 +502,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
/**
* Version of @see #getNamespaceInfo() that is not protected by a lock.
* Version of {@see #getNamespaceInfo()} that is not protected by a lock.
*/
NamespaceInfo unprotectedGetNamespaceInfo() {
return new NamespaceInfo(dir.fsImage.getStorage().getNamespaceID(),
@ -583,16 +519,23 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
void close() {
fsRunning = false;
try {
stopCommonServices();
if (blockManager != null) blockManager.close();
if (smmthread != null) smmthread.interrupt();
if (dtSecretManager != null) dtSecretManager.stopThreads();
if (nnrmthread != null) nnrmthread.interrupt();
} catch (Exception e) {
LOG.warn("Exception shutting down FSNamesystem", e);
} finally {
// using finally to ensure we also wait for lease daemon
try {
stopActiveServices();
stopStandbyServices();
if (lmthread != null) {
lmthread.interrupt();
lmthread.join(3000);
}
if (dir != null) {
dir.close();
}
} catch (InterruptedException ie) {
} catch (IOException ie) {
LOG.error("Error closing FSDirectory", ie);
IOUtils.cleanup(LOG, dir);
@ -621,6 +564,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
out.println(totalInodes + " files and directories, " + totalBlocks
+ " blocks = " + (totalInodes + totalBlocks) + " total");
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
blockManager.getDatanodeManager().fetchDatanodes(live, dead, false);
out.println("Live Datanodes: "+live.size());
out.println("Dead Datanodes: "+dead.size());
blockManager.metaSave(out);
out.flush();
@ -1443,7 +1391,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
try {
lb = startFileInternal(src, null, holder, clientMachine,
EnumSet.of(CreateFlag.APPEND),
false, blockManager.maxReplication, 0);
false, blockManager.maxReplication, (long)0);
} finally {
writeUnlock();
}
@ -1526,7 +1474,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
fileLength = pendingFile.computeContentSummary().getLength();
blockSize = pendingFile.getPreferredBlockSize();
clientNode = pendingFile.getClientNode();
replication = pendingFile.getReplication();
replication = (int)pendingFile.getReplication();
} finally {
writeUnlock();
}
@ -2321,7 +2269,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
Lease reassignLeaseInternal(Lease lease, String src, String newHolder,
INodeFileUnderConstruction pendingFile) {
INodeFileUnderConstruction pendingFile) throws IOException {
assert hasWriteLock();
pendingFile.setClientName(newHolder);
return leaseManager.reassignLease(lease, src, newHolder);
@ -2926,9 +2874,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* @return true if in safe mode
*/
private synchronized boolean isOn() {
try {
assert isConsistent() : " SafeMode: Inconsistent filesystem state: "
+ "Total num of blocks, active blocks, or "
+ "total safe blocks don't match.";
} catch(IOException e) {
System.err.print(StringUtils.stringifyException(e));
}
return this.reached >= 0;
}
@ -3082,7 +3034,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
this.blockTotal = total;
this.blockThreshold = (int) (blockTotal * threshold);
this.blockReplQueueThreshold =
(int) (blockTotal * replQueueThreshold);
(int) (((double) blockTotal) * replQueueThreshold);
checkMode();
}
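The two threshold computations shown above are numerically equivalent: Java promotes blockTotal to double before the multiplication either way, so the explicit (double) cast only makes the promotion visible. A minimal standalone sketch with hypothetical values (not Hadoop code):

    public class ThresholdPromotionDemo {
      public static void main(String[] args) {
        int blockTotal = 1000;                 // hypothetical block count
        double replQueueThreshold = 0.95d;     // hypothetical threshold fraction
        int withoutCast = (int) (blockTotal * replQueueThreshold);
        int withCast = (int) (((double) blockTotal) * replQueueThreshold);
        // Both multiplications happen in double precision, so the results match.
        System.out.println(withoutCast == withCast);   // prints true
      }
    }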
@ -3092,7 +3044,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* @param replication current replication
*/
private synchronized void incrementSafeBlockCount(short replication) {
if (replication == safeReplication)
if ((int)replication == safeReplication)
this.blockSafe++;
checkMode();
}
@ -3225,7 +3177,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* Checks consistency of the class state.
* This is costly and currently called only in assert.
*/
private boolean isConsistent() {
private boolean isConsistent() throws IOException {
if (blockTotal == -1 && blockSafe == -1) {
return true; // manual safe mode
}

View File

@ -27,7 +27,6 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HealthCheckFailedException;
@ -38,13 +37,15 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Trash;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.namenode.ha.ActiveState;
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
import org.apache.hadoop.hdfs.server.namenode.ha.StandbyState;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
@ -53,6 +54,9 @@ import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NodeRegistration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
@ -167,18 +171,19 @@ public class NameNode {
}
}
public static final int DEFAULT_PORT = 8020;
public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange");
public static final HAState ACTIVE_STATE = new ActiveState();
public static final HAState STANDBY_STATE = new StandbyState();
protected FSNamesystem namesystem;
protected final Configuration conf;
protected NamenodeRole role;
private HAState state;
private final boolean haEnabled;
private final HAContext haContext;
/** httpServer */
@ -307,11 +312,12 @@ public class NameNode {
* Given a configuration get the address of the service rpc server
* If the service rpc is not configured returns null
*/
protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) {
protected InetSocketAddress getServiceRpcServerAddress(Configuration conf)
throws IOException {
return NameNode.getServiceAddress(conf, false);
}
protected InetSocketAddress getRpcServerAddress(Configuration conf) {
protected InetSocketAddress getRpcServerAddress(Configuration conf) throws IOException {
return getAddress(conf);
}
@ -374,6 +380,7 @@ public class NameNode {
* @param conf the configuration
*/
protected void initialize(Configuration conf) throws IOException {
initializeGenericKeys(conf);
UserGroupInformation.setConfiguration(conf);
loginAsNameNodeUser(conf);
@ -389,7 +396,7 @@ public class NameNode {
throw e;
}
startCommonServices(conf);
activate(conf);
}
/**
@ -423,10 +430,19 @@ public class NameNode {
}
}
/** Start the services common to active and standby states */
private void startCommonServices(Configuration conf) throws IOException {
namesystem.startCommonServices(conf);
/**
* Activate name-node servers and threads.
*/
void activate(Configuration conf) throws IOException {
if ((isRole(NamenodeRole.NAMENODE))
&& (UserGroupInformation.isSecurityEnabled())) {
namesystem.activateSecretManager();
}
namesystem.activate(conf);
startHttpServer(conf);
rpcServer.start();
startTrashEmptier(conf);
plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
ServicePlugin.class);
for (ServicePlugin p: plugins) {
@ -436,27 +452,11 @@ public class NameNode {
LOG.warn("ServicePlugin " + p + " could not be started", t);
}
}
LOG.info(getRole() + " up at: " + rpcServer.getRpcAddress());
if (rpcServer.getServiceRpcAddress() != null) {
LOG.info(getRole() + " service server is up at: "
+ rpcServer.getServiceRpcAddress());
LOG.info(getRole() + " service server is up at: " + rpcServer.getServiceRpcAddress());
}
startHttpServer(conf);
}
private void stopCommonServices() {
if(namesystem != null) namesystem.close();
if(rpcServer != null) rpcServer.stop();
if (plugins != null) {
for (ServicePlugin p : plugins) {
try {
p.stop();
} catch (Throwable t) {
LOG.warn("ServicePlugin " + p + " could not be stopped", t);
}
}
}
stopHttpServer();
}
private void startTrashEmptier(Configuration conf) throws IOException {
@ -470,27 +470,12 @@ public class NameNode {
this.emptier.start();
}
private void stopTrashEmptier() {
if (this.emptier != null) {
emptier.interrupt();
emptier = null;
}
}
private void startHttpServer(final Configuration conf) throws IOException {
httpServer = new NameNodeHttpServer(conf, this, getHttpServerAddress(conf));
httpServer.start();
setHttpServerAddress(conf);
}
private void stopHttpServer() {
try {
if (httpServer != null) httpServer.stop();
} catch (Exception e) {
LOG.error("Exception while stopping httpserver", e);
}
}
/**
* Start NameNode.
* <p>
@ -516,7 +501,7 @@ public class NameNode {
* <code>zero</code> in the conf.
*
* @param conf configuration
* @throws IOException on error
* @throws IOException
*/
public NameNode(Configuration conf) throws IOException {
this(conf, NamenodeRole.NAMENODE);
@ -524,28 +509,14 @@ public class NameNode {
protected NameNode(Configuration conf, NamenodeRole role)
throws IOException {
this.conf = conf;
this.role = role;
this.haEnabled = DFSUtil.isHAEnabled(conf);
this.haContext = new NameNodeHAContext();
this.state = !haEnabled ? ACTIVE_STATE : STANDBY_STATE;
try {
initializeGenericKeys(conf, getNameServiceId(conf));
initialize(conf);
if (!haEnabled) {
state = ACTIVE_STATE;
} else {
state = STANDBY_STATE;;
}
state.enterState(haContext);
} catch (IOException e) {
this.stop();
throw e;
} catch (ServiceFailedException e) {
this.stop();
throw new IOException("Service failed to start", e);
} catch (HadoopIllegalArgumentException e) {
this.stop();
throw e;
}
}
@ -557,7 +528,6 @@ public class NameNode {
try {
this.rpcServer.join();
} catch (InterruptedException ie) {
LOG.info("Caught interrupted exception " + ie);
}
}
@ -570,12 +540,23 @@ public class NameNode {
return;
stopRequested = true;
}
if (plugins != null) {
for (ServicePlugin p : plugins) {
try {
state.exitState(haContext);
} catch (ServiceFailedException e) {
LOG.info("Encountered exception while exiting state " + e);
p.stop();
} catch (Throwable t) {
LOG.warn("ServicePlugin " + p + " could not be stopped", t);
}
stopCommonServices();
}
}
try {
if (httpServer != null) httpServer.stop();
} catch (Exception e) {
LOG.error("Exception while stopping httpserver", e);
}
if(namesystem != null) namesystem.close();
if(emptier != null) emptier.interrupt();
if(rpcServer != null) rpcServer.stop();
if (metrics != null) {
metrics.shutdown();
}
@ -840,16 +821,16 @@ public class NameNode {
* @param conf
* Configuration object to lookup specific key and to set the value
* to the key passed. Note the conf object is modified
* @param nameserviceId name service Id
* @see DFSUtil#setGenericConf(Configuration, String, String...)
*/
public static void initializeGenericKeys(Configuration conf, String
nameserviceId) {
public static void initializeGenericKeys(Configuration conf) {
final String nameserviceId = DFSUtil.getNameServiceId(conf);
if ((nameserviceId == null) || nameserviceId.isEmpty()) {
return;
}
DFSUtil.setGenericConf(conf, nameserviceId, NAMESERVICE_SPECIFIC_KEYS);
if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
+ conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
@ -857,14 +838,6 @@ public class NameNode {
}
}
/**
* Get the name service Id for the node
* @return name service Id or null if federation is not configured
*/
protected String getNameServiceId(Configuration conf) {
return DFSUtil.getNamenodeNameServiceId(conf);
}
/**
*/
public static void main(String argv[]) throws Exception {
@ -891,56 +864,27 @@ public class NameNode {
if (!haEnabled) {
throw new ServiceFailedException("HA for namenode is not enabled");
}
state.setState(haContext, ACTIVE_STATE);
state.setState(this, ACTIVE_STATE);
}
synchronized void transitionToStandby() throws ServiceFailedException {
if (!haEnabled) {
throw new ServiceFailedException("HA for namenode is not enabled");
}
state.setState(haContext, STANDBY_STATE);
state.setState(this, STANDBY_STATE);
}
/** Check if an operation of given category is allowed */
protected synchronized void checkOperation(final OperationCategory op)
throws UnsupportedActionException {
state.checkOperation(haContext, op);
state.checkOperation(this, op);
}
/**
* Class used to expose {@link NameNode} as context to {@link HAState}
*/
private class NameNodeHAContext implements HAContext {
@Override
public void setState(HAState s) {
state = s;
}
@Override
public HAState getState() {
public synchronized HAState getState() {
return state;
}
@Override
public void startActiveServices() throws IOException {
namesystem.startActiveServices();
startTrashEmptier(conf);
}
@Override
public void stopActiveServices() throws IOException {
namesystem.stopActiveServices();
stopTrashEmptier();
}
@Override
public void startStandbyServices() throws IOException {
// TODO:HA Start reading editlog from active
}
@Override
public void stopStandbyServices() throws IOException {
// TODO:HA Stop reading editlog from active
}
public synchronized void setState(final HAState s) {
state = s;
}
}
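The initializeGenericKeys change earlier in this file depends on nameservice-specific keys (a generic key plus a nameservice suffix) being promoted to their generic names. A rough sketch of that idea under an assumed federated setup with nameservice id "ns1" (hypothetical key list and loop, not the actual DFSUtil.setGenericConf implementation):

    import org.apache.hadoop.conf.Configuration;

    public class GenericKeyDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("dfs.namenode.rpc-address.ns1", "nn1.example.com:8020"); // suffixed key
        String nameserviceId = "ns1";
        for (String key : new String[] { "dfs.namenode.rpc-address" }) {
          String value = conf.get(key + "." + nameserviceId);
          if (value != null) {
            conf.set(key, value);              // promote to the generic key name
          }
        }
        System.out.println(conf.get("dfs.namenode.rpc-address")); // nn1.example.com:8020
      }
    }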

View File

@ -66,7 +66,6 @@ import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@ -146,17 +145,10 @@ class NameNodeRpcServer implements NamenodeProtocols {
serviceRpcServer = null;
serviceRPCAddress = null;
}
// Add all the RPC protocols that the namenode implements
this.server = RPC.getServer(ClientProtocol.class, this,
this.server = RPC.getServer(NamenodeProtocols.class, this,
socAddr.getHostName(), socAddr.getPort(),
handlerCount, false, conf,
namesystem.getDelegationTokenSecretManager());
this.server.addProtocol(DatanodeProtocol.class, this);
this.server.addProtocol(NamenodeProtocol.class, this);
this.server.addProtocol(RefreshAuthorizationPolicyProtocol.class, this);
this.server.addProtocol(RefreshUserMappingsProtocol.class, this);
this.server.addProtocol(GetUserMappingsProtocol.class, this);
// set service-level authorization security policy
if (serviceAuthEnabled =
@ -979,11 +971,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
}
private static String getClientMachine() {
String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
if (clientMachine == null) { //not a web client
clientMachine = Server.getRemoteAddress();
}
if (clientMachine == null) { //not a RPC client
String clientMachine = Server.getRemoteAddress();
if (clientMachine == null) {
clientMachine = "";
}
return clientMachine;

View File

@ -38,12 +38,10 @@ import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@ -175,17 +173,12 @@ public class SecondaryNameNode implements Runnable {
public SecondaryNameNode(Configuration conf,
CommandLineOpts commandLineOpts) throws IOException {
try {
NameNode.initializeGenericKeys(conf,
DFSUtil.getSecondaryNameServiceId(conf));
NameNode.initializeGenericKeys(conf);
initialize(conf, commandLineOpts);
} catch(IOException e) {
shutdown();
LOG.fatal("Failed to start secondary namenode. ", e);
throw e;
} catch(HadoopIllegalArgumentException e) {
shutdown();
LOG.fatal("Failed to start secondary namenode. ", e);
throw e;
}
}

View File

@ -17,8 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import java.io.IOException;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
@ -35,35 +33,27 @@ public class ActiveState extends HAState {
}
@Override
public void checkOperation(HAContext context, OperationCategory op)
public void checkOperation(NameNode nn, OperationCategory op)
throws UnsupportedActionException {
return; // Other than journal, all operations are allowed in active state
}
@Override
public void setState(HAContext context, HAState s) throws ServiceFailedException {
public void setState(NameNode nn, HAState s) throws ServiceFailedException {
if (s == NameNode.STANDBY_STATE) {
setStateInternal(context, s);
setStateInternal(nn, s);
return;
}
super.setState(context, s);
super.setState(nn, s);
}
@Override
public void enterState(HAContext context) throws ServiceFailedException {
try {
context.startActiveServices();
} catch (IOException e) {
throw new ServiceFailedException("Failed to start active services", e);
}
protected void enterState(NameNode nn) throws ServiceFailedException {
// TODO:HA
}
@Override
public void exitState(HAContext context) throws ServiceFailedException {
try {
context.stopActiveServices();
} catch (IOException e) {
throw new ServiceFailedException("Failed to stop active services", e);
}
protected void exitState(NameNode nn) throws ServiceFailedException {
// TODO:HA
}
}

View File

@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode.ha;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
@ -43,38 +44,38 @@ abstract public class HAState {
* @param s new state
* @throws ServiceFailedException on failure to transition to new state.
*/
protected final void setStateInternal(final HAContext context, final HAState s)
protected final void setStateInternal(final NameNode nn, final HAState s)
throws ServiceFailedException {
exitState(context);
context.setState(s);
s.enterState(context);
exitState(nn);
nn.setState(s);
s.enterState(nn);
}
/**
* Method to be overridden by subclasses to perform steps necessary for
* entering a state.
* @param context HA context
* @param nn Namenode
* @throws ServiceFailedException on failure to enter the state.
*/
public abstract void enterState(final HAContext context)
protected abstract void enterState(final NameNode nn)
throws ServiceFailedException;
/**
* Method to be overridden by subclasses to perform steps necessary for
* exiting a state.
* @param context HA context
* @param nn Namenode
* @throws ServiceFailedException on failure to exit the state.
*/
public abstract void exitState(final HAContext context)
protected abstract void exitState(final NameNode nn)
throws ServiceFailedException;
/**
* Move from the existing state to a new state
* @param context HA context
* @param nn Namenode
* @param s new state
* @throws ServiceFailedException on failure to transition to new state.
*/
public void setState(HAContext context, HAState s) throws ServiceFailedException {
public void setState(NameNode nn, HAState s) throws ServiceFailedException {
if (this == s) { // Already in the new state
return;
}
@ -84,15 +85,15 @@ abstract public class HAState {
/**
* Check if an operation is supported in a given state.
* @param context HA context
* @param nn Namenode
* @param op Type of the operation.
* @throws UnsupportedActionException if a given type of operation is not
* supported in this state.
*/
public void checkOperation(final HAContext context, final OperationCategory op)
public void checkOperation(final NameNode nn, final OperationCategory op)
throws UnsupportedActionException {
String msg = "Operation category " + op + " is not supported in state "
+ context.getState();
+ nn.getState();
throw new UnsupportedActionException(msg);
}
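setStateInternal above follows the usual State-pattern transition discipline: exit the current state, swap the reference, then enter the new state. A self-contained, hypothetical sketch of the same sequence (simplified names, not the HDFS classes):

    interface State {
      void enterState();
      void exitState();
    }

    class StateHolder {
      private State state;

      StateHolder(State initial) {
        this.state = initial;
      }

      void transitionTo(State next) {
        if (state == next) {
          return;              // already in the requested state
        }
        state.exitState();     // leave the old state first
        state = next;          // then swap the reference
        next.enterState();     // and finally enter the new state
      }
    }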

View File

@ -17,8 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import java.io.IOException;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
@ -39,30 +37,22 @@ public class StandbyState extends HAState {
}
@Override
public void setState(HAContext context, HAState s) throws ServiceFailedException {
public void setState(NameNode nn, HAState s) throws ServiceFailedException {
if (s == NameNode.ACTIVE_STATE) {
setStateInternal(context, s);
setStateInternal(nn, s);
return;
}
super.setState(context, s);
super.setState(nn, s);
}
@Override
public void enterState(HAContext context) throws ServiceFailedException {
try {
context.startStandbyServices();
} catch (IOException e) {
throw new ServiceFailedException("Failed to start standby services", e);
}
protected void enterState(NameNode nn) throws ServiceFailedException {
// TODO:HA
}
@Override
public void exitState(HAContext context) throws ServiceFailedException {
try {
context.stopStandbyServices();
} catch (IOException e) {
throw new ServiceFailedException("Failed to stop standby services", e);
}
protected void exitState(NameNode nn) throws ServiceFailedException {
// TODO:HA
}
}

View File

@ -57,7 +57,6 @@ import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.web.JsonUtil;
import org.apache.hadoop.hdfs.web.ParamFilter;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
@ -79,7 +78,6 @@ import org.apache.hadoop.hdfs.web.resources.PostOpParam;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
import org.apache.hadoop.hdfs.web.resources.RenewerParam;
import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
@ -91,20 +89,10 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import com.sun.jersey.spi.container.ResourceFilters;
/** Web-hdfs NameNode implementation. */
@Path("")
@ResourceFilters(ParamFilter.class)
public class NamenodeWebHdfsMethods {
public static final Log LOG = LogFactory.getLog(NamenodeWebHdfsMethods.class);
private static final ThreadLocal<String> REMOTE_ADDRESS = new ThreadLocal<String>();
/** @return the remote client address. */
public static String getRemoteAddress() {
return REMOTE_ADDRESS.get();
}
private static final Log LOG = LogFactory.getLog(NamenodeWebHdfsMethods.class);
private @Context ServletContext context;
private @Context HttpServletRequest request;
@ -227,8 +215,6 @@ public class NamenodeWebHdfsMethods {
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
REMOTE_ADDRESS.set(request.getRemoteAddr());
try {
final String fullpath = path.getAbsolutePath();
final NameNode namenode = (NameNode)context.getAttribute("name.node");
@ -285,10 +271,6 @@ public class NamenodeWebHdfsMethods {
}
default:
throw new UnsupportedOperationException(op + " is not supported");
}
} finally {
REMOTE_ADDRESS.set(null);
}
}
});
@ -319,8 +301,6 @@ public class NamenodeWebHdfsMethods {
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
REMOTE_ADDRESS.set(request.getRemoteAddr());
try {
final String fullpath = path.getAbsolutePath();
final NameNode namenode = (NameNode)context.getAttribute("name.node");
@ -334,10 +314,6 @@ public class NamenodeWebHdfsMethods {
}
default:
throw new UnsupportedOperationException(op + " is not supported");
}
} finally {
REMOTE_ADDRESS.set(null);
}
}
});
@ -359,12 +335,10 @@ public class NamenodeWebHdfsMethods {
final OffsetParam offset,
@QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
final LengthParam length,
@QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
final RenewerParam renewer,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize
) throws IOException, URISyntaxException, InterruptedException {
return get(ugi, delegation, ROOT, op, offset, length, renewer, bufferSize);
return get(ugi, delegation, ROOT, op, offset, length, bufferSize);
}
/** Handle HTTP GET request. */
@ -382,23 +356,19 @@ public class NamenodeWebHdfsMethods {
final OffsetParam offset,
@QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
final LengthParam length,
@QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
final RenewerParam renewer,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize
) throws IOException, URISyntaxException, InterruptedException {
if (LOG.isTraceEnabled()) {
LOG.trace(op + ": " + path + ", ugi=" + ugi
+ Param.toSortedString(", ", offset, length, renewer, bufferSize));
+ Param.toSortedString(", ", offset, length, bufferSize));
}
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
REMOTE_ADDRESS.set(request.getRemoteAddr());
try {
final NameNode namenode = (NameNode)context.getAttribute("name.node");
final String fullpath = path.getAbsolutePath();
@ -411,15 +381,6 @@ public class NamenodeWebHdfsMethods {
op.getValue(), offset.getValue(), offset, length, bufferSize);
return Response.temporaryRedirect(uri).build();
}
case GETFILEBLOCKLOCATIONS:
{
final long offsetValue = offset.getValue();
final Long lengthValue = length.getValue();
final LocatedBlocks locatedblocks = np.getBlockLocations(fullpath,
offsetValue, lengthValue != null? lengthValue: offsetValue + 1);
final String js = JsonUtil.toJsonString(locatedblocks);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case GETFILESTATUS:
{
final HdfsFileStatus status = np.getFileInfo(fullpath);
@ -431,19 +392,8 @@ public class NamenodeWebHdfsMethods {
final StreamingOutput streaming = getListingStream(np, fullpath);
return Response.ok(streaming).type(MediaType.APPLICATION_JSON).build();
}
case GETDELEGATIONTOKEN:
{
final Token<? extends TokenIdentifier> token = generateDelegationToken(
namenode, ugi, renewer.getValue());
final String js = JsonUtil.toJsonString(token);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
default:
throw new UnsupportedOperationException(op + " is not supported");
}
} finally {
REMOTE_ADDRESS.set(null);
}
}
});
@ -512,9 +462,6 @@ public class NamenodeWebHdfsMethods {
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException {
REMOTE_ADDRESS.set(request.getRemoteAddr());
try {
final NameNode namenode = (NameNode)context.getAttribute("name.node");
final String fullpath = path.getAbsolutePath();
@ -528,10 +475,6 @@ public class NamenodeWebHdfsMethods {
default:
throw new UnsupportedOperationException(op + " is not supported");
}
} finally {
REMOTE_ADDRESS.set(null);
}
}
});
}

View File

@ -149,9 +149,7 @@ public class DelegationTokenFetcher {
DataInputStream in = new DataInputStream(
new ByteArrayInputStream(token.getIdentifier()));
id.readFields(in);
if(LOG.isDebugEnabled()) {
LOG.debug("Token (" + id + ") for " + token.getService());
}
System.out.println("Token (" + id + ") for " + token.getService());
}
return null;
}
@ -162,62 +160,50 @@ public class DelegationTokenFetcher {
for (Token<?> token : readTokens(tokenFile, conf)) {
result = renewDelegationToken(webUrl,
(Token<DelegationTokenIdentifier>) token);
if(LOG.isDebugEnabled()) {
LOG.debug("Renewed token via " + webUrl + " for "
System.out.println("Renewed token via " + webUrl + " for "
+ token.getService() + " until: " + new Date(result));
}
}
} else if (cancel) {
for (Token<?> token : readTokens(tokenFile, conf)) {
cancelDelegationToken(webUrl,
(Token<DelegationTokenIdentifier>) token);
if(LOG.isDebugEnabled()) {
LOG.debug("Cancelled token via " + webUrl + " for "
System.out.println("Cancelled token via " + webUrl + " for "
+ token.getService());
}
}
} else {
Credentials creds = getDTfromRemote(webUrl, renewer);
creds.writeTokenStorageFile(tokenFile, conf);
for (Token<?> token : creds.getAllTokens()) {
if(LOG.isDebugEnabled()) {
LOG.debug("Fetched token via " + webUrl + " for "
System.out.println("Fetched token via " + webUrl + " for "
+ token.getService() + " into " + tokenFile);
}
}
}
} else {
FileSystem fs = FileSystem.get(conf);
if (cancel) {
for (Token<?> token : readTokens(tokenFile, conf)) {
((DistributedFileSystem) fs)
.cancelDelegationToken((Token<DelegationTokenIdentifier>) token);
if(LOG.isDebugEnabled()) {
LOG.debug("Cancelled token for "
System.out.println("Cancelled token for "
+ token.getService());
}
}
} else if (renew) {
long result;
for (Token<?> token : readTokens(tokenFile, conf)) {
result = ((DistributedFileSystem) fs)
.renewDelegationToken((Token<DelegationTokenIdentifier>) token);
if(LOG.isDebugEnabled()) {
LOG.debug("Renewed token for " + token.getService()
System.out.println("Renewed token for " + token.getService()
+ " until: " + new Date(result));
}
}
} else {
Token<?> token = fs.getDelegationToken(renewer);
Credentials cred = new Credentials();
cred.addToken(token.getService(), token);
cred.writeTokenStorageFile(tokenFile, conf);
if(LOG.isDebugEnabled()) {
LOG.debug("Fetched token for " + token.getService()
System.out.println("Fetched token for " + token.getService()
+ " into " + tokenFile);
}
}
}
return null;
}
});
@ -235,11 +221,6 @@ public class DelegationTokenFetcher {
} else {
url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC);
}
if(LOG.isDebugEnabled()) {
LOG.debug("Retrieving token from: " + url);
}
URL remoteURL = new URL(url.toString());
SecurityUtil.fetchServiceTicket(remoteURL);
URLConnection connection = remoteURL.openConnection();

View File

@ -17,31 +17,19 @@
*/
package org.apache.hadoop.hdfs.web;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.mortbay.util.ajax.JSON;
/** JSON Utilities */
public class JsonUtil {
private static class ThreadLocalMap extends ThreadLocal<Map<String, Object>> {
private static final ThreadLocal<Map<String, Object>> jsonMap
= new ThreadLocal<Map<String, Object>>() {
@Override
protected Map<String, Object> initialValue() {
return new TreeMap<String, Object>();
@ -53,54 +41,7 @@ public class JsonUtil {
m.clear();
return m;
}
}
private static final ThreadLocalMap jsonMap = new ThreadLocalMap();
private static final ThreadLocalMap tokenMap = new ThreadLocalMap();
private static final ThreadLocalMap datanodeInfoMap = new ThreadLocalMap();
private static final ThreadLocalMap extendedBlockMap = new ThreadLocalMap();
private static final ThreadLocalMap locatedBlockMap = new ThreadLocalMap();
private static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
/** Convert a token object to a Json string. */
public static String toJsonString(final Token<? extends TokenIdentifier> token
) throws IOException {
if (token == null) {
return null;
}
final Map<String, Object> m = tokenMap.get();
m.put("urlString", token.encodeToUrlString());
return JSON.toString(m);
}
/** Convert a Json map to a Token. */
public static Token<? extends TokenIdentifier> toToken(
final Map<?, ?> m) throws IOException {
if (m == null) {
return null;
}
final Token<DelegationTokenIdentifier> token
= new Token<DelegationTokenIdentifier>();
token.decodeFromUrlString((String)m.get("urlString"));
return token;
}
/** Convert a Json map to a Token of DelegationTokenIdentifier. */
@SuppressWarnings("unchecked")
public static Token<DelegationTokenIdentifier> toDelegationToken(
final Map<?, ?> m) throws IOException {
return (Token<DelegationTokenIdentifier>)toToken(m);
}
/** Convert a Json map to a Token of BlockTokenIdentifier. */
@SuppressWarnings("unchecked")
public static Token<BlockTokenIdentifier> toBlockToken(
final Map<?, ?> m) throws IOException {
return (Token<BlockTokenIdentifier>)toToken(m);
}
};
/** Convert an exception object to a Json string. */
public static String toJsonString(final Exception e) {
@ -136,10 +77,11 @@ public class JsonUtil {
/** Convert a HdfsFileStatus object to a Json string. */
public static String toJsonString(final HdfsFileStatus status) {
if (status == null) {
return null;
} else {
final Map<String, Object> m = jsonMap.get();
if (status == null) {
m.put("isNull", true);
} else {
m.put("isNull", false);
m.put("localName", status.getLocalName());
m.put("isDir", status.isDir());
m.put("isSymlink", status.isSymlink());
@ -155,8 +97,8 @@ public class JsonUtil {
m.put("modificationTime", status.getModificationTime());
m.put("blockSize", status.getBlockSize());
m.put("replication", status.getReplication());
return JSON.toString(m);
}
return JSON.toString(m);
}
@SuppressWarnings("unchecked")
@ -164,9 +106,9 @@ public class JsonUtil {
return (Map<String, Object>) JSON.parse(jsonString);
}
/** Convert a Json map to a HdfsFileStatus object. */
/** Convert a Json string to a HdfsFileStatus object. */
public static HdfsFileStatus toFileStatus(final Map<String, Object> m) {
if (m == null) {
if ((Boolean)m.get("isNull")) {
return null;
}
@ -188,214 +130,4 @@ public class JsonUtil {
permission, owner, group,
symlink, DFSUtil.string2Bytes(localName));
}
/** Convert an ExtendedBlock to a Json string. */
public static String toJsonString(final ExtendedBlock extendedblock) {
if (extendedblock == null) {
return null;
}
final Map<String, Object> m = extendedBlockMap.get();
m.put("blockPoolId", extendedblock.getBlockPoolId());
m.put("blockId", extendedblock.getBlockId());
m.put("numBytes", extendedblock.getNumBytes());
m.put("generationStamp", extendedblock.getGenerationStamp());
return JSON.toString(m);
}
/** Convert a Json map to an ExtendedBlock object. */
public static ExtendedBlock toExtendedBlock(final Map<?, ?> m) {
if (m == null) {
return null;
}
final String blockPoolId = (String)m.get("blockPoolId");
final long blockId = (Long)m.get("blockId");
final long numBytes = (Long)m.get("numBytes");
final long generationStamp = (Long)m.get("generationStamp");
return new ExtendedBlock(blockPoolId, blockId, numBytes, generationStamp);
}
/** Convert a DatanodeInfo to a Json string. */
public static String toJsonString(final DatanodeInfo datanodeinfo) {
if (datanodeinfo == null) {
return null;
}
final Map<String, Object> m = datanodeInfoMap.get();
m.put("name", datanodeinfo.getName());
m.put("storageID", datanodeinfo.getStorageID());
m.put("infoPort", datanodeinfo.getInfoPort());
m.put("ipcPort", datanodeinfo.getIpcPort());
m.put("capacity", datanodeinfo.getCapacity());
m.put("dfsUsed", datanodeinfo.getDfsUsed());
m.put("remaining", datanodeinfo.getRemaining());
m.put("blockPoolUsed", datanodeinfo.getBlockPoolUsed());
m.put("lastUpdate", datanodeinfo.getLastUpdate());
m.put("xceiverCount", datanodeinfo.getXceiverCount());
m.put("networkLocation", datanodeinfo.getNetworkLocation());
m.put("hostName", datanodeinfo.getHostName());
m.put("adminState", datanodeinfo.getAdminState().name());
return JSON.toString(m);
}
/** Convert a Json map to a DatanodeInfo object. */
public static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
if (m == null) {
return null;
}
return new DatanodeInfo(
(String)m.get("name"),
(String)m.get("storageID"),
(int)(long)(Long)m.get("infoPort"),
(int)(long)(Long)m.get("ipcPort"),
(Long)m.get("capacity"),
(Long)m.get("dfsUsed"),
(Long)m.get("remaining"),
(Long)m.get("blockPoolUsed"),
(Long)m.get("lastUpdate"),
(int)(long)(Long)m.get("xceiverCount"),
(String)m.get("networkLocation"),
(String)m.get("hostName"),
AdminStates.valueOf((String)m.get("adminState")));
}
/** Convert a DatanodeInfo[] to a Json string. */
public static String toJsonString(final DatanodeInfo[] array
) throws IOException {
if (array == null) {
return null;
} else if (array.length == 0) {
return "[]";
} else {
final StringBuilder b = new StringBuilder().append('[').append(
toJsonString(array[0]));
for(int i = 1; i < array.length; i++) {
b.append(", ").append(toJsonString(array[i]));
}
return b.append(']').toString();
}
}
/** Convert an Object[] to a DatanodeInfo[]. */
public static DatanodeInfo[] toDatanodeInfoArray(final Object[] objects) {
if (objects == null) {
return null;
} else if (objects.length == 0) {
return EMPTY_DATANODE_INFO_ARRAY;
} else {
final DatanodeInfo[] array = new DatanodeInfo[objects.length];
for(int i = 0; i < array.length; i++) {
array[i] = (DatanodeInfo)toDatanodeInfo((Map<?, ?>) objects[i]);
}
return array;
}
}
/** Convert a LocatedBlock to a Json string. */
public static String toJsonString(final LocatedBlock locatedblock
) throws IOException {
if (locatedblock == null) {
return null;
}
final Map<String, Object> m = locatedBlockMap.get();
m.put("blockToken", toJsonString(locatedblock.getBlockToken()));
m.put("isCorrupt", locatedblock.isCorrupt());
m.put("startOffset", locatedblock.getStartOffset());
m.put("block", toJsonString(locatedblock.getBlock()));
m.put("locations", toJsonString(locatedblock.getLocations()));
return JSON.toString(m);
}
/** Convert a Json map to LocatedBlock. */
public static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
if (m == null) {
return null;
}
final ExtendedBlock b = toExtendedBlock((Map<?, ?>)JSON.parse((String)m.get("block")));
final DatanodeInfo[] locations = toDatanodeInfoArray(
(Object[])JSON.parse((String)m.get("locations")));
final long startOffset = (Long)m.get("startOffset");
final boolean isCorrupt = (Boolean)m.get("isCorrupt");
final LocatedBlock locatedblock = new LocatedBlock(b, locations, startOffset, isCorrupt);
locatedblock.setBlockToken(toBlockToken((Map<?, ?>)JSON.parse((String)m.get("blockToken"))));
return locatedblock;
}
/** Convert a List of LocatedBlock to a Json string. */
public static String toJsonString(final List<LocatedBlock> array
) throws IOException {
if (array == null) {
return null;
} else if (array.size() == 0) {
return "[]";
} else {
final StringBuilder b = new StringBuilder().append('[').append(
toJsonString(array.get(0)));
for(int i = 1; i < array.size(); i++) {
b.append(",\n ").append(toJsonString(array.get(i)));
}
return b.append(']').toString();
}
}
/** Convert an Object[] to a List of LocatedBlock.
* @throws IOException */
public static List<LocatedBlock> toLocatedBlockList(final Object[] objects
) throws IOException {
if (objects == null) {
return null;
} else if (objects.length == 0) {
return Collections.emptyList();
} else {
final List<LocatedBlock> list = new ArrayList<LocatedBlock>(objects.length);
for(int i = 0; i < objects.length; i++) {
list.add((LocatedBlock)toLocatedBlock((Map<?, ?>)objects[i]));
}
return list;
}
}
/** Convert LocatedBlocks to a Json string. */
public static String toJsonString(final LocatedBlocks locatedblocks
) throws IOException {
if (locatedblocks == null) {
return null;
}
final Map<String, Object> m = jsonMap.get();
m.put("fileLength", locatedblocks.getFileLength());
m.put("isUnderConstruction", locatedblocks.isUnderConstruction());
m.put("locatedBlocks", toJsonString(locatedblocks.getLocatedBlocks()));
m.put("lastLocatedBlock", toJsonString(locatedblocks.getLastLocatedBlock()));
m.put("isLastBlockComplete", locatedblocks.isLastBlockComplete());
return JSON.toString(m);
}
/** Convert a Json map to LocatedBlocks. */
public static LocatedBlocks toLocatedBlocks(final Map<String, Object> m
) throws IOException {
if (m == null) {
return null;
}
final long fileLength = (Long)m.get("fileLength");
final boolean isUnderConstruction = (Boolean)m.get("isUnderConstruction");
final List<LocatedBlock> locatedBlocks = toLocatedBlockList(
(Object[])JSON.parse((String) m.get("locatedBlocks")));
final LocatedBlock lastLocatedBlock = toLocatedBlock(
(Map<?, ?>)JSON.parse((String)m.get("lastLocatedBlock")));
final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete");
return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks,
lastLocatedBlock, isLastBlockComplete);
}
}
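The converters above all bottom out in org.mortbay.util.ajax.JSON, which turns a Map into a JSON string and parses it back into a Map. A small standalone round-trip sketch with hypothetical field names (not part of JsonUtil):

    import java.util.Map;
    import java.util.TreeMap;

    import org.mortbay.util.ajax.JSON;

    public class JsonRoundTripDemo {
      public static void main(String[] args) {
        Map<String, Object> m = new TreeMap<String, Object>();
        m.put("blockId", 42L);
        m.put("numBytes", 1024L);
        String json = JSON.toString(m);                   // serialize the map
        Map<?, ?> parsed = (Map<?, ?>) JSON.parse(json);  // parse it back
        System.out.println(parsed.get("numBytes"));
      }
    }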

View File

@ -1,85 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import java.net.URI;
import java.util.List;
import java.util.Map;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.core.UriBuilder;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import com.sun.jersey.spi.container.ResourceFilter;
/**
* A filter to change parameter names to lower case
* so that parameter names are treated as case insensitive.
*/
public class ParamFilter implements ResourceFilter {
private static final ContainerRequestFilter LOWER_CASE
= new ContainerRequestFilter() {
@Override
public ContainerRequest filter(final ContainerRequest request) {
final MultivaluedMap<String, String> parameters = request.getQueryParameters();
if (containsUpperCase(parameters.keySet())) {
//rebuild URI
final URI lower = rebuildQuery(request.getRequestUri(), parameters);
request.setUris(request.getBaseUri(), lower);
}
return request;
}
};
@Override
public ContainerRequestFilter getRequestFilter() {
return LOWER_CASE;
}
@Override
public ContainerResponseFilter getResponseFilter() {
return null;
}
/** Do the strings contain upper case letters? */
private static boolean containsUpperCase(final Iterable<String> strings) {
for(String s : strings) {
for(int i = 0; i < s.length(); i++) {
if (Character.isUpperCase(s.charAt(i))) {
return true;
}
}
}
return false;
}
/** Rebuild the URI query with lower case parameter names. */
private static URI rebuildQuery(final URI uri,
final MultivaluedMap<String, String> parameters) {
UriBuilder b = UriBuilder.fromUri(uri).replaceQuery("");
for(Map.Entry<String, List<String>> e : parameters.entrySet()) {
final String key = e.getKey().toLowerCase();
for(String v : e.getValue()) {
b = b.queryParam(key, v);
}
}
return b.build();
}
}
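The removed ParamFilter rewrites the request URI so that query parameter names are matched case-insensitively. A standalone sketch of the key-lowercasing step it relies on (hypothetical helper, independent of Jersey):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class LowerCaseKeys {
      /** Return a copy of the parameter map keyed by lower-case names. */
      static Map<String, List<String>> lowerCaseKeys(Map<String, List<String>> params) {
        Map<String, List<String>> out = new HashMap<String, List<String>>();
        for (Map.Entry<String, List<String>> e : params.entrySet()) {
          out.put(e.getKey().toLowerCase(), e.getValue());
        }
        return out;
      }
    }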

View File

@ -27,12 +27,9 @@ import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
@ -48,7 +45,6 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
@ -58,9 +54,7 @@ import org.apache.hadoop.hdfs.web.resources.DstPathParam;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.GroupParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.hdfs.web.resources.LengthParam;
import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
import org.apache.hadoop.hdfs.web.resources.OffsetParam;
import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
import org.apache.hadoop.hdfs.web.resources.OwnerParam;
import org.apache.hadoop.hdfs.web.resources.Param;
@ -69,16 +63,13 @@ import org.apache.hadoop.hdfs.web.resources.PostOpParam;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
import org.apache.hadoop.hdfs.web.resources.RenewerParam;
import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
import org.mortbay.util.ajax.JSON;
@ -91,24 +82,17 @@ public class WebHdfsFileSystem extends HftpFileSystem {
private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
private final UserGroupInformation ugi;
private UserGroupInformation ugi;
private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
protected Path workingDir;
{
try {
ugi = UserGroupInformation.getCurrentUser();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public synchronized void initialize(URI uri, Configuration conf
) throws IOException {
super.initialize(uri, conf);
setConf(conf);
ugi = UserGroupInformation.getCurrentUser();
this.workingDir = getHomeDirectory();
}
@ -179,11 +163,11 @@ public class WebHdfsFileSystem extends HftpFileSystem {
}
}
URL toUrl(final HttpOpParam.Op op, final Path fspath,
private URL toUrl(final HttpOpParam.Op op, final Path fspath,
final Param<?,?>... parameters) throws IOException {
//initialize URI path and query
final String path = "/" + PATH_PREFIX
+ (fspath == null? "/": makeQualified(fspath).toUri().getPath());
+ makeQualified(fspath).toUri().getPath();
final String query = op.toQueryString()
+ '&' + new UserParam(ugi)
+ Param.toSortedString("&", parameters);
@ -412,41 +396,4 @@ public class WebHdfsFileSystem extends HftpFileSystem {
}
return statuses;
}
@Override
public Token<DelegationTokenIdentifier> getDelegationToken(final String renewer
) throws IOException {
final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
final Map<String, Object> m = run(op, null, new RenewerParam(renewer));
final Token<DelegationTokenIdentifier> token = JsonUtil.toDelegationToken(m);
token.setService(new Text(getCanonicalServiceName()));
return token;
}
@Override
public List<Token<?>> getDelegationTokens(final String renewer
) throws IOException {
final Token<?>[] t = {getDelegationToken(renewer)};
return Arrays.asList(t);
}
@Override
public BlockLocation[] getFileBlockLocations(final FileStatus status,
final long offset, final long length) throws IOException {
if (status == null) {
return null;
}
return getFileBlockLocations(status.getPath(), offset, length);
}
@Override
public BlockLocation[] getFileBlockLocations(final Path p,
final long offset, final long length) throws IOException {
statistics.incrementReadOps(1);
final HttpOpParam.Op op = GetOpParam.Op.GETFILEBLOCKLOCATIONS;
final Map<String, Object> m = run(op, p, new OffsetParam(offset),
new LengthParam(length));
return DFSUtil.locatedBlocks2Locations(JsonUtil.toLocatedBlocks(m));
}
}
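toUrl above assembles the request URI from the path prefix, the file path, the operation's query string, the user name and any remaining parameters. A rough sketch of the resulting shape; the prefix, host, port and parameter values here are assumptions for illustration only:

    import java.net.URI;

    public class WebHdfsUrlSketch {
      public static void main(String[] args) throws Exception {
        String prefix = "/webhdfs/v1";                    // assumed path prefix
        String path = "/user/alice/data.txt";
        String query = "op=OPEN&user.name=alice&offset=0&length=1024";
        URI uri = new URI("http", null, "namenode.example.com", 50070,
            prefix + path, query, null);
        System.out.println(uri.toURL());
      }
    }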

View File

@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.web.resources;
/** Access time parameter. */
public class AccessTimeParam extends LongParam {
/** Parameter name. */
public static final String NAME = "accesstime";
public static final String NAME = "accessTime";
/** Default parameter value. */
public static final String DEFAULT = "-1";

View File

@ -25,7 +25,7 @@ import org.apache.hadoop.conf.Configuration;
/** Block size parameter. */
public class BlockSizeParam extends LongParam {
/** Parameter name. */
public static final String NAME = "blocksize";
public static final String NAME = "blockSize";
/** Default parameter value. */
public static final String DEFAULT = NULL;

View File

@ -23,7 +23,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
/** Buffer size parameter. */
public class BufferSizeParam extends IntegerParam {
/** Parameter name. */
public static final String NAME = "buffersize";
public static final String NAME = "bufferSize";
/** Default parameter value. */
public static final String DEFAULT = NULL;

View File

@ -17,12 +17,13 @@
*/
package org.apache.hadoop.hdfs.web.resources;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.security.UserGroupInformation;
/** Delegation token parameter. */
public class DelegationParam extends StringParam {
/** Parameter name. */
public static final String NAME = "delegation";
public static final String NAME = JspHelper.DELEGATION_PARAMETER_NAME;
/** Default parameter value. */
public static final String DEFAULT = "";

View File

@ -21,6 +21,9 @@ import java.net.HttpURLConnection;
/** Http DELETE operation parameter. */
public class DeleteOpParam extends HttpOpParam<DeleteOpParam.Op> {
/** Parameter name. */
public static final String NAME = "deleteOp";
/** Delete operations. */
public static enum Op implements HttpOpParam.Op {
DELETE(HttpURLConnection.HTTP_OK),

View File

@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Path;
/** Destination path parameter. */
public class DstPathParam extends StringParam {
/** Parameter name. */
public static final String NAME = "dstpath";
public static final String NAME = "dstPath";
/** Default parameter value. */
public static final String DEFAULT = "";

View File

@ -21,16 +21,16 @@ import java.net.HttpURLConnection;
/** Http GET operation parameter. */
public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
/** Parameter name. */
public static final String NAME = "getOp";
/** Get operations. */
public static enum Op implements HttpOpParam.Op {
OPEN(HttpURLConnection.HTTP_OK),
GETFILEBLOCKLOCATIONS(HttpURLConnection.HTTP_OK),
GETFILESTATUS(HttpURLConnection.HTTP_OK),
LISTSTATUS(HttpURLConnection.HTTP_OK),
GETDELEGATIONTOKEN(HttpURLConnection.HTTP_OK),
NULL(HttpURLConnection.HTTP_NOT_IMPLEMENTED);
final int expectedHttpResponseCode;

View File

@ -20,9 +20,6 @@ package org.apache.hadoop.hdfs.web.resources;
/** Http operation parameter. */
public abstract class HttpOpParam<E extends Enum<E> & HttpOpParam.Op>
extends EnumParam<E> {
/** Parameter name. */
public static final String NAME = "op";
/** Default parameter value. */
public static final String DEFAULT = NULL;

View File

@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.web.resources;
/** Modification time parameter. */
public class ModificationTimeParam extends LongParam {
/** Parameter name. */
public static final String NAME = "modificationtime";
public static final String NAME = "modificationTime";
/** Default parameter value. */
public static final String DEFAULT = "-1";

View File

@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.web.resources;
/** Overwrite parameter. */
/** Recursive parameter. */
public class OverwriteParam extends BooleanParam {
/** Parameter name. */
public static final String NAME = "overwrite";

View File

@ -21,6 +21,9 @@ import java.net.HttpURLConnection;
/** Http POST operation parameter. */
public class PostOpParam extends HttpOpParam<PostOpParam.Op> {
/** Parameter name. */
public static final String NAME = "postOp";
/** Post operations. */
public static enum Op implements HttpOpParam.Op {
APPEND(HttpURLConnection.HTTP_OK),

View File

@ -21,6 +21,9 @@ import java.net.HttpURLConnection;
/** Http PUT operation parameter. */
public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
/** Parameter name. */
public static final String NAME = "putOp";
/** Put operations. */
public static enum Op implements HttpOpParam.Op {
CREATE(true, HttpURLConnection.HTTP_CREATED),

View File

@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Options;
/** Rename option set parameter. */
public class RenameOptionSetParam extends EnumSetParam<Options.Rename> {
/** Parameter name. */
public static final String NAME = "renameoptions";
public static final String NAME = "renameOptions";
/** Default parameter value. */
public static final String DEFAULT = "";

View File

@ -1,41 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
/** Renewer parameter. */
public class RenewerParam extends StringParam {
/** Parameter name. */
public static final String NAME = "renewer";
/** Default parameter value. */
public static final String DEFAULT = NULL;
private static final Domain DOMAIN = new Domain(NAME, null);
/**
* Constructor.
* @param str a string representation of the parameter value.
*/
public RenewerParam(final String str) {
super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
}
@Override
public String getName() {
return NAME;
}
}

View File

@ -683,4 +683,24 @@ creations/deletions), or "all".</description>
</description>
</property>
<property>
<name>dfs.web.authentication.kerberos.principal</name>
<value>HTTP/${dfs.web.hostname}@${kerberos.realm}</value>
<description>
The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
The HTTP Kerberos principal MUST start with 'HTTP/' per the Kerberos
HTTP SPNEGO specification.
</description>
</property>
<property>
<name>dfs.web.authentication.kerberos.keytab</name>
<value>${user.home}/dfs.web.keytab</value>
<description>
The Kerberos keytab file with the credentials for the
HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
</description>
</property>
</configuration>
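For context only (not part of this patch): a minimal, hypothetical sketch of how server-side code could read the two properties defined above through the standard Hadoop Configuration API. The class name and the printing are illustrative assumptions; only Configuration.get() is taken from the real API.
import org.apache.hadoop.conf.Configuration;
public class WebAuthConfigSketch {
  public static void main(String[] args) {
    // Plain Configuration keeps this sketch self-contained; a real HDFS daemon
    // would typically use HdfsConfiguration so that hdfs-default.xml is loaded.
    Configuration conf = new Configuration();
    // Keys match the <name> elements defined above; get() returns null if unset.
    String principal = conf.get("dfs.web.authentication.kerberos.principal");
    String keytab = conf.get("dfs.web.authentication.kerberos.keytab");
    System.out.println("SPNEGO principal: " + principal);
    System.out.println("SPNEGO keytab: " + keytab);
  }
}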

View File

@ -72,7 +72,6 @@ public class TestDFSPermission extends TestCase {
final private static Path NON_EXISTENT_FILE = new Path("/NonExistentFile");
private FileSystem fs;
private MiniDFSCluster cluster;
private static Random r;
static {
@ -106,25 +105,18 @@ public class TestDFSPermission extends TestCase {
}
}
@Override
public void setUp() throws IOException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
}
@Override
public void tearDown() throws IOException {
if (cluster != null) {
cluster.shutdown();
}
}
/** This tests if permission setting in create, mkdir, and
* setPermission works correctly
*/
public void testPermissionSetting() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
cluster.waitActive();
testPermissionSetting(OpType.CREATE); // test file creation
testPermissionSetting(OpType.MKDIRS); // test directory creation
} finally {
cluster.shutdown();
}
}
private void initFileSystem(short umask) throws Exception {
@ -253,22 +245,17 @@ public class TestDFSPermission extends TestCase {
}
}
/**
* check that ImmutableFsPermission can be used as the argument
* to setPermission
*/
public void testImmutableFsPermission() throws IOException {
fs = FileSystem.get(conf);
// set the permission of the root to be world-wide rwx
fs.setPermission(new Path("/"),
FsPermission.createImmutable((short)0777));
}
/* check if the ownership of a file/directory is set correctly */
public void testOwnership() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
cluster.waitActive();
testOwnership(OpType.CREATE); // test file creation
testOwnership(OpType.MKDIRS); // test directory creation
} finally {
fs.close();
cluster.shutdown();
}
}
/* change a file/directory's owner and group.
@ -355,7 +342,9 @@ public class TestDFSPermission extends TestCase {
/* Check if namenode performs permission checking correctly for
* superuser, file owner, group owner, and other users */
public void testPermissionChecking() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
cluster.waitActive();
fs = FileSystem.get(conf);
// set the permission of the root to be world-wide rwx
@ -412,6 +401,7 @@ public class TestDFSPermission extends TestCase {
parentPermissions, permissions, parentPaths, filePaths, dirPaths);
} finally {
fs.close();
cluster.shutdown();
}
}

View File

@ -29,7 +29,8 @@ import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.HadoopIllegalArgumentException;
import junit.framework.Assert;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@ -39,7 +40,8 @@ import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
public class TestDFSUtil {
/**
@ -82,111 +84,42 @@ public class TestDFSUtil {
assertEquals(0, bs.length);
}
private Configuration setupAddress(String key) {
/**
* Test for
* {@link DFSUtil#getNameServiceIds(Configuration)}
* {@link DFSUtil#getNameServiceId(Configuration)}
* {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
*/
@Test
public void testMultipleNamenodes() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
conf.set(DFSUtil.getNameServiceIdKey(key, "nn1"), "localhost:9000");
return conf;
}
conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
/**
* Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
* nameserviceId from the configuration is returned
*/
@Test
public void getNameServiceId() {
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
}
/**
* Test {@link DFSUtil#getNameNodeNameServiceId(Configuration)} to ensure
* nameserviceId for namenode is determined based on matching the address with
* local node's address
*/
@Test
public void getNameNodeNameServiceId() {
Configuration conf = setupAddress(DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
}
/**
* Test {@link DFSUtil#getBackupNameServiceId(Configuration)} to ensure
* nameserviceId for backup node is determined based on matching the address
* with local node's address
*/
@Test
public void getBackupNameServiceId() {
Configuration conf = setupAddress(DFS_NAMENODE_BACKUP_ADDRESS_KEY);
assertEquals("nn1", DFSUtil.getBackupNameServiceId(conf));
}
/**
* Test {@link DFSUtil#getSecondaryNameServiceId(Configuration)} to ensure
* nameserviceId for the secondary namenode is determined based on matching the address
* with local node's address
*/
@Test
public void getSecondaryNameServiceId() {
Configuration conf = setupAddress(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
assertEquals("nn1", DFSUtil.getSecondaryNameServiceId(conf));
}
/**
* Test {@link DFSUtil#getNameServiceId(Configuration, String)} to ensure
* exception is thrown when multiple rpc addresses match the local node's
* address
*/
@Test(expected = HadoopIllegalArgumentException.class)
public void testGetNameServiceIdException() {
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
"localhost:9000");
conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
"localhost:9001");
DFSUtil.getNamenodeNameServiceId(conf);
fail("Expected exception is not thrown");
}
/**
* Test {@link DFSUtil#getNameServiceIds(Configuration)}
*/
@Test
public void testGetNameServiceIds() {
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
// Test - The configured nameserviceIds are returned
Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
Iterator<String> it = nameserviceIds.iterator();
assertEquals(2, nameserviceIds.size());
assertEquals("nn1", it.next().toString());
assertEquals("nn2", it.next().toString());
}
/**
* Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
* {@link DFSUtil#getNameServiceIdFromAddress(Configuration, InetSocketAddress, String...)
* (Configuration)}
*/
@Test
public void testMultipleNamenodes() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
// Tests default nameserviceId is returned
conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
assertEquals("nn1", DFSUtil.getNameServiceId(conf));
// Test - configured list of namenodes are returned
final String NN1_ADDRESS = "localhost:9000";
final String NN2_ADDRESS = "localhost:9001";
final String NN3_ADDRESS = "localhost:9002";
conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
NN1_ADDRESS);
conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
NN2_ADDRESS);
conf.set(DFSUtil.getNameServiceIdKey(
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"), NN1_ADDRESS);
conf.set(DFSUtil.getNameServiceIdKey(
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), NN2_ADDRESS);
Collection<InetSocketAddress> nnAddresses = DFSUtil
.getNNServiceRpcAddresses(conf);
Collection<InetSocketAddress> nnAddresses =
DFSUtil.getNNServiceRpcAddresses(conf);
assertEquals(2, nnAddresses.size());
Iterator<InetSocketAddress> iterator = nnAddresses.iterator();
assertEquals(2, nameserviceIds.size());
InetSocketAddress addr = iterator.next();
assertEquals("localhost", addr.getHostName());
assertEquals(9000, addr.getPort());
@ -195,17 +128,24 @@ public class TestDFSUtil {
assertEquals(9001, addr.getPort());
// Test - can look up nameservice ID from service address
checkNameServiceId(conf, NN1_ADDRESS, "nn1");
checkNameServiceId(conf, NN2_ADDRESS, "nn2");
checkNameServiceId(conf, NN3_ADDRESS, null);
}
public void checkNameServiceId(Configuration conf, String addr,
String expectedNameServiceId) {
InetSocketAddress s = NetUtils.createSocketAddr(addr);
String nameserviceId = DFSUtil.getNameServiceIdFromAddress(conf, s,
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals(expectedNameServiceId, nameserviceId);
InetSocketAddress testAddress1 = NetUtils.createSocketAddr(NN1_ADDRESS);
String nameserviceId = DFSUtil.getNameServiceIdFromAddress(
conf, testAddress1,
DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals("nn1", nameserviceId);
InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
nameserviceId = DFSUtil.getNameServiceIdFromAddress(
conf, testAddress2,
DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals("nn2", nameserviceId);
InetSocketAddress testAddress3 = NetUtils.createSocketAddr(NN3_ADDRESS);
nameserviceId = DFSUtil.getNameServiceIdFromAddress(
conf, testAddress3,
DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
assertNull(nameserviceId);
}
/**
@ -217,15 +157,17 @@ public class TestDFSUtil {
HdfsConfiguration conf = new HdfsConfiguration();
final String DEFAULT_ADDRESS = "localhost:9000";
final String NN2_ADDRESS = "localhost:9001";
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
InetSocketAddress testAddress1 = NetUtils.createSocketAddr(DEFAULT_ADDRESS);
boolean isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress1,
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
assertTrue(isDefault);
InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress2,
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
assertFalse(isDefault);
}
@ -234,8 +176,8 @@ public class TestDFSUtil {
public void testDefaultNamenode() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
final String hdfs_default = "hdfs://localhost:9999/";
conf.set(FS_DEFAULT_NAME_KEY, hdfs_default);
// If DFS_FEDERATION_NAMESERVICES is not set, verify that
conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, hdfs_default);
// If DFSConfigKeys.DFS_FEDERATION_NAMESERVICES is not set, verify that
// default namenode address is returned.
List<InetSocketAddress> addrList = DFSUtil.getNNServiceRpcAddresses(conf);
assertEquals(1, addrList.size());
@ -249,9 +191,9 @@ public class TestDFSUtil {
@Test
public void testConfModification() throws IOException {
final HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
final String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1");
conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
final String nameserviceId = DFSUtil.getNameServiceId(conf);
// Set the nameservice specific keys with nameserviceId in the config key
for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
@ -260,7 +202,7 @@ public class TestDFSUtil {
}
// Initialize generic keys from specific keys
NameNode.initializeGenericKeys(conf, nameserviceId);
NameNode.initializeGenericKeys(conf);
// Retrieve the keys without nameserviceId and ensure generic keys are set
// to the correct value
@ -303,9 +245,9 @@ public class TestDFSUtil {
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
String httpsport = DFSUtil.getInfoServer(null, conf, true);
assertEquals("0.0.0.0:50470", httpsport);
Assert.assertEquals("0.0.0.0:50470", httpsport);
String httpport = DFSUtil.getInfoServer(null, conf, false);
assertEquals("0.0.0.0:50070", httpport);
Assert.assertEquals("0.0.0.0:50070", httpport);
}
}

View File

@ -17,10 +17,6 @@
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.OutputStream;
import java.security.PrivilegedExceptionAction;
@ -28,15 +24,17 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.junit.Test;
import static org.junit.Assert.*;
/** A class for testing quota-related commands */
public class TestQuota {
@ -843,14 +841,6 @@ public class TestQuota {
DFSAdmin admin = new DFSAdmin(conf);
try {
// Test for default NameSpace Quota
long nsQuota = FSImageTestUtil.getNSQuota(cluster.getNameNode()
.getNamesystem());
assertTrue(
"Default namespace quota expected as long max. But the value is :"
+ nsQuota, nsQuota == Long.MAX_VALUE);
Path dir = new Path("/test");
boolean exceededQuota = false;
ContentSummary c;

View File

@ -23,12 +23,12 @@ package org.apache.hadoop.hdfs.security;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import junit.framework.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@ -38,16 +38,12 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@ -60,13 +56,12 @@ public class TestDelegationToken {
@Before
public void setUp() throws Exception {
config = new HdfsConfiguration();
config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
config.set("hadoop.security.auth_to_local",
"RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
cluster = new MiniDFSCluster.Builder(config).build();
cluster.waitActive();
dtSecretManager = NameNodeAdapter.getDtSecretManager(
cluster.getNamesystem());
@ -158,31 +153,6 @@ public class TestDelegationToken {
dtSecretManager.renewToken(token, "JobTracker");
}
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
final String uri = WebHdfsFileSystem.SCHEME + "://"
+ config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
//get file system as JobTracker
final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
"JobTracker", new String[]{"user"});
final WebHdfsFileSystem webhdfs = ugi.doAs(
new PrivilegedExceptionAction<WebHdfsFileSystem>() {
@Override
public WebHdfsFileSystem run() throws Exception {
return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
}
});
final Token<DelegationTokenIdentifier> token = webhdfs.getDelegationToken("JobTracker");
DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
byte[] tokenId = token.getIdentifier();
identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
LOG.info("A valid token should have non-null password, and should be renewed successfully");
Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
dtSecretManager.renewToken(token, "JobTracker");
}
@Test
public void testDelegationTokenWithDoAs() throws Exception {
final DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();

View File

@ -18,34 +18,31 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import junit.framework.TestCase;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.junit.Before;
import org.junit.Test;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.Host2NodesMap;
public class TestHost2NodesMap {
private Host2NodesMap map = new Host2NodesMap();
private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
public class TestHost2NodesMap extends TestCase {
static private Host2NodesMap map = new Host2NodesMap();
private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
new DatanodeDescriptor(new DatanodeID("h3:5030"), "/d1/r2"),
};
private final DatanodeDescriptor NULL_NODE = null;
private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3:5040"),
"/d1/r4");
private final static DatanodeDescriptor NULL_NODE = null;
private final static DatanodeDescriptor NODE =
new DatanodeDescriptor(new DatanodeID("h3:5040"), "/d1/r4");
@Before
public void setup() {
static {
for(DatanodeDescriptor node:dataNodes) {
map.add(node);
}
map.add(NULL_NODE);
}
@Test
public void testContains() throws Exception {
for(int i=0; i<dataNodes.length; i++) {
assertTrue(map.contains(dataNodes[i]));
@ -54,7 +51,6 @@ public class TestHost2NodesMap {
assertFalse(map.contains(NODE));
}
@Test
public void testGetDatanodeByHost() throws Exception {
assertTrue(map.getDatanodeByHost("h1")==dataNodes[0]);
assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
@ -63,7 +59,6 @@ public class TestHost2NodesMap {
assertTrue(null==map.getDatanodeByHost("h4"));
}
@Test
public void testGetDatanodeByName() throws Exception {
assertTrue(map.getDatanodeByName("h1:5020")==dataNodes[0]);
assertTrue(map.getDatanodeByName("h1:5030")==null);
@ -76,7 +71,6 @@ public class TestHost2NodesMap {
assertTrue(map.getDatanodeByName(null)==null);
}
@Test
public void testRemove() throws Exception {
assertFalse(map.remove(NODE));

View File

@ -96,8 +96,7 @@ public class TestMulitipleNNDataBlockScanner {
String bpidToShutdown = cluster.getNamesystem(2).getBlockPoolId();
for (int i = 0; i < 2; i++) {
String nsId = DFSUtil.getNamenodeNameServiceId(cluster
.getConfiguration(i));
String nsId = DFSUtil.getNameServiceId(cluster.getConfiguration(i));
namenodesBuilder.append(nsId);
namenodesBuilder.append(",");
}
@ -117,7 +116,7 @@ public class TestMulitipleNNDataBlockScanner {
LOG.info(ex.getMessage());
}
namenodesBuilder.append(DFSUtil.getNamenodeNameServiceId(cluster
namenodesBuilder.append(DFSUtil.getNameServiceId(cluster
.getConfiguration(2)));
conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, namenodesBuilder
.toString());

View File

@ -17,24 +17,21 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;
import org.apache.hadoop.hdfs.protocol.Block;
import org.junit.Before;
import static org.junit.Assert.*;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Unit test for ReplicasMap class
*/
public class TestReplicasMap {
private final ReplicasMap map = new ReplicasMap(TestReplicasMap.class);
private final String bpid = "BP-TEST";
private final Block block = new Block(1234, 1234, 1234);
private static final ReplicasMap map = new ReplicasMap(TestReplicasMap.class);
private static final String bpid = "BP-TEST";
private static final Block block = new Block(1234, 1234, 1234);
@Before
public void setup() {
@BeforeClass
public static void setup() {
map.add(bpid, new FinalizedReplica(block, null, null));
}

View File

@ -412,11 +412,4 @@ public abstract class FSImageTestUtil {
public static FSImage getFSImage(NameNode node) {
return node.getFSImage();
}
/**
* get NameSpace quota.
*/
public static long getNSQuota(FSNamesystem ns) {
return ns.dir.rootDir.getNsQuota();
}
}

View File

@ -1,290 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
import org.junit.Test;
public class TestProcessCorruptBlocks {
/**
* The corrupt block has to be removed when the number of valid replicas
* matches the replication factor for the file. In this test, the above condition is
* tested by reducing the replication factor
* The test strategy :
* Bring up Cluster with 3 DataNodes
* Create a file of replication factor 3
* Corrupt one replica of a block of the file
* Verify that there are still 2 good replicas and 1 corrupt replica
* (corrupt replica should not be removed since number of good
* replicas (2) is less than replication factor (3))
* Set the replication factor to 2
* Verify that the corrupt replica is removed.
* (corrupt replica should be removed since the number of good
* replicas (2) is equal to replication factor (2))
*/
@Test
public void testWhenDecreasingReplication() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
try {
final Path fileName = new Path("/foo1");
DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
DFSTestUtil.waitReplication(fs, fileName, (short) 3);
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
corruptBlock(cluster, fs, fileName, 0, block);
DFSTestUtil.waitReplication(fs, fileName, (short) 2);
assertEquals(2, countReplicas(namesystem, block).liveReplicas());
assertEquals(1, countReplicas(namesystem, block).corruptReplicas());
namesystem.setReplication(fileName.toString(), (short) 2);
// wait for 3 seconds so that all block reports are processed.
try {
Thread.sleep(3000);
} catch (InterruptedException ignored) {
}
assertEquals(2, countReplicas(namesystem, block).liveReplicas());
assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
} finally {
cluster.shutdown();
}
}
/**
* The corrupt block has to be removed when the number of valid replicas
* matches replication factor for the file. In this test, the above
* condition is achieved by increasing the number of good replicas by
* replicating on a new Datanode.
* The test strategy :
* Bring up Cluster with 3 DataNodes
* Create a file of replication factor 3
* Corrupt one replica of a block of the file
* Verify that there are still 2 good replicas and 1 corrupt replica
* (corrupt replica should not be removed since number of good replicas
* (2) is less than replication factor (3))
* Start a new data node
* Verify that a new replica is created and the corrupt replica is
* removed.
*
*/
@Test
public void testByAddingAnExtraDataNode() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
FileSystem fs = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
DataNodeProperties dnPropsFourth = cluster.stopDataNode(3);
try {
final Path fileName = new Path("/foo1");
DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
DFSTestUtil.waitReplication(fs, fileName, (short) 3);
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
corruptBlock(cluster, fs, fileName, 0, block);
DFSTestUtil.waitReplication(fs, fileName, (short) 2);
assertEquals(2, countReplicas(namesystem, block).liveReplicas());
assertEquals(1, countReplicas(namesystem, block).corruptReplicas());
cluster.restartDataNode(dnPropsFourth);
DFSTestUtil.waitReplication(fs, fileName, (short) 3);
assertEquals(3, countReplicas(namesystem, block).liveReplicas());
assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
} finally {
cluster.shutdown();
}
}
/**
* The corrupt block has to be removed when the number of valid replicas
* matches replication factor for the file. The above condition should hold
* true as long as there is one good replica. This test verifies that.
*
* The test strategy :
* Bring up Cluster with 2 DataNodes
* Create a file of replication factor 2
* Corrupt one replica of a block of the file
* Verify that there is one good replica and 1 corrupt replica
* (corrupt replica should not be removed since number of good
* replicas (1) is less than replication factor (2)).
* Set the replication factor to 1
* Verify that the corrupt replica is removed.
* (corrupt replica should be removed since number of good
* replicas (1) is equal to replication factor (1))
*/
@Test
public void testWithReplicationFactorAsOne() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
try {
final Path fileName = new Path("/foo1");
DFSTestUtil.createFile(fs, fileName, 2, (short) 2, 0L);
DFSTestUtil.waitReplication(fs, fileName, (short) 2);
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
corruptBlock(cluster, fs, fileName, 0, block);
DFSTestUtil.waitReplication(fs, fileName, (short) 1);
assertEquals(1, countReplicas(namesystem, block).liveReplicas());
assertEquals(1, countReplicas(namesystem, block).corruptReplicas());
namesystem.setReplication(fileName.toString(), (short) 1);
// wait for 3 seconds so that all block reports are processed.
try {
Thread.sleep(3000);
} catch (InterruptedException ignored) {
}
assertEquals(1, countReplicas(namesystem, block).liveReplicas());
assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
} finally {
cluster.shutdown();
}
}
/**
* None of the blocks can be removed if all blocks are corrupt.
*
* The test strategy :
* Bring up Cluster with 3 DataNodes
* Create a file of replication factor 3
* Corrupt all three replicas
* Verify that all replicas are corrupt and 3 replicas are present.
* Set the replication factor to 1
* Verify that all replicas are corrupt and 3 replicas are present.
*/
@Test
public void testWithAllCorruptReplicas() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
try {
final Path fileName = new Path("/foo1");
DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
DFSTestUtil.waitReplication(fs, fileName, (short) 3);
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
corruptBlock(cluster, fs, fileName, 0, block);
corruptBlock(cluster, fs, fileName, 1, block);
corruptBlock(cluster, fs, fileName, 2, block);
// wait for 3 seconds so that all block reports are processed.
try {
Thread.sleep(3000);
} catch (InterruptedException ignored) {
}
assertEquals(0, countReplicas(namesystem, block).liveReplicas());
assertEquals(3, countReplicas(namesystem, block).corruptReplicas());
namesystem.setReplication(fileName.toString(), (short) 1);
// wait for 3 seconds so that all block reports are processed.
try {
Thread.sleep(3000);
} catch (InterruptedException ignored) {
}
assertEquals(0, countReplicas(namesystem, block).liveReplicas());
assertEquals(3, countReplicas(namesystem, block).corruptReplicas());
} finally {
cluster.shutdown();
}
}
private static NumberReplicas countReplicas(final FSNamesystem namesystem, ExtendedBlock block) {
return namesystem.getBlockManager().countNodes(block.getLocalBlock());
}
private void corruptBlock(MiniDFSCluster cluster, FileSystem fs, final Path fileName,
int dnIndex, ExtendedBlock block) throws IOException {
// corrupt the block on datanode dnIndex
// the indexes change once the nodes are restarted.
// But the data directory will not change
assertTrue(MiniDFSCluster.corruptReplica(dnIndex, block));
DataNodeProperties dnProps = cluster.stopDataNode(0);
// Each datanode has multiple data dirs, check each
for (int dirIndex = 0; dirIndex < 2; dirIndex++) {
final String bpid = cluster.getNamesystem().getBlockPoolId();
File storageDir = MiniDFSCluster.getStorageDir(dnIndex, dirIndex);
File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
File scanLogFile = new File(dataDir, "dncp_block_verification.log.curr");
if (scanLogFile.exists()) {
// wait for one minute for deletion to succeed;
for (int i = 0; !scanLogFile.delete(); i++) {
assertTrue("Could not delete log file in one minute", i < 60);
try {
Thread.sleep(1000);
} catch (InterruptedException ignored) {
}
}
}
}
// restart the datanode so the corrupt replica will be detected
cluster.restartDataNode(dnProps);
}
}

View File

@ -18,23 +18,17 @@
package org.apache.hadoop.hdfs.web;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
@ -120,42 +114,4 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
// also okay for HDFS.
}
}
public void testGetFileBlockLocations() throws IOException {
final String f = "/test/testGetFileBlockLocations";
createFile(path(f));
final BlockLocation[] computed = fs.getFileBlockLocations(new Path(f), 0L, 1L);
final BlockLocation[] expected = cluster.getFileSystem().getFileBlockLocations(
new Path(f), 0L, 1L);
assertEquals(expected.length, computed.length);
for(int i = 0; i < computed.length; i++) {
assertEquals(expected[i].toString(), computed[i].toString());
}
}
public void testCaseInsensitive() throws IOException {
final Path p = new Path("/test/testCaseInsensitive");
final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
final PutOpParam.Op op = PutOpParam.Op.MKDIRS;
//replace query with mix case letters
final URL url = webhdfs.toUrl(op, p);
WebHdfsFileSystem.LOG.info("url = " + url);
final URL replaced = new URL(url.toString().replace(op.toQueryString(),
"Op=mkDIrs"));
WebHdfsFileSystem.LOG.info("replaced = " + replaced);
//connect with the replaced URL.
final HttpURLConnection conn = (HttpURLConnection)replaced.openConnection();
conn.setRequestMethod(op.getType().toString());
conn.connect();
final BufferedReader in = new BufferedReader(new InputStreamReader(
conn.getInputStream()));
for(String line; (line = in.readLine()) != null; ) {
WebHdfsFileSystem.LOG.info("> " + line);
}
// check if the command succeeds.
assertTrue(fs.getFileStatus(p).isDirectory());
}
}

View File

@ -29,8 +29,6 @@ Trunk (unreleased changes)
findBugs, correct links to findBugs artifacts and no links to the
artifacts when there are no warnings. (Tom White via vinodkv).
MAPREDUCE-3081. Fix vaidya startup script. (gkesavan via suhas).
Release 0.23.0 - Unreleased
INCOMPATIBLE CHANGES
@ -72,9 +70,6 @@ Release 0.23.0 - Unreleased
MAPREDUCE-2037. Capture intermediate progress, CPU and memory usage for
tasks. (Dick King via acmurthy)
MAPREDUCE-2930. Added the ability to be able to generate graphs from the
state-machine definitions. (Binglin Chang via vinodkv)
IMPROVEMENTS
MAPREDUCE-2187. Reporter sends progress during sort/merge. (Anupam Seth via
@ -312,15 +307,6 @@ Release 0.23.0 - Unreleased
MAPREDUCE-2726. Added job-file to the AM and JobHistoryServer web
interfaces. (Jeffrey Naisbitt via vinodkv)
MAPREDUCE-3055. Simplified ApplicationAttemptId passing to
ApplicationMaster via environment variable. (vinodkv)
MAPREDUCE-3092. Removed a special comparator for JobIDs in JobHistory as
JobIDs are already comparable. (Devaraj K via vinodkv)
MAPREDUCE-3099. Add docs for setting up a single node MRv2 cluster.
(mahadev)
OPTIMIZATIONS
MAPREDUCE-2026. Make JobTracker.getJobCounters() and
@ -332,9 +318,6 @@ Release 0.23.0 - Unreleased
MAPREDUCE-901. Efficient framework counters. (llu via acmurthy)
MAPREDUCE-2880. Improve classpath-construction for mapreduce AM and
containers. (Arun C Murthy via vinodkv)
BUG FIXES
MAPREDUCE-2603. Disable High-Ram emulation in system tests.
@ -1387,91 +1370,6 @@ Release 0.23.0 - Unreleased
YarnClientProtocolProvider and ensured MiniMRYarnCluster sets JobHistory
configuration for tests. (acmurthy)
MAPREDUCE-3018. Fixed -file option for streaming. (mahadev via acmurthy)
MAPREDUCE-3036. Fixed metrics for reserved resources in CS. (Robert Evans
via acmurthy)
MAPREDUCE-2998. Fixed a bug in TaskAttemptImpl which caused it to fork
bin/mapred too many times. (vinodkv via acmurthy)
MAPREDUCE-3023. Fixed clients to display queue state correctly. (Ravi
Prakash via acmurthy)
MAPREDUCE-2970. Fixed NPEs in corner cases with different configurations
for mapreduce.framework.name. (Venu Gopala Rao via vinodkv)
MAPREDUCE-3062. Fixed default RMAdmin address. (Chris Riccomini
via acmurthy)
MAPREDUCE-3066. Fixed default ResourceTracker address for the NodeManager.
(Chris Riccomini via acmurthy)
MAPREDUCE-3044. Pipes jobs stuck without making progress. (mahadev)
MAPREDUCE-2754. Fixed MR AM stdout, stderr and syslog to redirect to
correct log-files. (Ravi Teja Ch N V via vinodkv)
MAPREDUCE-3073. Fixed build issues in MR1. (mahadev via acmurthy)
MAPREDUCE-2691. Increase threadpool size for launching containers in
MapReduce ApplicationMaster. (vinodkv via acmurthy)
MAPREDUCE-2990. Fixed display of NodeHealthStatus. (Subroto Sanyal via
acmurthy)
MAPREDUCE-3053. Better diagnostic message for unknown methods in ProtoBuf
RPCs. (vinodkv via acmurthy)
MAPREDUCE-2952. Fixed ResourceManager/MR-client to consume diagnostics
for AM failures in a couple of corner cases. (Arun C Murthy via vinodkv)
MAPREDUCE-3064. 27 unit test failures with Invalid
"mapreduce.jobtracker.address" configuration value for
JobTracker: "local" (Venu Gopala Rao via mahadev)
MAPREDUCE-3090. Fix MR AM to use ApplicationAttemptId rather than
(ApplicationId, startCount) consistently. (acmurthy)
MAPREDUCE-2646. Fixed AMRMProtocol to return containers based on
priority. (Sharad Agarwal and Arun C Murthy via vinodkv)
MAPREDUCE-3031. Proper handling of killed containers to prevent stuck
containers/AMs on an external kill signal. (Siddharth Seth via vinodkv)
MAPREDUCE-2984. Better error message for displaying completed containers.
(Devaraj K via acmurthy)
MAPREDUCE-3071. app master configuration web UI link under the Job menu
opens up application menu. (thomas graves via mahadev)
MAPREDUCE-3067. Ensure exit-code is set correctly for containers. (Hitesh
Shah via acmurthy)
MAPREDUCE-2999. Fix YARN webapp framework to properly filter servlet
paths. (Thomas Graves via vinodkv)
MAPREDUCE-3095. fairscheduler ivy including wrong version for hdfs.
(John George via mahadev)
MAPREDUCE-3054. Unable to kill submitted jobs. (mahadev)
MAPREDUCE-3021. Change base urls for RM web-ui. (Thomas Graves via
acmurthy)
MAPREDUCE-3041. Fixed ClientRMProtocol to provide min/max resource
capabilities along-with new ApplicationId for application submission.
(Hitesh Shah via acmurthy)
MAPREDUCE-2843. Fixed the node-table to be completely displayed and making
node entries on RM UI to be sortable. (Abhijit Suresh Shingate via vinodkv)
MAPREDUCE-3110. Fixed TestRPC failure. (vinodkv)
MAPREDUCE-3078. Ensure MapReduce AM reports progress correctly for
displaying on the RM Web-UI. (vinodkv via acmurthy)
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES

View File

@ -55,12 +55,6 @@
<artifactId>hadoop-yarn-server-resourcemanager</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-resourcemanager</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-shuffle</artifactId>
@ -119,41 +113,4 @@
</plugin>
</plugins>
</build>
<profiles>
<profile>
<id>visualize</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<version>1.2</version>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>java</goal>
</goals>
<configuration>
<classpathScope>test</classpathScope>
<mainClass>org.apache.hadoop.yarn.util.VisualizeStateMachine</mainClass>
<arguments>
<argument>MapReduce</argument>
<argument>org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl,
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskImpl,
org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl</argument>
<argument>MapReduce.gv</argument>
</arguments>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>

View File

@ -18,27 +18,27 @@
package org.apache.hadoop.mapred;
import java.io.File;
import java.net.InetSocketAddress;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Vector;
import org.apache.hadoop.fs.Path;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.TaskLog.LogName;
import org.apache.hadoop.mapreduce.ID;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
public class MapReduceChildJVM {
private static final String SYSTEM_PATH_SEPARATOR =
System.getProperty("path.separator");
private static String getTaskLogFile(LogName filter) {
return ApplicationConstants.LOG_DIR_EXPANSION_VAR + Path.SEPARATOR +
filter.toString();
private static final Log LOG = LogFactory.getLog(MapReduceChildJVM.class);
private static File getTaskLogFile(String logDir, LogName filter) {
return new File(logDir, filter.toString());
}
private static String getChildEnv(JobConf jobConf, boolean isMap) {
@ -50,53 +50,32 @@ public class MapReduceChildJVM {
jobConf.get(jobConf.MAPRED_TASK_ENV));
}
private static String getChildLogLevel(JobConf conf, boolean isMap) {
if (isMap) {
return conf.get(
MRJobConfig.MAP_LOG_LEVEL,
JobConf.DEFAULT_LOG_LEVEL.toString()
);
} else {
return conf.get(
MRJobConfig.REDUCE_LOG_LEVEL,
JobConf.DEFAULT_LOG_LEVEL.toString()
);
}
}
public static void setVMEnv(Map<String, String> environment,
Task task) {
public static void setVMEnv(Map<String, String> env,
List<String> classPaths, String pwd, String containerLogDir,
String nmLdLibraryPath, Task task, CharSequence applicationTokensFile) {
JobConf conf = task.conf;
// Shell
environment.put(
Environment.SHELL.name(),
conf.get(
MRJobConfig.MAPRED_ADMIN_USER_SHELL,
MRJobConfig.DEFAULT_SHELL)
);
// Add classpath.
CharSequence cp = env.get("CLASSPATH");
String classpath = StringUtils.join(SYSTEM_PATH_SEPARATOR, classPaths);
if (null == cp) {
env.put("CLASSPATH", classpath);
} else {
env.put("CLASSPATH", classpath + SYSTEM_PATH_SEPARATOR + cp);
}
// Add pwd to LD_LIBRARY_PATH, add this before adding anything else
MRApps.addToEnvironment(
environment,
Environment.LD_LIBRARY_PATH.name(),
Environment.PWD.$());
/////// Environmental variable LD_LIBRARY_PATH
StringBuilder ldLibraryPath = new StringBuilder();
// Add the env variables passed by the user & admin
String mapredChildEnv = getChildEnv(conf, task.isMapTask());
MRApps.setEnvFromInputString(environment, mapredChildEnv);
MRApps.setEnvFromInputString(
environment,
conf.get(
MRJobConfig.MAPRED_ADMIN_USER_ENV,
MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV)
);
ldLibraryPath.append(nmLdLibraryPath);
ldLibraryPath.append(SYSTEM_PATH_SEPARATOR);
ldLibraryPath.append(pwd);
env.put("LD_LIBRARY_PATH", ldLibraryPath.toString());
/////// Environmental variable LD_LIBRARY_PATH
// Set logging level
environment.put(
"HADOOP_ROOT_LOGGER",
getChildLogLevel(conf, task.isMapTask()) + ",CLA");
// for the child of task jvm, set hadoop.root.logger
env.put("HADOOP_ROOT_LOGGER", "DEBUG,CLA"); // TODO: Debug
// TODO: The following is useful for instance in streaming tasks. Should be
// set in ApplicationMaster's env by the RM.
@ -110,69 +89,76 @@ public class MapReduceChildJVM {
// properties.
long logSize = TaskLog.getTaskLogLength(conf);
Vector<String> logProps = new Vector<String>(4);
setupLog4jProperties(logProps, logSize);
setupLog4jProperties(logProps, logSize, containerLogDir);
Iterator<String> it = logProps.iterator();
StringBuffer buffer = new StringBuffer();
while (it.hasNext()) {
buffer.append(" " + it.next());
}
hadoopClientOpts = hadoopClientOpts + buffer.toString();
environment.put("HADOOP_CLIENT_OPTS", hadoopClientOpts);
// Add stdout/stderr env
environment.put(
MRJobConfig.STDOUT_LOGFILE_ENV,
getTaskLogFile(TaskLog.LogName.STDOUT)
);
environment.put(
MRJobConfig.STDERR_LOGFILE_ENV,
getTaskLogFile(TaskLog.LogName.STDERR)
);
env.put("HADOOP_CLIENT_OPTS", hadoopClientOpts);
// add the env variables passed by the user
String mapredChildEnv = getChildEnv(conf, task.isMapTask());
if (mapredChildEnv != null && mapredChildEnv.length() > 0) {
String childEnvs[] = mapredChildEnv.split(",");
for (String cEnv : childEnvs) {
String[] parts = cEnv.split("="); // split on '='
String value = (String) env.get(parts[0]);
if (value != null) {
// replace $env with the child's env constructed by tt's
// example LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp
value = parts[1].replace("$" + parts[0], value);
} else {
// this key is not configured by the tt for the child .. get it
// from the tt's env
// example PATH=$PATH:/tmp
value = System.getenv(parts[0]); // Get from NM?
if (value != null) {
// the env key is present in the tt's env
value = parts[1].replace("$" + parts[0], value);
} else {
// the env key is not present anywhere .. simply set it
// example X=$X:/tmp or X=/tmp
value = parts[1].replace("$" + parts[0], "");
}
}
env.put(parts[0], value);
}
}
// This should not be set here (if an OS check is required, move it to ContainerLaunch)
// env.put("JVM_PID", "`echo $$`");
env.put(Constants.STDOUT_LOGFILE_ENV,
getTaskLogFile(containerLogDir, TaskLog.LogName.STDOUT).toString());
env.put(Constants.STDERR_LOGFILE_ENV,
getTaskLogFile(containerLogDir, TaskLog.LogName.STDERR).toString());
}
private static String getChildJavaOpts(JobConf jobConf, boolean isMapTask) {
String userClasspath = "";
String adminClasspath = "";
if (isMapTask) {
userClasspath =
jobConf.get(
JobConf.MAPRED_MAP_TASK_JAVA_OPTS,
jobConf.get(
return jobConf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, jobConf.get(
JobConf.MAPRED_TASK_JAVA_OPTS,
JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS)
);
adminClasspath =
jobConf.get(
MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS,
MRJobConfig.DEFAULT_MAPRED_ADMIN_JAVA_OPTS);
} else {
userClasspath =
jobConf.get(
JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS,
jobConf.get(
JobConf.MAPRED_TASK_JAVA_OPTS,
JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS)
);
adminClasspath =
jobConf.get(
MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS,
MRJobConfig.DEFAULT_MAPRED_ADMIN_JAVA_OPTS);
JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS));
}
// Add admin classpath first so it can be overridden by user.
return adminClasspath + " " + userClasspath;
return jobConf
.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, jobConf.get(
JobConf.MAPRED_TASK_JAVA_OPTS,
JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS));
}
private static void setupLog4jProperties(Vector<String> vargs,
long logSize) {
long logSize, String containerLogDir) {
vargs.add("-Dlog4j.configuration=container-log4j.properties");
vargs.add("-D" + MRJobConfig.TASK_LOG_DIR + "=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
vargs.add("-D" + MRJobConfig.TASK_LOG_SIZE + "=" + logSize);
vargs.add("-Dhadoop.yarn.mr.containerLogDir=" + containerLogDir);
vargs.add("-Dhadoop.yarn.mr.totalLogFileSize=" + logSize);
}
public static List<String> getVMCommand(
InetSocketAddress taskAttemptListenerAddr, Task task,
ID jvmID) {
InetSocketAddress taskAttemptListenerAddr, Task task, String javaHome,
String workDir, String logDir, String childTmpDir, ID jvmID) {
TaskAttemptID attemptID = task.getTaskID();
JobConf conf = task.conf;
@ -180,7 +166,7 @@ public class MapReduceChildJVM {
Vector<String> vargs = new Vector<String>(8);
vargs.add("exec");
vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
vargs.add(javaHome + "/bin/java");
// Add child (task) java-vm options.
//
@ -213,26 +199,44 @@ public class MapReduceChildJVM {
String javaOpts = getChildJavaOpts(conf, task.isMapTask());
javaOpts = javaOpts.replace("@taskid@", attemptID.toString());
String [] javaOptsSplit = javaOpts.split(" ");
// Add java.library.path; necessary for loading native libraries.
//
// 1. We add the 'cwd' of the task to its java.library.path to help
// users distribute native libraries via the DistributedCache.
// 2. The user can also specify extra paths to be added to the
// java.library.path via mapred.{map|reduce}.child.java.opts.
//
String libraryPath = workDir;
boolean hasUserLDPath = false;
for(int i=0; i<javaOptsSplit.length ;i++) {
if(javaOptsSplit[i].startsWith("-Djava.library.path=")) {
// TODO: Does the above take care of escaped space chars
javaOptsSplit[i] += SYSTEM_PATH_SEPARATOR + libraryPath;
hasUserLDPath = true;
break;
}
}
if(!hasUserLDPath) {
vargs.add("-Djava.library.path=" + libraryPath);
}
for (int i = 0; i < javaOptsSplit.length; i++) {
vargs.add(javaOptsSplit[i]);
}
String childTmpDir = Environment.PWD.$() + Path.SEPARATOR + "tmp";
if (childTmpDir != null) {
vargs.add("-Djava.io.tmpdir=" + childTmpDir);
}
// Setup the log4j prop
long logSize = TaskLog.getTaskLogLength(conf);
setupLog4jProperties(vargs, logSize);
setupLog4jProperties(vargs, logSize, logDir);
if (conf.getProfileEnabled()) {
if (conf.getProfileTaskRange(task.isMapTask()
).isIncluded(task.getPartition())) {
vargs.add(
String.format(
conf.getProfileParams(),
getTaskLogFile(TaskLog.LogName.PROFILE)
)
);
File prof = getTaskLogFile(logDir, TaskLog.LogName.PROFILE);
vargs.add(String.format(conf.getProfileParams(), prof.toString()));
}
}
@ -245,8 +249,8 @@ public class MapReduceChildJVM {
// Finally add the jvmID
vargs.add(String.valueOf(jvmID.getId()));
vargs.add("1>" + getTaskLogFile(TaskLog.LogName.STDOUT));
vargs.add("2>" + getTaskLogFile(TaskLog.LogName.STDERR));
vargs.add("1>" + getTaskLogFile(logDir, TaskLog.LogName.STDERR));
vargs.add("2>" + getTaskLogFile(logDir, TaskLog.LogName.STDOUT));
// Final command
StringBuilder mergedCommand = new StringBuilder();

View File

@ -47,6 +47,7 @@ import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.mapreduce.v2.MRConstants;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.security.Credentials;
@ -70,7 +71,7 @@ class YarnChild {
LOG.debug("Child starting");
final JobConf defaultConf = new JobConf();
defaultConf.addResource(MRJobConfig.JOB_CONF_FILE);
defaultConf.addResource(MRConstants.JOB_CONF_FILE);
UserGroupInformation.setConfiguration(defaultConf);
String host = args[0];
@ -237,7 +238,7 @@ class YarnChild {
private static JobConf configureTask(Task task, Credentials credentials,
Token<JobTokenIdentifier> jt) throws IOException {
final JobConf job = new JobConf(MRJobConfig.JOB_CONF_FILE);
final JobConf job = new JobConf(MRConstants.JOB_CONF_FILE);
job.setCredentials(credentials);
// set tcp nodelay
job.setBoolean("ipc.client.tcpnodelay", true);
@ -259,7 +260,7 @@ class YarnChild {
// Overwrite the localized task jobconf which is linked to in the current
// work-dir.
Path localTaskFile = new Path(MRJobConfig.JOB_CONF_FILE);
Path localTaskFile = new Path(Constants.JOBFILE);
writeLocalJobFile(localTaskFile, job);
task.setJobFile(localTaskFile.toString());
task.setConf(job);

View File

@ -39,6 +39,7 @@ import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.mapreduce.v2.MRConstants;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
@ -77,7 +78,6 @@ import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.SystemClock;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -88,7 +88,6 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.service.AbstractService;
import org.apache.hadoop.yarn.service.CompositeService;
import org.apache.hadoop.yarn.service.Service;
import org.apache.hadoop.yarn.util.ConverterUtils;
/**
* The Map-Reduce Application Master.
@ -115,6 +114,8 @@ public class MRAppMaster extends CompositeService {
private Clock clock;
private final long startTime = System.currentTimeMillis();
private String appName;
private final int startCount;
private final ApplicationId appID;
private final ApplicationAttemptId appAttemptID;
protected final MRAppMetrics metrics;
private Set<TaskId> completedTasksFromPreviousRun;
@ -132,16 +133,21 @@ public class MRAppMaster extends CompositeService {
private Job job;
public MRAppMaster(ApplicationAttemptId applicationAttemptId) {
this(applicationAttemptId, new SystemClock());
public MRAppMaster(ApplicationId applicationId, int startCount) {
this(applicationId, new SystemClock(), startCount);
}
public MRAppMaster(ApplicationAttemptId applicationAttemptId, Clock clock) {
public MRAppMaster(ApplicationId applicationId, Clock clock, int startCount) {
super(MRAppMaster.class.getName());
this.clock = clock;
this.appAttemptID = applicationAttemptId;
this.appID = applicationId;
this.appAttemptID = RecordFactoryProvider.getRecordFactory(null)
.newRecordInstance(ApplicationAttemptId.class);
this.appAttemptID.setApplicationId(appID);
this.appAttemptID.setAttemptId(startCount);
this.startCount = startCount;
this.metrics = MRAppMetrics.create();
LOG.info("Created MRAppMaster for application " + applicationAttemptId);
LOG.info("Created MRAppMaster for application " + applicationId);
}
@Override
@ -153,9 +159,9 @@ public class MRAppMaster extends CompositeService {
appName = conf.get(MRJobConfig.JOB_NAME, "<missing app name>");
if (conf.getBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, false)
&& appAttemptID.getAttemptId() > 1) {
&& startCount > 1) {
LOG.info("Recovery is enabled. Will try to recover from previous life.");
Recovery recoveryServ = new RecoveryService(appAttemptID, clock);
Recovery recoveryServ = new RecoveryService(appID, clock, startCount);
addIfService(recoveryServ);
dispatcher = recoveryServ.getDispatcher();
clock = recoveryServ.getClock();
@ -237,10 +243,10 @@ public class MRAppMaster extends CompositeService {
// Read the file-system tokens from the localized tokens-file.
Path jobSubmitDir =
FileContext.getLocalFSFileContext().makeQualified(
new Path(new File(MRJobConfig.JOB_SUBMIT_DIR)
new Path(new File(MRConstants.JOB_SUBMIT_DIR)
.getAbsolutePath()));
Path jobTokenFile =
new Path(jobSubmitDir, MRJobConfig.APPLICATION_TOKENS_FILE);
new Path(jobSubmitDir, MRConstants.APPLICATION_TOKENS_FILE);
fsTokens.addAll(Credentials.readTokenStorageFile(jobTokenFile, conf));
LOG.info("jobSubmitDir=" + jobSubmitDir + " jobTokenFile="
+ jobTokenFile);
@ -258,8 +264,8 @@ public class MRAppMaster extends CompositeService {
// ////////// End of obtaining the tokens needed by the job. //////////
// create single job
Job newJob = new JobImpl(appAttemptID, conf, dispatcher.getEventHandler(),
taskAttemptListener, jobTokenSecretManager, fsTokens, clock,
Job newJob = new JobImpl(appID, conf, dispatcher.getEventHandler(),
taskAttemptListener, jobTokenSecretManager, fsTokens, clock, startCount,
completedTasksFromPreviousRun, metrics, currentUser.getUserName());
((RunningAppContext) context).jobs.put(newJob.getID(), newJob);
@ -370,11 +376,11 @@ public class MRAppMaster extends CompositeService {
}
public ApplicationId getAppID() {
return appAttemptID.getApplicationId();
return appID;
}
public int getStartCount() {
return appAttemptID.getAttemptId();
return startCount;
}
public AppContext getContext() {
@ -499,7 +505,7 @@ public class MRAppMaster extends CompositeService {
@Override
public ApplicationId getApplicationID() {
return appAttemptID.getApplicationId();
return appID;
}
@Override
@ -549,9 +555,9 @@ public class MRAppMaster extends CompositeService {
// It's more test friendly to put it here.
DefaultMetricsSystem.initialize("MRAppMaster");
// create a job event for job initialization
/** create a job event for job initialization */
JobEvent initJobEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT);
// Send init to the job (this does NOT trigger job execution)
/** send init to the job (this does NOT trigger job execution) */
// This is a synchronous call, not an event through dispatcher. We want
// job-init to be done completely here.
jobEventDispatcher.handle(initJobEvent);
@ -642,21 +648,17 @@ public class MRAppMaster extends CompositeService {
public static void main(String[] args) {
try {
String applicationAttemptIdStr = System
.getenv(ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV);
if (applicationAttemptIdStr == null) {
String msg = ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV
+ " is null";
LOG.error(msg);
throw new IOException(msg);
}
ApplicationAttemptId applicationAttemptId = ConverterUtils
.toApplicationAttemptId(applicationAttemptIdStr);
MRAppMaster appMaster = new MRAppMaster(applicationAttemptId);
//Configuration.addDefaultResource("job.xml");
ApplicationId applicationId = RecordFactoryProvider
.getRecordFactory(null).newRecordInstance(ApplicationId.class);
applicationId.setClusterTimestamp(Long.valueOf(args[0]));
applicationId.setId(Integer.valueOf(args[1]));
int failCount = Integer.valueOf(args[2]);
MRAppMaster appMaster = new MRAppMaster(applicationId, failCount);
Runtime.getRuntime().addShutdownHook(
new CompositeServiceShutdownHook(appMaster));
YarnConfiguration conf = new YarnConfiguration(new JobConf());
conf.addResource(new Path(MRJobConfig.JOB_CONF_FILE));
conf.addResource(new Path(MRConstants.JOB_CONF_FILE));
conf.set(MRJobConfig.USER_NAME,
System.getProperty("user.name"));
UserGroupInformation.setConfiguration(conf);
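
The restored main() above rebuilds the application identity from three positional arguments instead of reading APPLICATION_ATTEMPT_ID from the environment. A small sketch of that parsing, using invented sample values; the real code constructs an ApplicationId record rather than a string.

// Sketch of the restored argument handling: args[0] is the cluster timestamp,
// args[1] the application id, args[2] the start (fail) count. Sample values
// are illustrative only.
public class AmArgsSketch {
  public static void main(String[] rawArgs) {
    String[] args = (rawArgs.length == 3)
        ? rawArgs
        : new String[] { "1316000000000", "42", "1" };
    long clusterTimestamp = Long.valueOf(args[0]);
    int applicationId = Integer.valueOf(args[1]);
    int failCount = Integer.valueOf(args[2]);
    System.out.println("application_" + clusterTimestamp + "_" + applicationId
        + ", attempt " + failCount);
  }
}
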


@ -149,7 +149,7 @@ public class MRClientService extends AbstractService
+ ":" + server.getPort());
LOG.info("Instantiated MRClientService at " + this.bindAddress);
try {
webApp = WebApps.$for("mapreduce", AppContext.class, appContext).with(conf).
webApp = WebApps.$for("yarn", AppContext.class, appContext).with(conf).
start(new AMWebApp());
} catch (Exception e) {
LOG.error("Webapps failed to start. Ignoring for now:", e);


@ -64,6 +64,7 @@ import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;
import org.apache.hadoop.mapreduce.task.JobContextImpl;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.mapreduce.v2.MRConstants;
import org.apache.hadoop.mapreduce.v2.api.records.Counter;
import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
@ -92,7 +93,6 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
@ -101,7 +101,6 @@ import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.factories.RecordFactory;
@ -130,11 +129,11 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
RecordFactoryProvider.getRecordFactory(null);
//final fields
private final ApplicationAttemptId applicationAttemptId;
private final Clock clock;
private final JobACLsManager aclsManager;
private final String username;
private final Map<JobACL, AccessControlList> jobACLs;
private final int startCount;
private final Set<TaskId> completedTasksFromPreviousRun;
private final Lock readLock;
private final Lock writeLock;
@ -366,26 +365,26 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
private Token<JobTokenIdentifier> jobToken;
private JobTokenSecretManager jobTokenSecretManager;
public JobImpl(ApplicationAttemptId applicationAttemptId, Configuration conf,
public JobImpl(ApplicationId appID, Configuration conf,
EventHandler eventHandler, TaskAttemptListener taskAttemptListener,
JobTokenSecretManager jobTokenSecretManager,
Credentials fsTokenCredentials, Clock clock,
Credentials fsTokenCredentials, Clock clock, int startCount,
Set<TaskId> completedTasksFromPreviousRun, MRAppMetrics metrics,
String userName) {
this.applicationAttemptId = applicationAttemptId;
this.jobId = recordFactory.newRecordInstance(JobId.class);
this.jobName = conf.get(JobContext.JOB_NAME, "<missing job name>");
this.conf = conf;
this.metrics = metrics;
this.clock = clock;
this.completedTasksFromPreviousRun = completedTasksFromPreviousRun;
this.startCount = startCount;
this.userName = userName;
ApplicationId applicationId = applicationAttemptId.getApplicationId();
jobId.setAppId(applicationId);
jobId.setId(applicationId.getId());
jobId.setAppId(appID);
jobId.setId(appID.getId());
oldJobId = TypeConverter.fromYarn(jobId);
LOG.info("Job created" +
" appId=" + applicationId +
" appId=" + appID +
" jobId=" + jobId +
" oldJobId=" + oldJobId);
@ -585,17 +584,25 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
public JobReport getReport() {
readLock.lock();
try {
JobState state = getState();
JobReport report = recordFactory.newRecordInstance(JobReport.class);
report.setJobId(jobId);
report.setJobState(getState());
if (getState() == JobState.NEW) {
return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
startTime, finishTime, setupProgress, 0.0f,
0.0f, cleanupProgress);
// TODO - Fix to correctly setup report and to check state
if (report.getJobState() == JobState.NEW) {
return report;
}
return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
startTime, finishTime, setupProgress, computeProgress(mapTasks),
computeProgress(reduceTasks), cleanupProgress);
report.setStartTime(startTime);
report.setFinishTime(finishTime);
report.setSetupProgress(setupProgress);
report.setCleanupProgress(cleanupProgress);
report.setMapProgress(computeProgress(mapTasks));
report.setReduceProgress(computeProgress(reduceTasks));
report.setJobName(jobName);
report.setUser(username);
return report;
} finally {
readLock.unlock();
}
@ -1000,7 +1007,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
FileSystem.get(job.conf).makeQualified(
new Path(path, oldJobIDString));
job.remoteJobConfFile =
new Path(job.remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);
new Path(job.remoteJobSubmitDir, MRConstants.JOB_CONF_FILE);
// Prepare the TaskAttemptListener server for authentication of Containers
// TaskAttemptListener gets the information via jobTokenSecretManager.
@ -1026,7 +1033,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
Path remoteJobTokenFile =
new Path(job.remoteJobSubmitDir,
MRJobConfig.APPLICATION_TOKENS_FILE);
MRConstants.APPLICATION_TOKENS_FILE);
tokenStorage.writeTokenStorageFile(remoteJobTokenFile, job.conf);
LOG.info("Writing back the job-token file on the remote file system:"
+ remoteJobTokenFile.toString());
@ -1071,8 +1078,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
job.conf, splits[i],
job.taskAttemptListener,
job.committer, job.jobToken, job.fsTokens.getAllTokens(),
job.clock, job.completedTasksFromPreviousRun,
job.applicationAttemptId.getAttemptId(),
job.clock, job.completedTasksFromPreviousRun, job.startCount,
job.metrics);
job.addTask(task);
}
@ -1089,9 +1095,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
job.conf, job.numMapTasks,
job.taskAttemptListener, job.committer, job.jobToken,
job.fsTokens.getAllTokens(), job.clock,
job.completedTasksFromPreviousRun,
job.applicationAttemptId.getAttemptId(),
job.metrics);
job.completedTasksFromPreviousRun, job.startCount, job.metrics);
job.addTask(task);
}
LOG.info("Number of reduces for job " + job.jobId + " = "


@ -21,6 +21,7 @@ package org.apache.hadoop.mapreduce.v2.app.job.impl;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
@ -61,6 +62,7 @@ import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptStartedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletionEvent;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.MRConstants;
import org.apache.hadoop.mapreduce.v2.api.records.Counter;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
@ -101,7 +103,6 @@ import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerToken;
@ -116,6 +117,7 @@ import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
import org.apache.hadoop.yarn.state.SingleArcTransition;
import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;
import org.apache.hadoop.yarn.util.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.RackResolver;
@ -151,7 +153,7 @@ public abstract class TaskAttemptImpl implements
private Token<JobTokenIdentifier> jobToken;
private static AtomicBoolean initialClasspathFlag = new AtomicBoolean();
private static String initialClasspath = null;
private static final Object classpathLock = new Object();
private final Object classpathLock = new Object();
private long launchTime;
private long finishTime;
private WrappedProgressSplitsBlock progressSplitBlock;
@ -516,8 +518,8 @@ public abstract class TaskAttemptImpl implements
return initialClasspath;
}
Map<String, String> env = new HashMap<String, String>();
MRApps.setClasspath(env);
initialClasspath = env.get(Environment.CLASSPATH.name());
MRApps.setInitialClasspath(env);
initialClasspath = env.get(MRApps.CLASSPATH);
initialClasspathFlag.set(true);
return initialClasspath;
}
@ -529,6 +531,9 @@ public abstract class TaskAttemptImpl implements
*/
private ContainerLaunchContext createContainerLaunchContext() {
ContainerLaunchContext container =
recordFactory.newRecordInstance(ContainerLaunchContext.class);
// Application resources
Map<String, LocalResource> localResources =
new HashMap<String, LocalResource>();
@ -536,11 +541,6 @@ public abstract class TaskAttemptImpl implements
// Application environment
Map<String, String> environment = new HashMap<String, String>();
// Service data
Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
// Tokens
ByteBuffer tokens = ByteBuffer.wrap(new byte[]{});
try {
FileSystem remoteFS = FileSystem.get(conf);
@ -550,7 +550,7 @@ public abstract class TaskAttemptImpl implements
MRJobConfig.JAR))).makeQualified(remoteFS.getUri(),
remoteFS.getWorkingDirectory());
localResources.put(
MRJobConfig.JOB_JAR,
MRConstants.JOB_JAR,
createLocalResource(remoteFS, recordFactory, remoteJobJar,
LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
LOG.info("The job-jar file on the remote FS is "
@ -570,9 +570,9 @@ public abstract class TaskAttemptImpl implements
Path remoteJobSubmitDir =
new Path(path, oldJobId.toString());
Path remoteJobConfPath =
new Path(remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);
new Path(remoteJobSubmitDir, MRConstants.JOB_CONF_FILE);
localResources.put(
MRJobConfig.JOB_CONF_FILE,
MRConstants.JOB_CONF_FILE,
createLocalResource(remoteFS, recordFactory, remoteJobConfPath,
LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
LOG.info("The job-conf file on the remote FS is "
@ -580,7 +580,11 @@ public abstract class TaskAttemptImpl implements
// //////////// End of JobConf setup
// Setup DistributedCache
MRApps.setupDistributedCache(conf, localResources);
MRApps.setupDistributedCache(conf, localResources, environment);
// Set local-resources and environment
container.setLocalResources(localResources);
container.setEnvironment(environment);
// Setup up tokens
Credentials taskCredentials = new Credentials();
@ -602,43 +606,52 @@ public abstract class TaskAttemptImpl implements
LOG.info("Size of containertokens_dob is "
+ taskCredentials.numberOfTokens());
taskCredentials.writeTokenStorageToStream(containerTokens_dob);
tokens =
container.setContainerTokens(
ByteBuffer.wrap(containerTokens_dob.getData(), 0,
containerTokens_dob.getLength());
containerTokens_dob.getLength()));
// Add shuffle token
LOG.info("Putting shuffle token in serviceData");
Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
serviceData.put(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID,
ShuffleHandler.serializeServiceData(jobToken));
container.setServiceData(serviceData);
MRApps.addToEnvironment(
environment,
Environment.CLASSPATH.name(),
getInitialClasspath());
MRApps.addToClassPath(container.getEnvironment(), getInitialClasspath());
} catch (IOException e) {
throw new YarnException(e);
}
// Setup environment
MapReduceChildJVM.setVMEnv(environment, remoteTask);
container.setContainerId(containerID);
container.setUser(conf.get(MRJobConfig.USER_NAME)); // TODO: Fix
// Set up the launch command
List<String> commands = MapReduceChildJVM.getVMCommand(
taskAttemptListener.getAddress(), remoteTask,
jvmID);
File workDir = new File("$PWD"); // Will be expanded by the shell.
String containerLogDir =
new File(ApplicationConstants.LOG_DIR_EXPANSION_VAR).toString();
String childTmpDir = new File(workDir, "tmp").toString();
String javaHome = "${JAVA_HOME}"; // Will be expanded by the shell.
String nmLdLibraryPath = "{LD_LIBRARY_PATH}"; // Expanded by the shell?
List<String> classPaths = new ArrayList<String>();
String localizedApplicationTokensFile =
new File(workDir, MRConstants.APPLICATION_TOKENS_FILE).toString();
classPaths.add(MRConstants.JOB_JAR);
classPaths.add(MRConstants.YARN_MAPREDUCE_APP_JAR_PATH);
classPaths.add(workDir.toString()); // TODO
// Construct the actual Container
container.setCommands(MapReduceChildJVM.getVMCommand(
taskAttemptListener.getAddress(), remoteTask, javaHome,
workDir.toString(), containerLogDir, childTmpDir, jvmID));
MapReduceChildJVM.setVMEnv(container.getEnvironment(), classPaths,
workDir.toString(), containerLogDir, nmLdLibraryPath, remoteTask,
localizedApplicationTokensFile);
// Construct the actual Container
ContainerLaunchContext container =
recordFactory.newRecordInstance(ContainerLaunchContext.class);
container.setContainerId(containerID);
container.setUser(conf.get(MRJobConfig.USER_NAME));
container.setResource(assignedCapability);
container.setLocalResources(localResources);
container.setEnvironment(environment);
container.setCommands(commands);
container.setServiceData(serviceData);
container.setContainerTokens(tokens);
return container;
}


@ -73,8 +73,6 @@ public class ContainerLauncherImpl extends AbstractService implements
private AppContext context;
private ThreadPoolExecutor launcherPool;
private static final int INITIAL_POOL_SIZE = 10;
private int limitOnPoolSize;
private Thread eventHandlingThread;
private BlockingQueue<ContainerLauncherEvent> eventQueue =
new LinkedBlockingQueue<ContainerLauncherEvent>();
@ -98,17 +96,16 @@ public class ContainerLauncherImpl extends AbstractService implements
YarnConfiguration.YARN_SECURITY_INFO,
ContainerManagerSecurityInfo.class, SecurityInfo.class);
this.recordFactory = RecordFactoryProvider.getRecordFactory(conf);
this.limitOnPoolSize = conf.getInt(
MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT,
MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT);
super.init(myLocalConfig);
}
public void start() {
// Start with a default core-pool size of 10 and change it dynamically.
launcherPool = new ThreadPoolExecutor(INITIAL_POOL_SIZE,
launcherPool =
new ThreadPoolExecutor(getConfig().getInt(
MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT, 10),
Integer.MAX_VALUE, 1, TimeUnit.HOURS,
new LinkedBlockingQueue<Runnable>());
launcherPool.prestartAllCoreThreads(); // Wait for work.
eventHandlingThread = new Thread(new Runnable() {
@Override
public void run() {
@ -120,26 +117,6 @@ public class ContainerLauncherImpl extends AbstractService implements
LOG.error("Returning, interrupted : " + e);
return;
}
int poolSize = launcherPool.getCorePoolSize();
// See if we need up the pool size only if haven't reached the
// maximum limit yet.
if (poolSize != limitOnPoolSize) {
// nodes where containers will run at *this* point of time. This is
// *not* the cluster size and doesn't need to be.
int numNodes = ugiMap.size();
int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
if (poolSize <= idealPoolSize) {
// Bump up the pool size to idealPoolSize+INITIAL_POOL_SIZE, the
// later is just a buffer so we are not always increasing the
// pool-size
launcherPool.setCorePoolSize(idealPoolSize + INITIAL_POOL_SIZE);
}
}
// the events from the queue are handled in parallel
// using a thread pool
launcherPool.execute(new EventProcessor(event));
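
The restored start() above creates the launcher pool once, with a configurable core size (default 10), a maximum of Integer.MAX_VALUE and pre-started core threads, whereas the removed code grew the core size towards a limit as containers spread over more nodes. A minimal sketch of the restored construction; the core size is hard-coded here in place of the MRJobConfig lookup.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Sketch of the restored launcher pool: configurable core size (default 10),
// Integer.MAX_VALUE maximum, one-hour keep-alive, and core threads started
// up front so they are already waiting when launch events arrive.
public class LauncherPoolSketch {
  public static void main(String[] args) {
    int coreSize = 10; // stands in for MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT
    ThreadPoolExecutor launcherPool = new ThreadPoolExecutor(
        coreSize, Integer.MAX_VALUE, 1, TimeUnit.HOURS,
        new LinkedBlockingQueue<Runnable>());
    launcherPool.prestartAllCoreThreads();
    System.out.println("core threads started: " + launcherPool.getPoolSize());
    launcherPool.shutdown();
  }
}
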


@ -18,7 +18,6 @@
package org.apache.hadoop.mapreduce.v2.app.local;
import java.util.ArrayList;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
@ -31,19 +30,15 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.AMResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.util.BuilderUtils;
import org.apache.hadoop.yarn.util.Records;
/**
@ -70,20 +65,6 @@ public class LocalContainerAllocator extends RMCommunicator
this.appID = context.getApplicationID();
}
@Override
protected synchronized void heartbeat() throws Exception {
AllocateRequest allocateRequest = BuilderUtils.newAllocateRequest(
this.applicationAttemptId, this.lastResponseID, super
.getApplicationProgress(), new ArrayList<ResourceRequest>(),
new ArrayList<ContainerId>());
AllocateResponse allocateResponse = scheduler.allocate(allocateRequest);
AMResponse response = allocateResponse.getAMResponse();
if (response.getReboot()) {
// TODO
LOG.info("Event from RM: shutting down Application Master");
}
}
@Override
public void handle(ContainerAllocatorEvent event) {
if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) {


@ -58,7 +58,7 @@ import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleaner;
import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleanupEvent;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
@ -92,9 +92,10 @@ public class RecoveryService extends CompositeService implements Recovery {
private static final Log LOG = LogFactory.getLog(RecoveryService.class);
private final ApplicationAttemptId applicationAttemptId;
private final ApplicationId appID;
private final Dispatcher dispatcher;
private final ControlledClock clock;
private final int startCount;
private JobInfo jobInfo = null;
private final Map<TaskId, TaskInfo> completedTasks =
@ -105,10 +106,10 @@ public class RecoveryService extends CompositeService implements Recovery {
private volatile boolean recoveryMode = false;
public RecoveryService(ApplicationAttemptId applicationAttemptId,
Clock clock) {
public RecoveryService(ApplicationId appID, Clock clock, int startCount) {
super("RecoveringDispatcher");
this.applicationAttemptId = applicationAttemptId;
this.appID = appID;
this.startCount = startCount;
this.dispatcher = new RecoveryDispatcher();
this.clock = new ControlledClock(clock);
addService((Service) dispatcher);
@ -151,8 +152,7 @@ public class RecoveryService extends CompositeService implements Recovery {
private void parse() throws IOException {
// TODO: parse history file based on startCount
String jobName =
TypeConverter.fromYarn(applicationAttemptId.getApplicationId()).toString();
String jobName = TypeConverter.fromYarn(appID).toString();
String jobhistoryDir = JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(getConfig());
FSDataInputStream in = null;
Path historyFile = null;
@ -160,9 +160,8 @@ public class RecoveryService extends CompositeService implements Recovery {
new Path(jobhistoryDir));
FileContext fc = FileContext.getFileContext(histDirPath.toUri(),
getConfig());
//read the previous history file
historyFile = fc.makeQualified(JobHistoryUtils.getStagingJobHistoryFile(
histDirPath, jobName, (applicationAttemptId.getAttemptId() - 1)));
histDirPath, jobName, startCount - 1)); //read the previous history file
in = fc.open(historyFile);
JobHistoryParser parser = new JobHistoryParser(in);
jobInfo = parser.parse();


@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.v2.app.rm;
import java.io.IOException;
import java.security.PrivilegedAction;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -28,7 +29,6 @@ import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
@ -42,12 +42,17 @@ import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.AMRMProtocol;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.AMResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.factories.RecordFactory;
@ -59,7 +64,7 @@ import org.apache.hadoop.yarn.service.AbstractService;
/**
* Registers/unregisters to RM and sends heartbeats to RM.
*/
public abstract class RMCommunicator extends AbstractService {
public class RMCommunicator extends AbstractService {
private static final Log LOG = LogFactory.getLog(RMContainerAllocator.class);
private int rmPollInterval;//millis
protected ApplicationId applicationId;
@ -69,7 +74,7 @@ public abstract class RMCommunicator extends AbstractService {
protected EventHandler eventHandler;
protected AMRMProtocol scheduler;
private final ClientService clientService;
protected int lastResponseID;
private int lastResponseID;
private Resource minContainerCapability;
private Resource maxContainerCapability;
@ -116,34 +121,6 @@ public abstract class RMCommunicator extends AbstractService {
return job;
}
/**
* Get the appProgress. Can be used only after this component is started.
* @return the appProgress.
*/
protected float getApplicationProgress() {
// For now just a single job. In future when we have a DAG, we need an
// aggregate progress.
JobReport report = this.job.getReport();
float setupWeight = 0.05f;
float cleanupWeight = 0.05f;
float mapWeight = 0.0f;
float reduceWeight = 0.0f;
int numMaps = this.job.getTotalMaps();
int numReduces = this.job.getTotalReduces();
if (numMaps == 0 && numReduces == 0) {
} else if (numMaps == 0) {
reduceWeight = 0.9f;
} else if (numReduces == 0) {
mapWeight = 0.9f;
} else {
mapWeight = reduceWeight = 0.45f;
}
return (report.getSetupProgress() * setupWeight
+ report.getCleanupProgress() * cleanupWeight
+ report.getMapProgress() * mapWeight + report.getReduceProgress()
* reduceWeight);
}
protected void register() {
//Register
String host =
@ -285,5 +262,18 @@ public abstract class RMCommunicator extends AbstractService {
});
}
protected abstract void heartbeat() throws Exception;
protected synchronized void heartbeat() throws Exception {
AllocateRequest allocateRequest =
recordFactory.newRecordInstance(AllocateRequest.class);
allocateRequest.setApplicationAttemptId(applicationAttemptId);
allocateRequest.setResponseId(lastResponseID);
allocateRequest.addAllAsks(new ArrayList<ResourceRequest>());
allocateRequest.addAllReleases(new ArrayList<ContainerId>());
AllocateResponse allocateResponse = scheduler.allocate(allocateRequest);
AMResponse response = allocateResponse.getAMResponse();
if (response.getReboot()) {
LOG.info("Event from RM: shutting down Application Master");
}
}
}
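
The getApplicationProgress() removed above folds per-phase progress into one number: 5% each for setup and cleanup, with the remaining 90% split between map and reduce depending on which phases the job has. A standalone sketch of that weighting, with invented inputs.

// Standalone sketch of the weighting used by the removed getApplicationProgress():
// 5% setup, 5% cleanup, the remaining 90% split across map and reduce
// depending on which of those phases exist.
public class ProgressWeightSketch {
  static float appProgress(int numMaps, int numReduces,
      float setup, float cleanup, float map, float reduce) {
    float setupWeight = 0.05f;
    float cleanupWeight = 0.05f;
    float mapWeight = 0.0f;
    float reduceWeight = 0.0f;
    if (numMaps == 0 && numReduces == 0) {
      // nothing to weight
    } else if (numMaps == 0) {
      reduceWeight = 0.9f;
    } else if (numReduces == 0) {
      mapWeight = 0.9f;
    } else {
      mapWeight = reduceWeight = 0.45f;
    }
    return setup * setupWeight + cleanup * cleanupWeight
        + map * mapWeight + reduce * reduceWeight;
  }

  public static void main(String[] args) {
    // Invented inputs: 10 maps, 2 reduces, setup done, maps half way,
    // reduces not started, no cleanup yet -> roughly 0.275.
    System.out.println(appProgress(10, 2, 1.0f, 0.0f, 0.5f, 0.0f));
  }
}
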


@ -586,19 +586,35 @@ public class RMContainerAllocator extends RMContainerRequestor
private ContainerRequest assign(Container allocated) {
ContainerRequest assigned = null;
Priority priority = allocated.getPriority();
if (PRIORITY_FAST_FAIL_MAP.equals(priority)) {
LOG.info("Assigning container " + allocated + " to fast fail map");
if (mapResourceReqt != reduceResourceReqt) {
//assign based on size
LOG.info("Assigning based on container size");
if (allocated.getResource().getMemory() == mapResourceReqt) {
assigned = assignToFailedMap(allocated);
} else if (PRIORITY_REDUCE.equals(priority)) {
LOG.info("Assigning container " + allocated + " to reduce");
assigned = assignToReduce(allocated);
} else if (PRIORITY_MAP.equals(priority)) {
LOG.info("Assigning container " + allocated + " to map");
if (assigned == null) {
assigned = assignToMap(allocated);
}
} else if (allocated.getResource().getMemory() == reduceResourceReqt) {
assigned = assignToReduce(allocated);
}
return assigned;
}
//container can be given to either map or reduce
//assign based on priority
//try to assign to earlierFailedMaps if present
assigned = assignToFailedMap(allocated);
//Assign to reduces before assigning to maps ?
if (assigned == null) {
assigned = assignToReduce(allocated);
}
//try to assign to maps if present
if (assigned == null) {
assigned = assignToMap(allocated);
} else {
LOG.warn("Container allocated at unwanted priority: " + priority +
". Returning to RM...");
}
return assigned;
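
The assign() logic kept by this revert picks map versus reduce by matching the allocated container's memory against the per-task resource requests when those differ, falling back to the failed-map/reduce/map preference order when they are equal; the removed variant dispatched on request priority instead. A rough sketch of that decision with invented memory values.

// Rough sketch of the size-based assignment kept by this revert: when map and
// reduce requests differ in memory, the container's memory decides; when they
// are equal, preference falls back to failed maps, then reduces, then maps.
public class AssignBySizeSketch {
  static String assign(int containerMem, int mapReqt, int reduceReqt,
      boolean haveFailedMaps, boolean haveReduces, boolean haveMaps) {
    if (mapReqt != reduceReqt) {
      if (containerMem == mapReqt) {
        return haveFailedMaps ? "failed map" : (haveMaps ? "map" : "none");
      } else if (containerMem == reduceReqt) {
        return haveReduces ? "reduce" : "none";
      }
      return "none";
    }
    if (haveFailedMaps) return "failed map";
    if (haveReduces) return "reduce";
    if (haveMaps) return "map";
    return "none";
  }

  public static void main(String[] args) {
    System.out.println(assign(1024, 1024, 2048, false, true, true)); // map
    System.out.println(assign(2048, 1024, 2048, false, true, true)); // reduce
    System.out.println(assign(1024, 1024, 1024, true, true, true));  // failed map
  }
}
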


@ -43,7 +43,6 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.util.BuilderUtils;
/**
* Keeps the data structures to send container requests to RM.
@ -108,11 +107,15 @@ public abstract class RMContainerRequestor extends RMCommunicator {
LOG.info("maxTaskFailuresPerNode is " + maxTaskFailuresPerNode);
}
protected abstract void heartbeat() throws Exception;
protected AMResponse makeRemoteRequest() throws YarnRemoteException {
AllocateRequest allocateRequest = BuilderUtils.newAllocateRequest(
applicationAttemptId, lastResponseID, super.getApplicationProgress(),
new ArrayList<ResourceRequest>(ask), new ArrayList<ContainerId>(
release));
AllocateRequest allocateRequest = recordFactory
.newRecordInstance(AllocateRequest.class);
allocateRequest.setApplicationAttemptId(applicationAttemptId);
allocateRequest.setResponseId(lastResponseID);
allocateRequest.addAllAsks(new ArrayList<ResourceRequest>(ask));
allocateRequest.addAllReleases(new ArrayList<ContainerId>(release));
AllocateResponse allocateResponse = scheduler.allocate(allocateRequest);
AMResponse response = allocateResponse.getAMResponse();
lastResponseID = response.getResponseId();


@ -35,6 +35,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.MRConstants;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
@ -86,7 +87,7 @@ public class DefaultSpeculator extends AbstractService implements
private final ConcurrentMap<JobId, AtomicInteger> reduceContainerNeeds
= new ConcurrentHashMap<JobId, AtomicInteger>();
private final Set<TaskId> mayHaveSpeculated = new HashSet<TaskId>();
private final Set<TaskId> mayHaveSpeculated = new HashSet();
private final Configuration conf;
private AppContext context;


@ -44,7 +44,6 @@ public class JobConfPage extends AppView {
set(TITLE, jobID.isEmpty() ? "Bad request: missing job ID"
: join("Configuration for MapReduce Job ", $(JOB_ID)));
commonPreHead(html);
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:2}");
set(DATATABLES_ID, "conf");
set(initID(DATATABLES, "conf"), confTableInit());
set(postInitID(DATATABLES, "conf"), confPostTableInit());


@ -38,9 +38,9 @@ public class NavBlock extends HtmlBlock {
div("#nav").
h3("Cluster").
ul().
li().a(url(rmweb, "cluster", "cluster"), "About")._().
li().a(url(rmweb, "cluster", "apps"), "Applications")._().
li().a(url(rmweb, "cluster", "scheduler"), "Scheduler")._()._().
li().a(url(rmweb, prefix(), "cluster"), "About")._().
li().a(url(rmweb, prefix(), "apps"), "Applications")._().
li().a(url(rmweb, prefix(), "scheduler"), "Scheduler")._()._().
h3("Application").
ul().
li().a(url("app/info"), "About")._().


@ -85,7 +85,7 @@ public class TaskPage extends AppView {
if (containerId != null) {
String containerIdStr = ConverterUtils.toString(containerId);
nodeTd._(" ").
a(".logslink", url("http://", nodeHttpAddr, "node", "containerlogs",
a(".logslink", url("http://", nodeHttpAddr, "yarn", "containerlogs",
containerIdStr), "logs");
}
nodeTd._().


@ -66,7 +66,6 @@ import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
@ -92,7 +91,7 @@ public class MRApp extends MRAppMaster {
private File testWorkDir;
private Path testAbsPath;
private static final RecordFactory recordFactory =
private final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
//if true, tasks complete automatically as soon as they are launched
@ -101,7 +100,7 @@ public class MRApp extends MRAppMaster {
static ApplicationId applicationId;
static {
applicationId = recordFactory.newRecordInstance(ApplicationId.class);
applicationId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class);
applicationId.setClusterTimestamp(0);
applicationId.setId(0);
}
@ -110,18 +109,8 @@ public class MRApp extends MRAppMaster {
this(maps, reduces, autoComplete, testName, cleanOnStart, 1);
}
private static ApplicationAttemptId getApplicationAttemptId(
ApplicationId applicationId, int startCount) {
ApplicationAttemptId applicationAttemptId =
recordFactory.newRecordInstance(ApplicationAttemptId.class);
applicationAttemptId.setApplicationId(applicationId);
applicationAttemptId.setAttemptId(startCount);
return applicationAttemptId;
}
public MRApp(int maps, int reduces, boolean autoComplete, String testName,
boolean cleanOnStart, int startCount) {
super(getApplicationAttemptId(applicationId, startCount));
public MRApp(int maps, int reduces, boolean autoComplete, String testName, boolean cleanOnStart, int startCount) {
super(applicationId, startCount);
this.testWorkDir = new File("target", testName);
testAbsPath = new Path(testWorkDir.getAbsolutePath());
LOG.info("PathUsed: " + testAbsPath);
@ -402,12 +391,11 @@ public class MRApp extends MRAppMaster {
return localStateMachine;
}
public TestJob(Configuration conf, ApplicationId applicationId,
public TestJob(Configuration conf, ApplicationId appID,
EventHandler eventHandler, TaskAttemptListener taskAttemptListener,
Clock clock, String user) {
super(getApplicationAttemptId(applicationId, getStartCount()),
conf, eventHandler, taskAttemptListener,
new JobTokenSecretManager(), new Credentials(), clock,
super(appID, conf, eventHandler, taskAttemptListener,
new JobTokenSecretManager(), new Credentials(), clock, getStartCount(),
getCompletedTaskFromPreviousRun(), metrics, user);
// This "this leak" is okay because the retained pointer is in an


@ -47,7 +47,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationState;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@ -281,28 +280,16 @@ public class TypeConverter {
}
public static org.apache.hadoop.mapred.JobStatus fromYarn(
JobReport jobreport, String jobFile) {
JobReport jobreport, String jobFile, String trackingUrl) {
JobPriority jobPriority = JobPriority.NORMAL;
org.apache.hadoop.mapred.JobStatus jobStatus =
new org.apache.hadoop.mapred.JobStatus(fromYarn(jobreport.getJobId()),
return new org.apache.hadoop.mapred.JobStatus(fromYarn(jobreport.getJobId()),
jobreport.getSetupProgress(), jobreport.getMapProgress(),
jobreport.getReduceProgress(), jobreport.getCleanupProgress(),
fromYarn(jobreport.getJobState()),
jobPriority, jobreport.getUser(), jobreport.getJobName(),
jobFile, jobreport.getTrackingUrl());
jobStatus.setFailureInfo(jobreport.getDiagnostics());
return jobStatus;
jobFile, trackingUrl);
}
public static org.apache.hadoop.mapreduce.QueueState fromYarn(
QueueState state) {
org.apache.hadoop.mapreduce.QueueState qState =
org.apache.hadoop.mapreduce.QueueState.getState(
state.toString().toLowerCase());
return qState;
}
public static int fromYarn(JobState state) {
switch (state) {
case NEW:
@ -425,7 +412,6 @@ public class TypeConverter {
);
jobStatus.setSchedulingInfo(trackingUrl); // Set AM tracking url
jobStatus.setStartTime(application.getStartTime());
jobStatus.setFailureInfo(application.getDiagnostics());
return jobStatus;
}
@ -445,9 +431,9 @@ public class TypeConverter {
public static QueueInfo fromYarn(org.apache.hadoop.yarn.api.records.QueueInfo
queueInfo, Configuration conf) {
return new QueueInfo(queueInfo.getQueueName(),queueInfo.toString(),
fromYarn(queueInfo.getQueueState()), TypeConverter.fromYarnApps(
queueInfo.getApplications(), conf));
return new QueueInfo(queueInfo.getQueueName(),
queueInfo.toString(), QueueState.RUNNING,
TypeConverter.fromYarnApps(queueInfo.getApplications(), conf));
}
public static QueueInfo[] fromYarnQueueInfo(


@ -0,0 +1,50 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface MRConstants {
// This should be the directory where splits file gets localized on the node
// running ApplicationMaster.
public static final String JOB_SUBMIT_DIR = "jobSubmitDir";
// This should be the name of the localized job-configuration file on the node
// running ApplicationMaster and Task
public static final String JOB_CONF_FILE = "job.xml";
// This should be the name of the localized job-jar file on the node running
// individual containers/tasks.
public static final String JOB_JAR = "job.jar";
public static final String HADOOP_MAPREDUCE_CLIENT_APP_JAR_NAME =
"hadoop-mapreduce-client-app-0.24.0-SNAPSHOT.jar";
public static final String YARN_MAPREDUCE_APP_JAR_PATH =
"$YARN_HOME/modules/" + HADOOP_MAPREDUCE_CLIENT_APP_JAR_NAME;
// The token file for the application. Should contain tokens for access to
// remote file system and may optionally contain application specific tokens.
// For now, generated by the AppManagers and used by NodeManagers and the
// Containers.
public static final String APPLICATION_TOKENS_FILE = "appTokens";
}
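
The MRConstants interface above is reintroduced wholesale by this revert; a short sketch of how its values are combined elsewhere in this diff, for example in the MRAppMaster hunk that resolves the tokens file under the localized submit directory. The paths printed here are machine-dependent and purely illustrative.

import java.io.File;

// Sketch of combining the MRConstants values the way the MRAppMaster hunk in
// this diff does: the tokens file lives under the localized job-submit
// directory, while job.xml is a plain file name resolved in the container's
// working directory.
public class MRConstantsSketch {
  static final String JOB_SUBMIT_DIR = "jobSubmitDir";
  static final String JOB_CONF_FILE = "job.xml";
  static final String APPLICATION_TOKENS_FILE = "appTokens";

  public static void main(String[] args) {
    File jobSubmitDir = new File(JOB_SUBMIT_DIR).getAbsoluteFile();
    File jobTokenFile = new File(jobSubmitDir, APPLICATION_TOKENS_FILE);
    System.out.println("jobSubmitDir=" + jobSubmitDir);
    System.out.println("jobTokenFile=" + jobTokenFile);
    System.out.println("job conf resolved as: " + new File(JOB_CONF_FILE));
  }
}
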


@ -29,8 +29,6 @@ public interface JobReport {
public abstract long getFinishTime();
public abstract String getUser();
public abstract String getJobName();
public abstract String getTrackingUrl();
public abstract String getDiagnostics();
public abstract void setJobId(JobId jobId);
public abstract void setJobState(JobState jobState);
@ -42,6 +40,4 @@ public interface JobReport {
public abstract void setFinishTime(long finishTime);
public abstract void setUser(String user);
public abstract void setJobName(String jobName);
public abstract void setTrackingUrl(String trackingUrl);
public abstract void setDiagnostics(String diagnostics);
}


@ -206,30 +206,6 @@ public class JobReportPBImpl extends ProtoBase<JobReportProto> implements JobRep
builder.setJobName((jobName));
}
@Override
public String getTrackingUrl() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getTrackingUrl());
}
@Override
public void setTrackingUrl(String trackingUrl) {
maybeInitBuilder();
builder.setTrackingUrl(trackingUrl);
}
@Override
public String getDiagnostics() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return p.getDiagnostics();
}
@Override
public void setDiagnostics(String diagnostics) {
maybeInitBuilder();
builder.setDiagnostics(diagnostics);
}
private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
return new JobIdPBImpl(p);
}


@ -489,7 +489,7 @@ public class JobHistoryUtils {
sb.append(address.getHostName());
}
sb.append(":").append(address.getPort());
sb.append("/jobhistory/job/"); // TODO This will change when the history server
sb.append("/yarn/job/"); // TODO This will change when the history server
// understands apps.
// TODO Use JobId toString once UI stops using _id_id
sb.append("job_").append(appId.getClusterTimestamp());


@ -39,14 +39,14 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.v2.MRConstants;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
@ -167,7 +167,7 @@ public class MRApps extends Apps {
return TaskAttemptStateUI.valueOf(attemptStateStr);
}
private static void setMRFrameworkClasspath(
public static void setInitialClasspath(
Map<String, String> environment) throws IOException {
InputStream classpathFileStream = null;
BufferedReader reader = null;
@ -182,17 +182,30 @@ public class MRApps extends Apps {
reader = new BufferedReader(new InputStreamReader(classpathFileStream));
String cp = reader.readLine();
if (cp != null) {
addToEnvironment(environment, Environment.CLASSPATH.name(), cp.trim());
addToClassPath(environment, cp.trim());
}
// Put the file itself on classpath for tasks.
addToEnvironment(
environment,
Environment.CLASSPATH.name(),
addToClassPath(environment,
thisClassLoader.getResource(mrAppGeneratedClasspathFile).getFile());
// Add standard Hadoop classes
for (String c : ApplicationConstants.APPLICATION_CLASSPATH) {
addToEnvironment(environment, Environment.CLASSPATH.name(), c);
// If runtime env is different.
if (System.getenv().get("YARN_HOME") != null) {
ShellCommandExecutor exec =
new ShellCommandExecutor(new String[] {
System.getenv().get("YARN_HOME") + "/bin/yarn",
"classpath" });
exec.execute();
addToClassPath(environment, exec.getOutput().trim());
}
// Get yarn mapreduce-app classpath
if (System.getenv().get("HADOOP_MAPRED_HOME")!= null) {
ShellCommandExecutor exec =
new ShellCommandExecutor(new String[] {
System.getenv().get("HADOOP_MAPRED_HOME") + "/bin/mapred",
"classpath" });
exec.execute();
addToClassPath(environment, exec.getOutput().trim());
}
} finally {
if (classpathFileStream != null) {
@ -205,33 +218,18 @@ public class MRApps extends Apps {
// TODO: Remove duplicates.
}
private static final String SYSTEM_PATH_SEPARATOR =
System.getProperty("path.separator");
public static void addToEnvironment(
Map<String, String> environment,
String variable, String value) {
String val = environment.get(variable);
if (val == null) {
val = value;
public static void addToClassPath(
Map<String, String> environment, String fileName) {
String classpath = environment.get(CLASSPATH);
if (classpath == null) {
classpath = fileName;
} else {
val = val + SYSTEM_PATH_SEPARATOR + value;
classpath = classpath + ":" + fileName;
}
environment.put(variable, val);
environment.put(CLASSPATH, classpath);
}
public static void setClasspath(Map<String, String> environment)
throws IOException {
MRApps.addToEnvironment(
environment,
Environment.CLASSPATH.name(),
MRJobConfig.JOB_JAR);
MRApps.addToEnvironment(
environment,
Environment.CLASSPATH.name(),
Environment.PWD.$() + Path.SEPARATOR + "*");
MRApps.setMRFrameworkClasspath(environment);
}
public static final String CLASSPATH = "CLASSPATH";
private static final String STAGING_CONSTANT = ".staging";
public static Path getStagingAreaDir(Configuration conf, String user) {
@ -243,7 +241,7 @@ public class MRApps extends Apps {
public static String getJobFile(Configuration conf, String user,
org.apache.hadoop.mapreduce.JobID jobId) {
Path jobFile = new Path(MRApps.getStagingAreaDir(conf, user),
jobId.toString() + Path.SEPARATOR + MRJobConfig.JOB_CONF_FILE);
jobId.toString() + Path.SEPARATOR + MRConstants.JOB_CONF_FILE);
return jobFile.toString();
}
@ -262,11 +260,12 @@ public class MRApps extends Apps {
public static void setupDistributedCache(
Configuration conf,
Map<String, LocalResource> localResources)
Map<String, LocalResource> localResources,
Map<String, String> env)
throws IOException {
// Cache archives
parseDistributedCacheArtifacts(conf, localResources,
parseDistributedCacheArtifacts(conf, localResources, env,
LocalResourceType.ARCHIVE,
DistributedCache.getCacheArchives(conf),
parseTimeStamps(DistributedCache.getArchiveTimestamps(conf)),
@ -276,7 +275,7 @@ public class MRApps extends Apps {
// Cache files
parseDistributedCacheArtifacts(conf,
localResources,
localResources, env,
LocalResourceType.FILE,
DistributedCache.getCacheFiles(conf),
parseTimeStamps(DistributedCache.getFileTimestamps(conf)),
@ -291,6 +290,7 @@ public class MRApps extends Apps {
private static void parseDistributedCacheArtifacts(
Configuration conf,
Map<String, LocalResource> localResources,
Map<String, String> env,
LocalResourceType type,
URI[] uris, long[] timestamps, long[] sizes, boolean visibilities[],
Path[] pathsToPutOnClasspath) throws IOException {
@ -339,6 +339,9 @@ public class MRApps extends Apps {
: LocalResourceVisibility.PRIVATE,
sizes[i], timestamps[i])
);
if (classPaths.containsKey(u.getPath())) {
MRApps.addToClassPath(env, linkName);
}
}
}
}
@ -356,42 +359,6 @@ public class MRApps extends Apps {
return result;
}
public static void setEnvFromInputString(Map<String, String> env,
String envString) {
if (envString != null && envString.length() > 0) {
String childEnvs[] = envString.split(",");
for (String cEnv : childEnvs) {
String[] parts = cEnv.split("="); // split on '='
String value = env.get(parts[0]);
if (value != null) {
// Replace $env with the child's env constructed by NM's
// For example: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp
value = parts[1].replace("$" + parts[0], value);
} else {
// example PATH=$PATH:/tmp
value = System.getenv(parts[0]);
if (value != null) {
// the env key is present in the tt's env
value = parts[1].replace("$" + parts[0], value);
} else {
// check for simple variable substitution
// for e.g. ROOT=$HOME
String envValue = System.getenv(parts[1].substring(1));
if (envValue != null) {
value = envValue;
} else {
// the env key is not present anywhere .. simply set it
// example X=$X:/tmp or X=/tmp
value = parts[1].replace("$" + parts[0], "");
}
}
}
addToEnvironment(env, parts[0], value);
}
}
}
}
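
The restored MRApps.addToClassPath above accumulates entries under a single CLASSPATH key joined with a hard-coded ':' separator, where the removed addToEnvironment variant generalised this to any variable and the platform path separator. A self-contained illustration of the restored behaviour.

import java.util.HashMap;
import java.util.Map;

// Minimal sketch of the restored MRApps.addToClassPath behaviour: entries are
// accumulated under one "CLASSPATH" key and joined with ':'.
public class ClasspathEnvSketch {
  static final String CLASSPATH = "CLASSPATH";

  static void addToClassPath(Map<String, String> env, String fileName) {
    String classpath = env.get(CLASSPATH);
    env.put(CLASSPATH, classpath == null ? fileName : classpath + ":" + fileName);
  }

  public static void main(String[] args) {
    Map<String, String> env = new HashMap<String, String>();
    addToClassPath(env, "job.jar");
    addToClassPath(env, "$YARN_HOME/modules/hadoop-mapreduce-client-app-0.24.0-SNAPSHOT.jar");
    System.out.println(env.get(CLASSPATH));
  }
}
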


@ -19,25 +19,27 @@
package org.apache.hadoop.mapreduce.v2.util;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.util.Records;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
public class MRBuilderUtils {
private static final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
public static JobId newJobId(ApplicationId appId, int id) {
JobId jobId = Records.newRecord(JobId.class);
JobId jobId = recordFactory.newRecordInstance(JobId.class);
jobId.setAppId(appId);
jobId.setId(id);
return jobId;
}
public static TaskId newTaskId(JobId jobId, int id, TaskType taskType) {
TaskId taskId = Records.newRecord(TaskId.class);
TaskId taskId = recordFactory.newRecordInstance(TaskId.class);
taskId.setJobId(jobId);
taskId.setId(id);
taskId.setTaskType(taskType);
@ -46,27 +48,9 @@ public class MRBuilderUtils {
public static TaskAttemptId newTaskAttemptId(TaskId taskId, int attemptId) {
TaskAttemptId taskAttemptId =
Records.newRecord(TaskAttemptId.class);
recordFactory.newRecordInstance(TaskAttemptId.class);
taskAttemptId.setTaskId(taskId);
taskAttemptId.setId(attemptId);
return taskAttemptId;
}
public static JobReport newJobReport(JobId jobId, String jobName,
String userName, JobState state, long startTime, long finishTime,
float setupProgress, float mapProgress, float reduceProgress,
float cleanupProgress) {
JobReport report = Records.newRecord(JobReport.class);
report.setJobId(jobId);
report.setJobName(jobName);
report.setUser(userName);
report.setJobState(state);
report.setStartTime(startTime);
report.setFinishTime(finishTime);
report.setSetupProgress(setupProgress);
report.setCleanupProgress(cleanupProgress);
report.setMapProgress(mapProgress);
report.setReduceProgress(reduceProgress);
return report;
}
}


@ -143,8 +143,6 @@ message JobReportProto {
optional int64 finish_time = 8;
optional string user = 9;
optional string jobName = 10;
optional string trackingUrl = 11;
optional string diagnostics = 12;
}
enum TaskAttemptCompletionEventStatusProto {


@ -19,14 +19,11 @@ package org.apache.hadoop.mapreduce;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationState;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationReportPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.QueueInfoPBImpl;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.junit.Test;
@ -70,14 +67,4 @@ public class TestTypeConverter {
Assert.assertEquals("jobId set incorrectly", 6789, status.getJobID().getId());
Assert.assertEquals("state set incorrectly", JobStatus.State.KILLED, status.getState());
}
@Test
public void testFromYarnQueueInfo() {
org.apache.hadoop.yarn.api.records.QueueInfo queueInfo = new QueueInfoPBImpl();
queueInfo.setQueueState(org.apache.hadoop.yarn.api.records.QueueState.STOPPED);
org.apache.hadoop.mapreduce.QueueInfo returned =
TypeConverter.fromYarn(queueInfo, new Configuration());
Assert.assertEquals("queueInfo translation didn't work.",
returned.getState().toString(), queueInfo.getQueueState().toString().toLowerCase());
}
}


@ -25,6 +25,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.MRConstants;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@ -114,8 +115,7 @@ public class TestMRApps {
@Test public void testGetJobFileWithUser() {
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, "/my/path/to/staging");
String jobFile = MRApps.getJobFile(conf, "dummy-user",
new JobID("dummy-job", 12345));
String jobFile = MRApps.getJobFile(conf, "dummy-user", new JobID("dummy-job", 12345));
assertNotNull("getJobFile results in null.", jobFile);
assertEquals("jobFile with specified user is not as expected.",
"/my/path/to/staging/dummy-user/.staging/job_dummy-job_12345/job.xml", jobFile);


@ -41,7 +41,6 @@ import org.apache.hadoop.mapred.IFile.Reader;
import org.apache.hadoop.mapred.IFile.Writer;
import org.apache.hadoop.mapred.Merger.Segment;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
/**
@ -561,7 +560,7 @@ public class BackupStore<K,V> {
private Writer<K,V> createSpillFile() throws IOException {
Path tmp =
new Path(MRJobConfig.OUTPUT + "/backup_" + tid.getId() + "_"
new Path(Constants.OUTPUT + "/backup_" + tid.getId() + "_"
+ (spillNumber++) + ".out");
LOG.info("Created file: " + tmp);


@ -16,21 +16,12 @@
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
package org.apache.hadoop.mapred;
import org.apache.hadoop.yarn.api.records.ApplicationId;
public class RMAppFailedAttemptEvent extends RMAppEvent {
private final String diagnostics;
public RMAppFailedAttemptEvent(ApplicationId appId, RMAppEventType event,
String diagnostics) {
super(appId, event);
this.diagnostics = diagnostics;
}
public String getDiagnostics() {
return this.diagnostics;
}
public class Constants {
static final String OUTPUT = "output";
public static final String HADOOP_WORK_DIR = "HADOOP_WORK_DIR";
public static final String JOBFILE = "job.xml";
public static final String STDOUT_LOGFILE_ENV = "STDOUT_LOGFILE_ENV";
public static final String STDERR_LOGFILE_ENV = "STDERR_LOGFILE_ENV";
}

Some files were not shown because too many files have changed in this diff.