Merge r1441206 through r1444434 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1444439 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-02-09 21:29:44 +00:00
commit fa8bafbd46
147 changed files with 4302 additions and 996 deletions

View File

@ -91,13 +91,19 @@
<includes>
<include>${project.artifactId}-${project.version}.jar</include>
<include>${project.artifactId}-${project.version}-tests.jar</include>
- <include>${project.artifactId}-${project.version}-sources.jar</include>
- <include>${project.artifactId}-${project.version}-test-sources.jar</include>
</includes>
<excludes>
<exclude>hadoop-tools-dist-*.jar</exclude>
</excludes>
</fileSet>
+ <fileSet>
+ <directory>${project.build.directory}</directory>
+ <outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>
+ <includes>
+ <include>${project.artifactId}-${project.version}-sources.jar</include>
+ <include>${project.artifactId}-${project.version}-test-sources.jar</include>
+ </includes>
+ </fileSet>
<fileSet>
<directory>${basedir}/dev-support/jdiff</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/jdiff</outputDirectory>

View File

@ -149,6 +149,8 @@ Trunk (Unreleased)
HADOOP-8924. Add maven plugin alternative to shell script to save
package-info.java. (Chris Nauroth via suresh)

+ HADOOP-9277. Improve javadoc for FileContext. (Andrew Wang via suresh)

BUG FIXES

HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang)
@ -339,7 +341,26 @@ Trunk (Unreleased)
HADOOP-9190. packaging docs is broken. (Andy Isaacson via atm)

- Release 2.0.3-alpha - Unreleased
+ Release 2.0.4-beta - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ NEW FEATURES
+
+ HADOOP-9283. Add support for running the Hadoop client on AIX. (atm)
+
+ IMPROVEMENTS
+
+ HADOOP-9253. Capture ulimit info in the logs at service start time.
+ (Arpit Gupta via suresh)
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+
+ HADOOP-9294. GetGroupsTestBase fails on Windows. (Chris Nauroth via suresh)
+
+ Release 2.0.3-alpha - 2013-02-06

INCOMPATIBLE CHANGES
@ -467,6 +488,9 @@ Release 2.0.3-alpha - Unreleased
HADOOP-9231. Parametrize staging URL for the uniformity of
distributionManagement. (Konstantin Boudnik via suresh)

+ HADOOP-9276. Allow BoundedByteArrayOutputStream to be resettable.
+ (Arun Murthy via hitesh)

OPTIMIZATIONS

HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang
@ -592,6 +616,22 @@ Release 2.0.3-alpha - Unreleased
HADOOP-9221. Convert remaining xdocs to APT. (Andy Isaacson via atm)

HADOOP-8981. TestMetricsSystemImpl fails on Windows. (Xuan Gong via suresh)

+ HADOOP-9124. SortedMapWritable violates contract of Map interface for
+ equals() and hashCode(). (Surenkumar Nihalani via tomwhite)
+
+ HADOOP-9252. In StringUtils, humanReadableInt(..) has a race condition and
+ the synchronization of limitDecimalTo2(double) can be avoided. (szetszwo)
+
+ HADOOP-9260. Hadoop version may be not correct when starting name node or
+ data node. (Chris Nauroth via jlowe)
+
+ HADOOP-9278. Fix the file handle leak in HarMetaData.parseMetaData() in
+ HarFileSystem. (Chris Nauroth via szetszwo)
+
+ HADOOP-9289. FsShell rm -f fails for non-matching globs. (Daryn Sharp via
+ suresh)

Release 2.0.2-alpha - 2012-09-07
@ -1294,6 +1334,9 @@ Release 0.23.7 - UNRELEASED
HADOOP-8849. FileUtil#fullyDelete should grant the target directories +rwx
permissions (Ivan A. Veselovsky via bobby)

+ HADOOP-9067. provide test for LocalFileSystem.reportChecksumFailure
+ (Ivan A. Veselovsky via bobby)

OPTIMIZATIONS

BUG FIXES

View File

@ -83,7 +83,8 @@ fi
if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
export HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR export HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
export HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR export HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
export HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER export HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
starting_secure_dn="true"
fi fi
if [ "$HADOOP_IDENT_STRING" = "" ]; then if [ "$HADOOP_IDENT_STRING" = "" ]; then
@ -154,7 +155,17 @@ case $startStop in
;;
esac
echo $! > $pid
- sleep 1; head "$log"
+ sleep 1
+ # capture the ulimit output
+ if [ "true" = "$starting_secure_dn" ]; then
+ echo "ulimit -a for secure datanode user $HADOOP_SECURE_DN_USER" >> $log
+ # capture the ulimit info for the appropriate user
+ su --shell=/bin/bash $HADOOP_SECURE_DN_USER -c 'ulimit -a' >> $log 2>&1
+ else
+ echo "ulimit -a for user $USER" >> $log
+ ulimit -a >> $log 2>&1
+ fi
+ head -30 "$log"
sleep 3;
if ! ps -p $! > /dev/null ; then
exit 1

View File

@ -57,70 +57,60 @@ import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ShutdownHookManager;
/**
- * The FileContext class provides an interface to the application writer for
- * using the Hadoop file system.
- * It provides a set of methods for the usual operation: create, open,
- * list, etc
- *
- * <p>
- * <b> *** Path Names *** </b>
- * <p>
- *
- * The Hadoop file system supports a URI name space and URI names.
- * It offers a forest of file systems that can be referenced using fully
- * qualified URIs.
- * Two common Hadoop file systems implementations are
- * <ul>
- * <li> the local file system: file:///path
- * <li> the hdfs file system hdfs://nnAddress:nnPort/path
- * </ul>
- *
- * While URI names are very flexible, it requires knowing the name or address
- * of the server. For convenience one often wants to access the default system
- * in one's environment without knowing its name/address. This has an
- * additional benefit that it allows one to change one's default fs
- * (e.g. admin moves application from cluster1 to cluster2).
- * <p>
- *
- * To facilitate this, Hadoop supports a notion of a default file system.
- * The user can set his default file system, although this is
- * typically set up for you in your environment via your default config.
- * A default file system implies a default scheme and authority; slash-relative
- * names (such as /for/bar) are resolved relative to that default FS.
- * Similarly a user can also have working-directory-relative names (i.e. names
- * not starting with a slash). While the working directory is generally in the
- * same default FS, the wd can be in a different FS.
- * <p>
- * Hence Hadoop path names can be one of:
- * <ul>
- * <li> fully qualified URI: scheme://authority/path
- * <li> slash relative names: /path relative to the default file system
- * <li> wd-relative names: path relative to the working dir
- * </ul>
- * Relative paths with scheme (scheme:foo/bar) are illegal.
- *
- * <p>
- * <b>****The Role of the FileContext and configuration defaults****</b>
- * <p>
- * The FileContext provides file namespace context for resolving file names;
- * it also contains the umask for permissions, In that sense it is like the
- * per-process file-related state in Unix system.
- * These two properties
- * <ul>
- * <li> default file system i.e your slash)
- * <li> umask
- * </ul>
- * in general, are obtained from the default configuration file
- * in your environment, (@see {@link Configuration}).
- *
- * No other configuration parameters are obtained from the default config as
- * far as the file context layer is concerned. All file system instances
- * (i.e. deployments of file systems) have default properties; we call these
- * server side (SS) defaults. Operation like create allow one to select many
- * properties: either pass them in as explicit parameters or use
- * the SS properties.
- * <p>
- * The file system related SS defaults are
+ * The FileContext class provides an interface for users of the Hadoop
+ * file system. It exposes a number of file system operations, e.g. create,
+ * open, list.
+ *
+ * <h2>Path Names</h2>
+ *
+ * The Hadoop file system supports a URI namespace and URI names. This enables
+ * multiple types of file systems to be referenced using fully-qualified URIs.
+ * Two common Hadoop file system implementations are
+ * <ul>
+ * <li>the local file system: file:///path
+ * <li>the HDFS file system: hdfs://nnAddress:nnPort/path
+ * </ul>
+ *
+ * The Hadoop file system also supports additional naming schemes besides URIs.
+ * Hadoop has the concept of a <i>default file system</i>, which implies a
+ * default URI scheme and authority. This enables <i>slash-relative names</i>
+ * relative to the default FS, which are more convenient for users and
+ * application writers. The default FS is typically set by the user's
+ * environment, though it can also be manually specified.
+ * <p>
+ *
+ * Hadoop also supports <i>working-directory-relative</i> names, which are paths
+ * relative to the current working directory (similar to Unix). The working
+ * directory can be in a different file system than the default FS.
+ * <p>
+ * Thus, Hadoop path names can be specified as one of the following:
+ * <ul>
+ * <li>a fully-qualified URI: scheme://authority/path (e.g.
+ * hdfs://nnAddress:nnPort/foo/bar)
+ * <li>a slash-relative name: path relative to the default file system (e.g.
+ * /foo/bar)
+ * <li>a working-directory-relative name: path relative to the working dir (e.g.
+ * foo/bar)
+ * </ul>
+ * Relative paths with scheme (scheme:foo/bar) are illegal.
+ *
+ * <h2>Role of FileContext and Configuration Defaults</h2>
+ *
+ * The FileContext is the analogue of per-process file-related state in Unix. It
+ * contains two properties:
+ *
+ * <ul>
+ * <li>the default file system (for resolving slash-relative names)
+ * <li>the umask (for file permissions)
+ * </ul>
+ * In general, these properties are obtained from the default configuration file
+ * in the user's environment (see {@link Configuration}).
+ *
+ * Further file system properties are specified on the server-side. File system
+ * operations default to using these server-side defaults unless otherwise
+ * specified.
+ * <p>
+ * The file system related server-side defaults are:
* <ul>
* <li> the home directory (default is "/user/userName")
* <li> the initial wd (only for local fs)
@ -131,34 +121,34 @@ import org.apache.hadoop.util.ShutdownHookManager;
* <li> checksum option. (checksumType and bytesPerChecksum)
* </ul>
*
- * <p>
- * <b> *** Usage Model for the FileContext class *** </b>
- * <p>
+ * <h2>Example Usage</h2>
+ *
* Example 1: use the default config read from the $HADOOP_CONFIG/core.xml.
* Unspecified values come from core-defaults.xml in the release jar.
* <ul>
* <li> myFContext = FileContext.getFileContext(); // uses the default config
* // which has your default FS
* <li> myFContext.create(path, ...);
- * <li> myFContext.setWorkingDir(path)
+ * <li> myFContext.setWorkingDir(path);
* <li> myFContext.open (path, ...);
+ * <li>...
* </ul>
* Example 2: Get a FileContext with a specific URI as the default FS
* <ul>
- * <li> myFContext = FileContext.getFileContext(URI)
+ * <li> myFContext = FileContext.getFileContext(URI);
* <li> myFContext.create(path, ...);
- * ...
+ * <li>...
* </ul>
* Example 3: FileContext with local file system as the default
* <ul>
- * <li> myFContext = FileContext.getLocalFSFileContext()
+ * <li> myFContext = FileContext.getLocalFSFileContext();
* <li> myFContext.create(path, ...);
* <li> ...
* </ul>
* Example 4: Use a specific config, ignoring $HADOOP_CONFIG
* Generally you should not need use a config unless you are doing
* <ul>
- * <li> configX = someConfigSomeOnePassedToYou.
+ * <li> configX = someConfigSomeOnePassedToYou;
* <li> myFContext = getFileContext(configX); // configX is not changed,
* // is passed down
* <li> myFContext.create(path, ...);
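
For readers skimming the rewritten javadoc above, here is a minimal, hypothetical sketch of the usage model it describes. The paths and directory names are made up for illustration; the calls are the standard FileContext API that the javadoc refers to, and this is not code from the patch itself.

import java.util.EnumSet;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class FileContextSketch {
  public static void main(String[] args) throws Exception {
    // Uses the default config, and therefore the environment's default FS.
    FileContext fc = FileContext.getFileContext();
    // Slash-relative name: resolved against the default file system.
    fc.mkdir(new Path("/tmp/example"), FsPermission.getDefault(), true);
    fc.setWorkingDirectory(new Path("/tmp/example"));
    // Working-directory-relative name: resolved against the wd set above.
    FSDataOutputStream out = fc.create(new Path("data.txt"),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));
    out.writeUTF("hello");
    out.close();
  }
}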

View File

@ -30,8 +30,11 @@ import java.util.TreeMap;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
+ import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.Progressable;
@ -50,6 +53,9 @@ import org.apache.hadoop.util.Progressable;
*/
public class HarFileSystem extends FilterFileSystem {
+ private static final Log LOG = LogFactory.getLog(HarFileSystem.class);
public static final int VERSION = 3;
private static final Map<URI, HarMetaData> harMetaCache =
@ -1025,68 +1031,69 @@ public class HarFileSystem extends FilterFileSystem {
}
private void parseMetaData() throws IOException {
- FSDataInputStream in = fs.open(masterIndexPath);
- FileStatus masterStat = fs.getFileStatus(masterIndexPath);
- masterIndexTimestamp = masterStat.getModificationTime();
- LineReader lin = new LineReader(in, getConf());
- Text line = new Text();
- long read = lin.readLine(line);
- // the first line contains the version of the index file
- String versionLine = line.toString();
- String[] arr = versionLine.split(" ");
- version = Integer.parseInt(arr[0]);
- // make it always backwards-compatible
- if (this.version > HarFileSystem.VERSION) {
-   throw new IOException("Invalid version " +
-       this.version + " expected " + HarFileSystem.VERSION);
- }
- // each line contains a hashcode range and the index file name
- String[] readStr = null;
- while(read < masterStat.getLen()) {
-   int b = lin.readLine(line);
-   read += b;
-   readStr = line.toString().split(" ");
-   int startHash = Integer.parseInt(readStr[0]);
-   int endHash = Integer.parseInt(readStr[1]);
-   stores.add(new Store(Long.parseLong(readStr[2]),
-       Long.parseLong(readStr[3]), startHash,
-       endHash));
-   line.clear();
- }
- try {
-   // close the master index
-   lin.close();
- } catch(IOException io){
-   // do nothing just a read.
- }
- FSDataInputStream aIn = fs.open(archiveIndexPath);
- FileStatus archiveStat = fs.getFileStatus(archiveIndexPath);
- archiveIndexTimestamp = archiveStat.getModificationTime();
- LineReader aLin;
- // now start reading the real index file
- for (Store s: stores) {
-   read = 0;
-   aIn.seek(s.begin);
-   aLin = new LineReader(aIn, getConf());
-   while (read + s.begin < s.end) {
-     int tmp = aLin.readLine(line);
-     read += tmp;
-     String lineFeed = line.toString();
-     String[] parsed = lineFeed.split(" ");
-     parsed[0] = decodeFileName(parsed[0]);
-     archive.put(new Path(parsed[0]), new HarStatus(lineFeed));
-     line.clear();
-   }
- }
- try {
-   // close the archive index
-   aIn.close();
- } catch(IOException io) {
-   // do nothing just a read.
- }
+ Text line;
+ long read;
+ FSDataInputStream in = null;
+ LineReader lin = null;
+ try {
+   in = fs.open(masterIndexPath);
+   FileStatus masterStat = fs.getFileStatus(masterIndexPath);
+   masterIndexTimestamp = masterStat.getModificationTime();
+   lin = new LineReader(in, getConf());
+   line = new Text();
+   read = lin.readLine(line);
+   // the first line contains the version of the index file
+   String versionLine = line.toString();
+   String[] arr = versionLine.split(" ");
+   version = Integer.parseInt(arr[0]);
+   // make it always backwards-compatible
+   if (this.version > HarFileSystem.VERSION) {
+     throw new IOException("Invalid version " +
+         this.version + " expected " + HarFileSystem.VERSION);
+   }
+   // each line contains a hashcode range and the index file name
+   String[] readStr = null;
+   while(read < masterStat.getLen()) {
+     int b = lin.readLine(line);
+     read += b;
+     readStr = line.toString().split(" ");
+     int startHash = Integer.parseInt(readStr[0]);
+     int endHash = Integer.parseInt(readStr[1]);
+     stores.add(new Store(Long.parseLong(readStr[2]),
+         Long.parseLong(readStr[3]), startHash,
+         endHash));
+     line.clear();
+   }
+ } finally {
+   IOUtils.cleanup(LOG, lin, in);
+ }
+ FSDataInputStream aIn = fs.open(archiveIndexPath);
+ try {
+   FileStatus archiveStat = fs.getFileStatus(archiveIndexPath);
+   archiveIndexTimestamp = archiveStat.getModificationTime();
+   LineReader aLin;
+   // now start reading the real index file
+   for (Store s: stores) {
+     read = 0;
+     aIn.seek(s.begin);
+     aLin = new LineReader(aIn, getConf());
+     while (read + s.begin < s.end) {
+       int tmp = aLin.readLine(line);
+       read += tmp;
+       String lineFeed = line.toString();
+       String[] parsed = lineFeed.split(" ");
+       parsed[0] = decodeFileName(parsed[0]);
+       archive.put(new Path(parsed[0]), new HarStatus(lineFeed));
+       line.clear();
+     }
+   }
+ } finally {
+   IOUtils.cleanup(LOG, aIn);
+ }
}
}

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.fs.shell;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.LinkedList;
+ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -28,6 +29,7 @@ import org.apache.hadoop.fs.PathIOException;
import org.apache.hadoop.fs.PathIsDirectoryException;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+ import org.apache.hadoop.fs.PathNotFoundException;
import org.apache.hadoop.fs.Trash;
/**
@ -71,6 +73,19 @@ class Delete {
skipTrash = cf.getOpt("skipTrash");
}
@Override
protected List<PathData> expandArgument(String arg) throws IOException {
try {
return super.expandArgument(arg);
} catch (PathNotFoundException e) {
if (!ignoreFNF) {
throw e;
}
// prevent -f on a non-existent glob from failing
return new LinkedList<PathData>();
}
}
@Override
protected void processNonexistentPath(PathData item) throws IOException {
if (!ignoreFNF) super.processNonexistentPath(item);

View File

@ -48,7 +48,7 @@ class FsUsage extends FsCommand {
protected String formatSize(long size) {
return humanReadable
- ? StringUtils.humanReadableInt(size)
+ ? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
: String.valueOf(size);
}

View File

@ -67,7 +67,7 @@ class Ls extends FsCommand {
protected boolean humanReadable = false;
protected String formatSize(long size) {
return humanReadable
- ? StringUtils.humanReadableInt(size)
+ ? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
: String.valueOf(size);
}

View File

@ -32,9 +32,10 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class BoundedByteArrayOutputStream extends OutputStream {
- private final byte[] buffer;
+ private byte[] buffer;
+ private int startOffset;
private int limit;
- private int count;
+ private int currentPointer;
/**
* Create a BoundedByteArrayOutputStream with the specified
@ -52,20 +53,30 @@ public class BoundedByteArrayOutputStream extends OutputStream {
* @param limit The maximum limit upto which data can be written
*/
public BoundedByteArrayOutputStream(int capacity, int limit) {
+ this(new byte[capacity], 0, limit);
+ }
+ protected BoundedByteArrayOutputStream(byte[] buf, int offset, int limit) {
+ resetBuffer(buf, offset, limit);
+ }
+ protected void resetBuffer(byte[] buf, int offset, int limit) {
+ int capacity = buf.length - offset;
if ((capacity < limit) || (capacity | limit) < 0) {
throw new IllegalArgumentException("Invalid capacity/limit");
}
- this.buffer = new byte[capacity];
- this.limit = limit;
- this.count = 0;
+ this.buffer = buf;
+ this.startOffset = offset;
+ this.currentPointer = offset;
+ this.limit = offset + limit;
}
@Override
public void write(int b) throws IOException {
- if (count >= limit) {
+ if (currentPointer >= limit) {
throw new EOFException("Reaching the limit of the buffer.");
}
- buffer[count++] = (byte) b;
+ buffer[currentPointer++] = (byte) b;
}
@Override
@ -77,12 +88,12 @@ public class BoundedByteArrayOutputStream extends OutputStream {
return;
}
- if (count + len > limit) {
+ if (currentPointer + len > limit) {
throw new EOFException("Reach the limit of the buffer");
}
- System.arraycopy(b, off, buffer, count, len);
- count += len;
+ System.arraycopy(b, off, buffer, currentPointer, len);
+ currentPointer += len;
}
/**
@ -90,17 +101,17 @@ public class BoundedByteArrayOutputStream extends OutputStream {
* @param newlim New Limit
*/
public void reset(int newlim) {
- if (newlim > buffer.length) {
+ if (newlim > (buffer.length - startOffset)) {
throw new IndexOutOfBoundsException("Limit exceeds buffer size");
}
this.limit = newlim;
- this.count = 0;
+ this.currentPointer = startOffset;
}
/** Reset the buffer */
public void reset() {
- this.limit = buffer.length;
- this.count = 0;
+ this.limit = buffer.length - startOffset;
+ this.currentPointer = startOffset;
}
/** Return the current limit */
@ -119,6 +130,10 @@ public class BoundedByteArrayOutputStream extends OutputStream {
* currently in the buffer.
*/
public int size() {
- return count;
+ return currentPointer - startOffset;
}
+ public int available() {
+ return limit - currentPointer;
+ }
}
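
To make the resettable behaviour concrete, here is a small usage sketch of my own (not part of the patch; the capacity and limit values are arbitrary), based on the public methods shown in the hunks above:

import org.apache.hadoop.io.BoundedByteArrayOutputStream;

public class BoundedBufferSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical sizes, purely for illustration: 1024-byte buffer, 16-byte limit.
    BoundedByteArrayOutputStream out = new BoundedByteArrayOutputStream(1024, 16);
    out.write(new byte[10]);
    System.out.println(out.size());       // 10 bytes written so far
    System.out.println(out.available());  // 6 bytes left before hitting the limit
    out.reset();                          // rewind; limit becomes the full capacity again
    System.out.println(out.size());       // 0
  }
}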

View File

@ -203,4 +203,27 @@ public class SortedMapWritable extends AbstractMapWritable
e.getValue().write(out);
}
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof SortedMapWritable) {
Map map = (Map) obj;
if (size() != map.size()) {
return false;
}
return entrySet().equals(map.entrySet());
}
return false;
}
@Override
public int hashCode() {
return instance.hashCode();
}
}
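
A short sketch of my own (not from the patch) showing the Map contract that the new equals() and hashCode() overrides restore for HADOOP-9124:

import org.apache.hadoop.io.SortedMapWritable;
import org.apache.hadoop.io.Text;

public class SortedMapWritableContractSketch {
  public static void main(String[] args) {
    SortedMapWritable a = new SortedMapWritable();
    SortedMapWritable b = new SortedMapWritable();
    a.put(new Text("key"), new Text("value"));
    b.put(new Text("key"), new Text("value"));
    // Equal contents now imply equal objects and equal hash codes.
    System.out.println(a.equals(b));                   // true
    System.out.println(a.hashCode() == b.hashCode());  // true
  }
}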

View File

@ -35,6 +35,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;
+ import org.apache.hadoop.net.ConnectTimeoutException;
/**
* <p>
@ -543,6 +544,7 @@ public class RetryPolicies {
e instanceof NoRouteToHostException ||
e instanceof UnknownHostException ||
e instanceof StandbyException ||
+ e instanceof ConnectTimeoutException ||
isWrappedStandbyException(e)) {
return new RetryAction(
RetryAction.RetryDecision.FAILOVER_AND_RETRY,

View File

@ -67,6 +67,7 @@ import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto.OperationProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;
+ import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SaslRpcClient;
@ -511,14 +512,14 @@ public class Client {
}
this.socket.setSoTimeout(pingInterval);
return;
- } catch (SocketTimeoutException toe) {
+ } catch (ConnectTimeoutException toe) {
/* Check for an address change and update the local reference.
* Reset the failure counter if the address was changed
*/
if (updateAddress()) {
timeoutFailures = ioFailures = 0;
}
- handleConnectionFailure(timeoutFailures++,
+ handleConnectionTimeout(timeoutFailures++,
maxRetriesOnSocketTimeouts, toe);
} catch (IOException ie) {
if (updateAddress()) {
@ -680,7 +681,7 @@ public class Client {
socket = null;
}
- /* Handle connection failures
+ /* Handle connection failures due to timeout on connect
*
* If the current number of retries is equal to the max number of retries,
* stop retrying and throw the exception; Otherwise backoff 1 second and
@ -694,7 +695,7 @@ public class Client {
* @param ioe failure reason
* @throws IOException if max number of retries is reached
*/
- private void handleConnectionFailure(
+ private void handleConnectionTimeout(
int curRetries, int maxRetries, IOException ioe) throws IOException {
closeConnection();

View File

@ -0,0 +1,37 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.net.SocketTimeoutException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown by {@link NetUtils#connect(java.net.Socket, java.net.SocketAddress, int)}
* if it times out while connecting to the remote host.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ConnectTimeoutException extends SocketTimeoutException {
private static final long serialVersionUID = 1L;
public ConnectTimeoutException(String msg) {
super(msg);
}
}
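
A hypothetical caller-side sketch (my own illustration, not part of the patch; the address, port, and timeout are made up) of why the new subtype is useful: with this change a connect-phase timeout from NetUtils.connect can be told apart from other socket timeouts, which is what the RetryPolicies and Client changes in this commit rely on.

import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketTimeoutException;
import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.net.NetUtils;

public class ConnectTimeoutSketch {
  public static void main(String[] args) throws Exception {
    Socket socket = new Socket();
    try {
      // 192.0.2.1 is a documentation address; the 3-second timeout is arbitrary.
      NetUtils.connect(socket, new InetSocketAddress("192.0.2.1", 8020), 3000);
    } catch (ConnectTimeoutException cte) {
      // The connection was never established; safe to fail over and retry elsewhere.
      System.err.println("connect timed out: " + cte.getMessage());
    } catch (SocketTimeoutException ste) {
      // Any other socket timeout still surfaces as the parent type.
      System.err.println("other socket timeout: " + ste.getMessage());
    } finally {
      socket.close();
    }
  }
}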

View File

@ -20,6 +20,7 @@ package org.apache.hadoop.net;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+ import java.lang.reflect.Constructor;
import java.net.BindException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
@ -517,11 +518,15 @@ public class NetUtils {
socket.bind(localAddr);
}
- if (ch == null) {
-   // let the default implementation handle it.
-   socket.connect(endpoint, timeout);
- } else {
-   SocketIOWithTimeout.connect(ch, endpoint, timeout);
+ try {
+   if (ch == null) {
+     // let the default implementation handle it.
+     socket.connect(endpoint, timeout);
+   } else {
+     SocketIOWithTimeout.connect(ch, endpoint, timeout);
+   }
+ } catch (SocketTimeoutException ste) {
+   throw new ConnectTimeoutException(ste.getMessage());
}
// There is a very rare case allowed by the TCP specification, such that
@ -719,7 +724,7 @@ public class NetUtils {
+ see("BindException")); + see("BindException"));
} else if (exception instanceof ConnectException) { } else if (exception instanceof ConnectException) {
// connection refused; include the host:port in the error // connection refused; include the host:port in the error
return (ConnectException) new ConnectException( return wrapWithMessage(exception,
"Call From " "Call From "
+ localHost + localHost
+ " to " + " to "
@ -729,32 +734,28 @@ public class NetUtils {
+ " failed on connection exception: " + " failed on connection exception: "
+ exception + exception
+ ";" + ";"
+ see("ConnectionRefused")) + see("ConnectionRefused"));
.initCause(exception);
} else if (exception instanceof UnknownHostException) { } else if (exception instanceof UnknownHostException) {
return (UnknownHostException) new UnknownHostException( return wrapWithMessage(exception,
"Invalid host name: " "Invalid host name: "
+ getHostDetailsAsString(destHost, destPort, localHost) + getHostDetailsAsString(destHost, destPort, localHost)
+ exception + exception
+ ";" + ";"
+ see("UnknownHost")) + see("UnknownHost"));
.initCause(exception);
} else if (exception instanceof SocketTimeoutException) { } else if (exception instanceof SocketTimeoutException) {
return (SocketTimeoutException) new SocketTimeoutException( return wrapWithMessage(exception,
"Call From " "Call From "
+ localHost + " to " + destHost + ":" + destPort + localHost + " to " + destHost + ":" + destPort
+ " failed on socket timeout exception: " + exception + " failed on socket timeout exception: " + exception
+ ";" + ";"
+ see("SocketTimeout")) + see("SocketTimeout"));
.initCause(exception);
} else if (exception instanceof NoRouteToHostException) { } else if (exception instanceof NoRouteToHostException) {
return (NoRouteToHostException) new NoRouteToHostException( return wrapWithMessage(exception,
"No Route to Host from " "No Route to Host from "
+ localHost + " to " + destHost + ":" + destPort + localHost + " to " + destHost + ":" + destPort
+ " failed on socket timeout exception: " + exception + " failed on socket timeout exception: " + exception
+ ";" + ";"
+ see("NoRouteToHost")) + see("NoRouteToHost"));
.initCause(exception);
} }
else { else {
return (IOException) new IOException("Failed on local exception: " return (IOException) new IOException("Failed on local exception: "
@ -769,6 +770,21 @@ public class NetUtils {
private static String see(final String entry) {
return FOR_MORE_DETAILS_SEE + HADOOP_WIKI + entry;
}
@SuppressWarnings("unchecked")
private static <T extends IOException> T wrapWithMessage(
T exception, String msg) {
Class<? extends Throwable> clazz = exception.getClass();
try {
Constructor<? extends Throwable> ctor = clazz.getConstructor(String.class);
Throwable t = ctor.newInstance(msg);
return (T)(t.initCause(exception));
} catch (Throwable e) {
LOG.warn("Unable to wrap exception of type " +
clazz + ": it has no (String) constructor", e);
return exception;
}
}
/**
* Get the host details as a string

View File

@ -301,17 +301,25 @@ public class UserGroupInformation {
private static String OS_LOGIN_MODULE_NAME;
private static Class<? extends Principal> OS_PRINCIPAL_CLASS;
private static final boolean windows =
System.getProperty("os.name").startsWith("Windows");
private static final boolean is64Bit =
System.getProperty("os.arch").contains("64");
+ private static final boolean ibmJava = System.getProperty("java.vendor").contains("IBM");
+ private static final boolean aix = System.getProperty("os.name").equals("AIX");
/* Return the OS login module class name */
private static String getOSLoginModuleName() {
- if (System.getProperty("java.vendor").contains("IBM")) {
-   return windows ? (is64Bit
-       ? "com.ibm.security.auth.module.Win64LoginModule"
-       : "com.ibm.security.auth.module.NTLoginModule")
-       : "com.ibm.security.auth.module.LinuxLoginModule";
+ if (ibmJava) {
+   if (windows) {
+     return is64Bit ? "com.ibm.security.auth.module.Win64LoginModule"
+         : "com.ibm.security.auth.module.NTLoginModule";
+   } else if (aix) {
+     return "com.ibm.security.auth.module.AIXLoginModule";
+   } else {
+     return "com.ibm.security.auth.module.LinuxLoginModule";
+   }
} else {
return windows ? "com.sun.security.auth.module.NTLoginModule"
: "com.sun.security.auth.module.UnixLoginModule";
@ -323,11 +331,14 @@ public class UserGroupInformation {
private static Class<? extends Principal> getOsPrincipalClass() {
ClassLoader cl = ClassLoader.getSystemClassLoader();
try {
- if (System.getProperty("java.vendor").contains("IBM")) {
+ if (ibmJava) {
if (windows) {
return (Class<? extends Principal>) (is64Bit
? cl.loadClass("com.ibm.security.auth.UsernamePrincipal")
: cl.loadClass("com.ibm.security.auth.NTUserPrincipal"));
+ } else if (aix) {
+   return (Class<? extends Principal>)
+       cl.loadClass("com.ibm.security.auth.AIXPrincipal");
} else {
return (Class<? extends Principal>) (is64Bit
? cl.loadClass("com.ibm.security.auth.UsernamePrincipal")
@ -418,12 +429,21 @@ public class UserGroupInformation {
private static final Map<String,String> USER_KERBEROS_OPTIONS =
new HashMap<String,String>();
static {
- USER_KERBEROS_OPTIONS.put("doNotPrompt", "true");
- USER_KERBEROS_OPTIONS.put("useTicketCache", "true");
- USER_KERBEROS_OPTIONS.put("renewTGT", "true");
+ if (ibmJava) {
+   USER_KERBEROS_OPTIONS.put("useDefaultCcache", "true");
+ } else {
+   USER_KERBEROS_OPTIONS.put("doNotPrompt", "true");
+   USER_KERBEROS_OPTIONS.put("useTicketCache", "true");
+   USER_KERBEROS_OPTIONS.put("renewTGT", "true");
+ }
String ticketCache = System.getenv("KRB5CCNAME");
if (ticketCache != null) {
- USER_KERBEROS_OPTIONS.put("ticketCache", ticketCache);
+ if (ibmJava) {
+   // The first value searched when "useDefaultCcache" is used.
+   System.setProperty("KRB5CCNAME", ticketCache);
+ } else {
+   USER_KERBEROS_OPTIONS.put("ticketCache", ticketCache);
+ }
}
USER_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS);
}
@ -434,10 +454,14 @@ public class UserGroupInformation {
private static final Map<String,String> KEYTAB_KERBEROS_OPTIONS =
new HashMap<String,String>();
static {
- KEYTAB_KERBEROS_OPTIONS.put("doNotPrompt", "true");
- KEYTAB_KERBEROS_OPTIONS.put("useKeyTab", "true");
- KEYTAB_KERBEROS_OPTIONS.put("storeKey", "true");
- KEYTAB_KERBEROS_OPTIONS.put("refreshKrb5Config", "true");
+ if (ibmJava) {
+   KEYTAB_KERBEROS_OPTIONS.put("credsType", "both");
+ } else {
+   KEYTAB_KERBEROS_OPTIONS.put("doNotPrompt", "true");
+   KEYTAB_KERBEROS_OPTIONS.put("useKeyTab", "true");
+   KEYTAB_KERBEROS_OPTIONS.put("storeKey", "true");
+   KEYTAB_KERBEROS_OPTIONS.put("refreshKrb5Config", "true");
+ }
KEYTAB_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS);
}
private static final AppConfigurationEntry KEYTAB_KERBEROS_LOGIN =
@ -462,7 +486,12 @@ public class UserGroupInformation {
} else if (USER_KERBEROS_CONFIG_NAME.equals(appName)) {
return USER_KERBEROS_CONF;
} else if (KEYTAB_KERBEROS_CONFIG_NAME.equals(appName)) {
- KEYTAB_KERBEROS_OPTIONS.put("keyTab", keytabFile);
+ if (ibmJava) {
+   KEYTAB_KERBEROS_OPTIONS.put("useKeytab",
+       prependFileAuthority(keytabFile));
+ } else {
+   KEYTAB_KERBEROS_OPTIONS.put("keyTab", keytabFile);
+ }
KEYTAB_KERBEROS_OPTIONS.put("principal", keytabPrincipal);
return KEYTAB_KERBEROS_CONF;
}
@ -470,6 +499,11 @@ public class UserGroupInformation {
}
}
private static String prependFileAuthority(String keytabPath) {
return keytabPath.startsWith("file://") ? keytabPath
: "file://" + keytabPath;
}
/**
* Represents a javax.security configuration that is created at runtime.
*/
@ -666,6 +700,7 @@ public class UserGroupInformation {
}
loginUser.spawnAutoRenewalThreadForUserCreds();
} catch (LoginException le) {
+ LOG.debug("failure to login", le);
throw new IOException("failure to login", le);
}
if (LOG.isDebugEnabled()) {

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.util;
+ import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
@ -39,7 +40,7 @@ import org.apache.hadoop.io.Text;
*/
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
- public class LineReader {
+ public class LineReader implements Closeable {
private static final int DEFAULT_BUFFER_SIZE = 64 * 1024;
private int bufferSize = DEFAULT_BUFFER_SIZE;
private InputStream in;

View File

@ -23,8 +23,6 @@ import java.io.StringWriter;
import java.net.URI;
import java.net.URISyntaxException;
import java.text.DateFormat;
- import java.text.DecimalFormat;
- import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@ -34,12 +32,13 @@ import java.util.List;
import java.util.Locale;
import java.util.StringTokenizer;
- import com.google.common.net.InetAddresses;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.net.NetUtils;
+ import com.google.common.net.InetAddresses;
/**
* General string utils
*/
@ -52,13 +51,6 @@ public class StringUtils {
*/
public static final int SHUTDOWN_HOOK_PRIORITY = 0;
- private static final DecimalFormat decimalFormat;
- static {
-   NumberFormat numberFormat = NumberFormat.getNumberInstance(Locale.ENGLISH);
-   decimalFormat = (DecimalFormat) numberFormat;
-   decimalFormat.applyPattern("#.##");
- }
/**
* Make a string representation of the exception.
* @param e The exception to stringify
@ -87,50 +79,33 @@ public class StringUtils {
}
return fullHostname;
}
- private static DecimalFormat oneDecimal = new DecimalFormat("0.0");
/**
* Given an integer, return a string that is in an approximate, but human
* readable format.
- * It uses the bases 'k', 'm', and 'g' for 1024, 1024**2, and 1024**3.
* @param number the number to format
* @return a human readable form of the integer
+ *
+ * @deprecated use {@link TraditionalBinaryPrefix#long2String(long, String, int)}.
*/
+ @Deprecated
public static String humanReadableInt(long number) {
- long absNumber = Math.abs(number);
- double result = number;
- String suffix = "";
- if (absNumber < 1024) {
-   // since no division has occurred, don't format with a decimal point
-   return String.valueOf(number);
- } else if (absNumber < 1024 * 1024) {
-   result = number / 1024.0;
-   suffix = "k";
- } else if (absNumber < 1024 * 1024 * 1024) {
-   result = number / (1024.0 * 1024);
-   suffix = "m";
- } else {
-   result = number / (1024.0 * 1024 * 1024);
-   suffix = "g";
- }
- return oneDecimal.format(result) + suffix;
+ return TraditionalBinaryPrefix.long2String(number, "", 1);
}
+ /** The same as String.format(Locale.ENGLISH, format, objects). */
+ public static String format(final String format, final Object... objects) {
+   return String.format(Locale.ENGLISH, format, objects);
+ }
/**
* Format a percentage for presentation to the user.
- * @param done the percentage to format (0.0 to 1.0)
- * @param digits the number of digits past the decimal point
+ * @param fraction the percentage as a fraction, e.g. 0.1 = 10%
+ * @param decimalPlaces the number of decimal places
* @return a string representation of the percentage
*/
- public static String formatPercent(double done, int digits) {
-   DecimalFormat percentFormat = new DecimalFormat("0.00%");
-   double scale = Math.pow(10.0, digits+2);
-   double rounded = Math.floor(done * scale);
-   percentFormat.setDecimalSeparatorAlwaysShown(false);
-   percentFormat.setMinimumFractionDigits(digits);
-   percentFormat.setMaximumFractionDigits(digits);
-   return percentFormat.format(rounded / scale);
+ public static String formatPercent(double fraction, int decimalPlaces) {
+   return format("%." + decimalPlaces + "f%%", fraction*100);
}
/**
@ -165,7 +140,7 @@ public class StringUtils {
}
StringBuilder s = new StringBuilder();
for(int i = start; i < end; i++) {
- s.append(String.format("%02x", bytes[i]));
+ s.append(format("%02x", bytes[i]));
}
return s.toString();
}
@ -630,18 +605,22 @@ public class StringUtils {
* TraditionalBinaryPrefix symbol are case insensitive.
*/
public static enum TraditionalBinaryPrefix {
- KILO(1024),
- MEGA(KILO.value << 10),
- GIGA(MEGA.value << 10),
- TERA(GIGA.value << 10),
- PETA(TERA.value << 10),
- EXA(PETA.value << 10);
+ KILO(10),
+ MEGA(KILO.bitShift + 10),
+ GIGA(MEGA.bitShift + 10),
+ TERA(GIGA.bitShift + 10),
+ PETA(TERA.bitShift + 10),
+ EXA (PETA.bitShift + 10);
public final long value;
public final char symbol;
+ public final int bitShift;
+ public final long bitMask;
- TraditionalBinaryPrefix(long value) {
-   this.value = value;
+ private TraditionalBinaryPrefix(int bitShift) {
+   this.bitShift = bitShift;
+   this.value = 1L << bitShift;
+   this.bitMask = this.value - 1L;
this.symbol = toString().charAt(0);
}
@ -692,8 +671,58 @@ public class StringUtils {
return num * prefix;
}
}
/**
* Convert a long integer to a string with traditional binary prefix.
*
* @param n the value to be converted
* @param unit The unit, e.g. "B" for bytes.
* @param decimalPlaces The number of decimal places.
* @return a string with traditional binary prefix.
*/
public static String long2String(long n, String unit, int decimalPlaces) {
if (unit == null) {
unit = "";
}
//take care a special case
if (n == Long.MIN_VALUE) {
return "-8 " + EXA.symbol + unit;
}
final StringBuilder b = new StringBuilder();
//take care negative numbers
if (n < 0) {
b.append('-');
n = -n;
}
if (n < KILO.value) {
//no prefix
b.append(n);
return (unit.isEmpty()? b: b.append(" ").append(unit)).toString();
} else {
//find traditional binary prefix
int i = 0;
for(; i < values().length && n >= values()[i].value; i++);
TraditionalBinaryPrefix prefix = values()[i - 1];
if ((n & prefix.bitMask) == 0) {
//exact division
b.append(n >> prefix.bitShift);
} else {
final String format = "%." + decimalPlaces + "f";
String s = format(format, n/(double)prefix.value);
//check a special rounding up case
if (s.startsWith("1024")) {
prefix = values()[i];
s = format(format, n/(double)prefix.value);
}
b.append(s);
}
return b.append(' ').append(prefix.symbol).append(unit).toString();
}
}
}
/**
* Escapes HTML Special characters present in the string.
* @param string
@ -731,32 +760,16 @@ public class StringUtils {
}
/**
- * Return an abbreviated English-language desc of the byte length
+ * @return a byte description of the given long interger value.
*/
public static String byteDesc(long len) {
- double val = 0.0;
- String ending = "";
- if (len < 1024 * 1024) {
-   val = (1.0 * len) / 1024;
-   ending = " KB";
- } else if (len < 1024 * 1024 * 1024) {
-   val = (1.0 * len) / (1024 * 1024);
-   ending = " MB";
- } else if (len < 1024L * 1024 * 1024 * 1024) {
-   val = (1.0 * len) / (1024 * 1024 * 1024);
-   ending = " GB";
- } else if (len < 1024L * 1024 * 1024 * 1024 * 1024) {
-   val = (1.0 * len) / (1024L * 1024 * 1024 * 1024);
-   ending = " TB";
- } else {
-   val = (1.0 * len) / (1024L * 1024 * 1024 * 1024 * 1024);
-   ending = " PB";
- }
- return limitDecimalTo2(val) + ending;
+ return TraditionalBinaryPrefix.long2String(len, "B", 2);
}
- public static synchronized String limitDecimalTo2(double d) {
-   return decimalFormat.format(d);
+ /** @deprecated use StringUtils.format("%.2f", d). */
+ @Deprecated
+ public static String limitDecimalTo2(double d) {
+   return format("%.2f", d);
}
/**
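
As a quick sanity check of the new conversion logic, here are a few worked examples of my own, derived from the long2String and byteDesc code shown above (this snippet is not part of the patch):

import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;

public class BinaryPrefixSketch {
  public static void main(String[] args) {
    // Exact multiple of 1024: printed via the bit shift, no decimal part.
    System.out.println(TraditionalBinaryPrefix.long2String(1024, "B", 2));  // "1 KB"
    // Non-exact value: formatted with the requested number of decimal places.
    System.out.println(TraditionalBinaryPrefix.long2String(1536, "B", 2));  // "1.50 KB"
    // Below KILO: no prefix at all.
    System.out.println(TraditionalBinaryPrefix.long2String(500, "B", 2));   // "500 B"
    // byteDesc() and the deprecated humanReadableInt() now delegate to it.
    System.out.println(StringUtils.byteDesc(10L * 1024 * 1024));            // "10 MB"
    System.out.println(StringUtils.humanReadableInt(1536));                 // "1.5 K"
  }
}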

View File

@ -325,6 +325,13 @@ Hadoop MapReduce Next Generation - Cluster Setup
| | | How long to keep aggregation logs before deleting them. -1 disables. |
| | | Be careful, set this too small and you will spam the name node. |
*-------------------------+-------------------------+------------------------+
| <<<yarn.log-aggregation.retain-check-interval-seconds>>> | | |
| | <-1> | |
| | | Time between checks for aggregated log retention. If set to 0 or a |
| | | negative value then the value is computed as one-tenth of the |
| | | aggregated log retention time. |
| | | Be careful, set this too small and you will spam the name node. |
*-------------------------+-------------------------+------------------------+

View File

@ -303,6 +303,46 @@ public class TestFsShellReturnCode {
}
}
@Test
public void testRmWithNonexistentGlob() throws Exception {
Configuration conf = new Configuration();
FsShell shell = new FsShell();
shell.setConf(conf);
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream err = new PrintStream(bytes);
final PrintStream oldErr = System.err;
System.setErr(err);
final String results;
try {
int exit = shell.run(new String[]{"-rm", "nomatch*"});
assertEquals(1, exit);
results = bytes.toString();
assertTrue(results.contains("rm: `nomatch*': No such file or directory"));
} finally {
IOUtils.closeStream(err);
System.setErr(oldErr);
}
}
@Test
public void testRmForceWithNonexistentGlob() throws Exception {
Configuration conf = new Configuration();
FsShell shell = new FsShell();
shell.setConf(conf);
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream err = new PrintStream(bytes);
final PrintStream oldErr = System.err;
System.setErr(err);
try {
int exit = shell.run(new String[]{"-rm", "-f", "nomatch*"});
assertEquals(0, exit);
assertTrue(bytes.toString().isEmpty());
} finally {
IOUtils.closeStream(err);
System.setErr(oldErr);
}
}
@Test
public void testInvalidDefaultFS() throws Exception {
// if default fs doesn't exist or is invalid, but the path provided in

View File

@ -28,6 +28,7 @@ import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
+ import org.apache.hadoop.util.Shell;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@ -46,8 +47,18 @@ public class TestHarFileSystemBasics {
private static final String ROOT_PATH = System.getProperty("test.build.data",
"build/test/data");
- private static final Path rootPath = new Path(
-   new File(ROOT_PATH).getAbsolutePath() + "/localfs");
+ private static final Path rootPath;
+ static {
+   String root = new Path(new File(ROOT_PATH).getAbsolutePath(), "localfs")
+     .toUri().getPath();
+   // Strip drive specifier on Windows, which would make the HAR URI invalid and
+   // cause tests to fail.
+   if (Shell.WINDOWS) {
+     root = root.substring(root.indexOf(':') + 1);
+   }
+   rootPath = new Path(root);
+ }
// NB: .har suffix is necessary
private static final Path harPath = new Path(rootPath, "path1/path2/my.har");

View File

@ -45,19 +45,39 @@ public class TestListFiles {
final protected static Configuration conf = new Configuration();
protected static FileSystem fs;
- final protected static Path TEST_DIR = getTestDir();
+ protected static Path TEST_DIR;
final private static int FILE_LEN = 10;
- final private static Path FILE1 = new Path(TEST_DIR, "file1");
- final private static Path DIR1 = new Path(TEST_DIR, "dir1");
- final private static Path FILE2 = new Path(DIR1, "file2");
- final private static Path FILE3 = new Path(DIR1, "file3");
+ private static Path FILE1;
+ private static Path DIR1;
+ private static Path FILE2;
+ private static Path FILE3;
+ static {
+   setTestPaths(new Path(
+     System.getProperty("test.build.data", "build/test/data/work-dir/localfs"),
+     "main_"));
+ }
protected static Path getTestDir() {
- return new Path(
-   System.getProperty("test.build.data","build/test/data/work-dir/localfs"),
-   "main_");
+ return TEST_DIR;
}
/**
* Sets the root testing directory and reinitializes any additional test paths
* that are under the root. This method is intended to be called from a
* subclass's @BeforeClass method if there is a need to override the testing
* directory.
*
* @param testDir Path root testing directory
*/
protected static void setTestPaths(Path testDir) {
TEST_DIR = testDir;
FILE1 = new Path(TEST_DIR, "file1");
DIR1 = new Path(TEST_DIR, "dir1");
FILE2 = new Path(DIR1, "file2");
FILE3 = new Path(DIR1, "file3");
}
@BeforeClass
public static void testSetUp() throws Exception {
fs = FileSystem.getLocal(conf);
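
The new setTestPaths hook above is meant to be driven from a subclass, as its javadoc says. A hypothetical sketch of such a subclass (my own illustration, not part of the patch; the class name and directory are made up, and it assumes JUnit 4's behaviour of letting a same-named static @BeforeClass method in the subclass shadow the parent's):

package org.apache.hadoop.fs;

import org.junit.BeforeClass;

public class TestListFilesInAltDir extends TestListFiles {
  @BeforeClass
  public static void testSetUp() throws Exception {
    // Repoint the test root before any files are created, then reuse the
    // parent's setup logic.
    setTestPaths(new Path("/tmp/alt-list-files-root"));
    TestListFiles.testSetUp();
  }
}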

View File

@ -28,6 +28,7 @@ import java.io.*;
import static org.junit.Assert.*;
import static org.junit.Assume.assumeTrue;
+ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@ -38,8 +39,9 @@ public class TestLocalFileSystem {
private static final String TEST_ROOT_DIR
= System.getProperty("test.build.data","build/test/data") + "/work-dir/localfs";
+ private final File base = new File(TEST_ROOT_DIR);
private Configuration conf;
- private FileSystem fileSys;
+ private LocalFileSystem fileSys;
private void cleanupFile(FileSystem fs, Path name) throws IOException {
assertTrue(fs.exists(name));
@ -53,6 +55,13 @@ public class TestLocalFileSystem {
fileSys = FileSystem.getLocal(conf);
fileSys.delete(new Path(TEST_ROOT_DIR), true);
}
@After
public void after() throws IOException {
base.setWritable(true);
FileUtil.fullyDelete(base);
assertTrue(!base.exists());
}
/**
* Test the capability of setting the working directory.
@ -269,10 +278,83 @@ public class TestLocalFileSystem {
LocalFileSystem fs = FileSystem.getLocal(conf);
File colonFile = new File(TEST_ROOT_DIR, "foo:bar");
colonFile.mkdirs();
+ colonFile.createNewFile();
FileStatus[] stats = fs.listStatus(new Path(TEST_ROOT_DIR));
assertEquals("Unexpected number of stats", 1, stats.length);
assertEquals("Bad path from stat", colonFile.getAbsolutePath(),
stats[0].getPath().toUri().getPath());
}
@Test
public void testReportChecksumFailure() throws IOException {
base.mkdirs();
assertTrue(base.exists() && base.isDirectory());
final File dir1 = new File(base, "dir1");
final File dir2 = new File(dir1, "dir2");
dir2.mkdirs();
assertTrue(dir2.exists() && dir2.canWrite());
final String dataFileName = "corruptedData";
final Path dataPath = new Path(new File(dir2, dataFileName).toURI());
final Path checksumPath = fileSys.getChecksumFile(dataPath);
final FSDataOutputStream fsdos = fileSys.create(dataPath);
try {
fsdos.writeUTF("foo");
} finally {
fsdos.close();
}
assertTrue(fileSys.pathToFile(dataPath).exists());
final long dataFileLength = fileSys.getFileStatus(dataPath).getLen();
assertTrue(dataFileLength > 0);
// check that the checksum file is created and not empty:
assertTrue(fileSys.pathToFile(checksumPath).exists());
final long checksumFileLength = fileSys.getFileStatus(checksumPath).getLen();
assertTrue(checksumFileLength > 0);
// this is a hack to force the #reportChecksumFailure() method to stop
// climbing up at the 'base' directory and use 'dir1/bad_files' as the
// corrupted files storage:
base.setWritable(false);
FSDataInputStream dataFsdis = fileSys.open(dataPath);
FSDataInputStream checksumFsdis = fileSys.open(checksumPath);
boolean retryIsNecessary = fileSys.reportChecksumFailure(dataPath, dataFsdis, 0, checksumFsdis, 0);
assertTrue(!retryIsNecessary);
// the data file should be moved:
assertTrue(!fileSys.pathToFile(dataPath).exists());
// the checksum file should be moved:
assertTrue(!fileSys.pathToFile(checksumPath).exists());
// check that the files exist in the new location where they were moved:
File[] dir1files = dir1.listFiles(new FileFilter() {
@Override
public boolean accept(File pathname) {
return pathname != null && !pathname.getName().equals("dir2");
}
});
assertTrue(dir1files != null);
assertTrue(dir1files.length == 1);
File badFilesDir = dir1files[0];
File[] badFiles = badFilesDir.listFiles();
assertTrue(badFiles != null);
assertTrue(badFiles.length == 2);
boolean dataFileFound = false;
boolean checksumFileFound = false;
for (File badFile: badFiles) {
if (badFile.getName().startsWith(dataFileName)) {
assertTrue(dataFileLength == badFile.length());
dataFileFound = true;
} else if (badFile.getName().contains(dataFileName + ".crc")) {
assertTrue(checksumFileLength == badFile.length());
checksumFileFound = true;
}
}
assertTrue(dataFileFound);
assertTrue(checksumFileFound);
}
} }

View File

@ -88,4 +88,61 @@ public class TestBoundedByteArrayOutputStream extends TestCase {
assertTrue("Writing beyond limit did not throw an exception", assertTrue("Writing beyond limit did not throw an exception",
caughtException); caughtException);
} }
static class ResettableBoundedByteArrayOutputStream
extends BoundedByteArrayOutputStream {
public ResettableBoundedByteArrayOutputStream(int capacity) {
super(capacity);
}
public void resetBuffer(byte[] buf, int offset, int length) {
super.resetBuffer(buf, offset, length);
}
}
public void testResetBuffer() throws IOException {
ResettableBoundedByteArrayOutputStream stream =
new ResettableBoundedByteArrayOutputStream(SIZE);
// Write to the stream, get the data back and check for contents
stream.write(INPUT, 0, SIZE);
assertTrue("Array Contents Mismatch",
Arrays.equals(INPUT, stream.getBuffer()));
// Try writing beyond end of buffer. Should throw an exception
boolean caughtException = false;
try {
stream.write(INPUT[0]);
} catch (Exception e) {
caughtException = true;
}
assertTrue("Writing beyond limit did not throw an exception",
caughtException);
// Reset the stream and try again; this should succeed
byte[] newBuf = new byte[SIZE];
stream.resetBuffer(newBuf, 0, newBuf.length);
assertTrue("Limit did not get reset correctly",
(stream.getLimit() == SIZE));
stream.write(INPUT, 0, SIZE);
assertTrue("Array Contents Mismatch",
Arrays.equals(INPUT, stream.getBuffer()));
// Try writing one more byte, should fail
caughtException = false;
try {
stream.write(INPUT[0]);
} catch (Exception e) {
caughtException = true;
}
assertTrue("Writing beyond limit did not throw an exception",
caughtException);
}
} }

View File

@ -17,15 +17,20 @@
*/ */
package org.apache.hadoop.io; package org.apache.hadoop.io;
import java.util.Map; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import junit.framework.TestCase; import java.util.Map;
import org.junit.Test;
/** /**
* Tests SortedMapWritable * Tests SortedMapWritable
*/ */
public class TestSortedMapWritable extends TestCase { public class TestSortedMapWritable {
/** the test */ /** the test */
@Test
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
public void testSortedMapWritable() { public void testSortedMapWritable() {
Text[] keys = { Text[] keys = {
@ -90,6 +95,7 @@ public class TestSortedMapWritable extends TestCase {
/** /**
* Test that number of "unknown" classes is propagated across multiple copies. * Test that number of "unknown" classes is propagated across multiple copies.
*/ */
@Test
@SuppressWarnings("deprecation") @SuppressWarnings("deprecation")
public void testForeignClass() { public void testForeignClass() {
SortedMapWritable inMap = new SortedMapWritable(); SortedMapWritable inMap = new SortedMapWritable();
@ -99,4 +105,63 @@ public class TestSortedMapWritable extends TestCase {
SortedMapWritable copyOfCopy = new SortedMapWritable(outMap); SortedMapWritable copyOfCopy = new SortedMapWritable(outMap);
assertEquals(1, copyOfCopy.getNewClasses()); assertEquals(1, copyOfCopy.getNewClasses());
} }
/**
* Tests that the equals and hashCode methods still hold their contract.
*/
@Test
public void testEqualsAndHashCode() {
String failureReason;
SortedMapWritable mapA = new SortedMapWritable();
SortedMapWritable mapB = new SortedMapWritable();
// Sanity checks
failureReason = "SortedMapWritable couldn't be initialized. Got null reference";
assertNotNull(failureReason, mapA);
assertNotNull(failureReason, mapB);
// Basic null check
assertFalse("equals method returns true when passed null", mapA.equals(null));
// When entry set is empty, they should be equal
assertTrue("Two empty SortedMapWritables are no longer equal", mapA.equals(mapB));
// Setup
Text[] keys = {
new Text("key1"),
new Text("key2")
};
BytesWritable[] values = {
new BytesWritable("value1".getBytes()),
new BytesWritable("value2".getBytes())
};
mapA.put(keys[0], values[0]);
mapB.put(keys[1], values[1]);
// entrySets are different
failureReason = "Two SortedMapWritables with different data are now equal";
assertTrue(failureReason, mapA.hashCode() != mapB.hashCode());
assertTrue(failureReason, !mapA.equals(mapB));
assertTrue(failureReason, !mapB.equals(mapA));
mapA.put(keys[1], values[1]);
mapB.put(keys[0], values[0]);
// entrySets are now same
failureReason = "Two SortedMapWritables with same entry sets formed in different order are now different";
assertEquals(failureReason, mapA.hashCode(), mapB.hashCode());
assertTrue(failureReason, mapA.equals(mapB));
assertTrue(failureReason, mapB.equals(mapA));
// Check entry sets with the same keys but different values
mapA.put(keys[0], values[1]);
mapA.put(keys[1], values[0]);
failureReason = "Two SortedMapWritables with different content are now equal";
assertTrue(failureReason, mapA.hashCode() != mapB.hashCode());
assertTrue(failureReason, !mapA.equals(mapB));
assertTrue(failureReason, !mapB.equals(mapA));
}
} }

View File

@ -26,6 +26,7 @@ import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
import java.util.Random; import java.util.Random;
@ -586,7 +587,7 @@ public class TestIPC {
private void assertRetriesOnSocketTimeouts(Configuration conf, private void assertRetriesOnSocketTimeouts(Configuration conf,
int maxTimeoutRetries) throws IOException, InterruptedException { int maxTimeoutRetries) throws IOException, InterruptedException {
SocketFactory mockFactory = Mockito.mock(SocketFactory.class); SocketFactory mockFactory = Mockito.mock(SocketFactory.class);
doThrow(new SocketTimeoutException()).when(mockFactory).createSocket(); doThrow(new ConnectTimeoutException("fake")).when(mockFactory).createSocket();
Client client = new Client(IntWritable.class, conf, mockFactory); Client client = new Client(IntWritable.class, conf, mockFactory);
InetSocketAddress address = new InetSocketAddress("127.0.0.1", 9090); InetSocketAddress address = new InetSocketAddress("127.0.0.1", 9090);
try { try {

View File

@ -108,7 +108,7 @@ public abstract class GetGroupsTestBase {
for (String group : user.getGroupNames()) { for (String group : user.getGroupNames()) {
expectedOutput += " " + group; expectedOutput += " " + group;
} }
return expectedOutput + "\n"; return expectedOutput + System.getProperty("line.separator");
} }
private String runTool(Configuration conf, String[] args, boolean success) private String runTool(Configuration conf, String[] args, boolean success)

View File

@ -18,6 +18,8 @@
package org.apache.hadoop.util; package org.apache.hadoop.util;
import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.long2String;
import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.string2long;
import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail; import static org.junit.Assert.fail;
@ -26,6 +28,7 @@ import java.util.ArrayList;
import java.util.List; import java.util.List;
import org.apache.hadoop.test.UnitTestcaseTimeLimit; import org.apache.hadoop.test.UnitTestcaseTimeLimit;
import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
import org.junit.Test; import org.junit.Test;
public class TestStringUtils extends UnitTestcaseTimeLimit { public class TestStringUtils extends UnitTestcaseTimeLimit {
@ -134,45 +137,34 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
@Test @Test
public void testTraditionalBinaryPrefix() throws Exception { public void testTraditionalBinaryPrefix() throws Exception {
//test string2long(..)
String[] symbol = {"k", "m", "g", "t", "p", "e"}; String[] symbol = {"k", "m", "g", "t", "p", "e"};
long m = 1024; long m = 1024;
for(String s : symbol) { for(String s : symbol) {
assertEquals(0, StringUtils.TraditionalBinaryPrefix.string2long(0 + s)); assertEquals(0, string2long(0 + s));
assertEquals(m, StringUtils.TraditionalBinaryPrefix.string2long(1 + s)); assertEquals(m, string2long(1 + s));
m *= 1024; m *= 1024;
} }
assertEquals(0L, StringUtils.TraditionalBinaryPrefix.string2long("0")); assertEquals(0L, string2long("0"));
assertEquals(1024L, StringUtils.TraditionalBinaryPrefix.string2long("1k")); assertEquals(1024L, string2long("1k"));
assertEquals(-1024L, StringUtils.TraditionalBinaryPrefix.string2long("-1k")); assertEquals(-1024L, string2long("-1k"));
assertEquals(1259520L, assertEquals(1259520L, string2long("1230K"));
StringUtils.TraditionalBinaryPrefix.string2long("1230K")); assertEquals(-1259520L, string2long("-1230K"));
assertEquals(-1259520L, assertEquals(104857600L, string2long("100m"));
StringUtils.TraditionalBinaryPrefix.string2long("-1230K")); assertEquals(-104857600L, string2long("-100M"));
assertEquals(104857600L, assertEquals(956703965184L, string2long("891g"));
StringUtils.TraditionalBinaryPrefix.string2long("100m")); assertEquals(-956703965184L, string2long("-891G"));
assertEquals(-104857600L, assertEquals(501377302265856L, string2long("456t"));
StringUtils.TraditionalBinaryPrefix.string2long("-100M")); assertEquals(-501377302265856L, string2long("-456T"));
assertEquals(956703965184L, assertEquals(11258999068426240L, string2long("10p"));
StringUtils.TraditionalBinaryPrefix.string2long("891g")); assertEquals(-11258999068426240L, string2long("-10P"));
assertEquals(-956703965184L, assertEquals(1152921504606846976L, string2long("1e"));
StringUtils.TraditionalBinaryPrefix.string2long("-891G")); assertEquals(-1152921504606846976L, string2long("-1E"));
assertEquals(501377302265856L,
StringUtils.TraditionalBinaryPrefix.string2long("456t"));
assertEquals(-501377302265856L,
StringUtils.TraditionalBinaryPrefix.string2long("-456T"));
assertEquals(11258999068426240L,
StringUtils.TraditionalBinaryPrefix.string2long("10p"));
assertEquals(-11258999068426240L,
StringUtils.TraditionalBinaryPrefix.string2long("-10P"));
assertEquals(1152921504606846976L,
StringUtils.TraditionalBinaryPrefix.string2long("1e"));
assertEquals(-1152921504606846976L,
StringUtils.TraditionalBinaryPrefix.string2long("-1E"));
String tooLargeNumStr = "10e"; String tooLargeNumStr = "10e";
try { try {
StringUtils.TraditionalBinaryPrefix.string2long(tooLargeNumStr); string2long(tooLargeNumStr);
fail("Test passed for a number " + tooLargeNumStr + " too large"); fail("Test passed for a number " + tooLargeNumStr + " too large");
} catch (IllegalArgumentException e) { } catch (IllegalArgumentException e) {
assertEquals(tooLargeNumStr + " does not fit in a Long", e.getMessage()); assertEquals(tooLargeNumStr + " does not fit in a Long", e.getMessage());
@ -180,7 +172,7 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
String tooSmallNumStr = "-10e"; String tooSmallNumStr = "-10e";
try { try {
StringUtils.TraditionalBinaryPrefix.string2long(tooSmallNumStr); string2long(tooSmallNumStr);
fail("Test passed for a number " + tooSmallNumStr + " too small"); fail("Test passed for a number " + tooSmallNumStr + " too small");
} catch (IllegalArgumentException e) { } catch (IllegalArgumentException e) {
assertEquals(tooSmallNumStr + " does not fit in a Long", e.getMessage()); assertEquals(tooSmallNumStr + " does not fit in a Long", e.getMessage());
@ -189,7 +181,7 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
String invalidFormatNumStr = "10kb"; String invalidFormatNumStr = "10kb";
char invalidPrefix = 'b'; char invalidPrefix = 'b';
try { try {
StringUtils.TraditionalBinaryPrefix.string2long(invalidFormatNumStr); string2long(invalidFormatNumStr);
fail("Test passed for a number " + invalidFormatNumStr fail("Test passed for a number " + invalidFormatNumStr
+ " has invalid format"); + " has invalid format");
} catch (IllegalArgumentException e) { } catch (IllegalArgumentException e) {
@ -199,6 +191,74 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
e.getMessage()); e.getMessage());
} }
//test long2string(..)
assertEquals("0", long2String(0, null, 2));
for(int decimalPlace = 0; decimalPlace < 2; decimalPlace++) {
for(int n = 1; n < TraditionalBinaryPrefix.KILO.value; n++) {
assertEquals(n + "", long2String(n, null, decimalPlace));
assertEquals(-n + "", long2String(-n, null, decimalPlace));
}
assertEquals("1 K", long2String(1L << 10, null, decimalPlace));
assertEquals("-1 K", long2String(-1L << 10, null, decimalPlace));
}
assertEquals("8.00 E", long2String(Long.MAX_VALUE, null, 2));
assertEquals("8.00 E", long2String(Long.MAX_VALUE - 1, null, 2));
assertEquals("-8 E", long2String(Long.MIN_VALUE, null, 2));
assertEquals("-8.00 E", long2String(Long.MIN_VALUE + 1, null, 2));
final String[] zeros = {" ", ".0 ", ".00 "};
for(int decimalPlace = 0; decimalPlace < zeros.length; decimalPlace++) {
final String trailingZeros = zeros[decimalPlace];
for(int e = 11; e < Long.SIZE - 1; e++) {
final TraditionalBinaryPrefix p
= TraditionalBinaryPrefix.values()[e/10 - 1];
{ // n = 2^e
final long n = 1L << e;
final String expected = (n/p.value) + " " + p.symbol;
assertEquals("n=" + n, expected, long2String(n, null, 2));
}
{ // n = 2^e + 1
final long n = (1L << e) + 1;
final String expected = (n/p.value) + trailingZeros + p.symbol;
assertEquals("n=" + n, expected, long2String(n, null, decimalPlace));
}
{ // n = 2^e - 1
final long n = (1L << e) - 1;
final String expected = ((n+1)/p.value) + trailingZeros + p.symbol;
assertEquals("n=" + n, expected, long2String(n, null, decimalPlace));
}
}
}
assertEquals("1.50 K", long2String(3L << 9, null, 2));
assertEquals("1.5 K", long2String(3L << 9, null, 1));
assertEquals("1.50 M", long2String(3L << 19, null, 2));
assertEquals("2 M", long2String(3L << 19, null, 0));
assertEquals("3 G", long2String(3L << 30, null, 2));
// test byteDesc(..)
assertEquals("0 B", StringUtils.byteDesc(0));
assertEquals("-100 B", StringUtils.byteDesc(-100));
assertEquals("1 KB", StringUtils.byteDesc(1024));
assertEquals("1.50 KB", StringUtils.byteDesc(3L << 9));
assertEquals("1.50 MB", StringUtils.byteDesc(3L << 19));
assertEquals("3 GB", StringUtils.byteDesc(3L << 30));
// test formatPercent(..)
assertEquals("10%", StringUtils.formatPercent(0.1, 0));
assertEquals("10.0%", StringUtils.formatPercent(0.1, 1));
assertEquals("10.00%", StringUtils.formatPercent(0.1, 2));
assertEquals("1%", StringUtils.formatPercent(0.00543, 0));
assertEquals("0.5%", StringUtils.formatPercent(0.00543, 1));
assertEquals("0.54%", StringUtils.formatPercent(0.00543, 2));
assertEquals("0.543%", StringUtils.formatPercent(0.00543, 3));
assertEquals("0.5430%", StringUtils.formatPercent(0.00543, 4));
} }
@Test @Test
@ -314,10 +374,9 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
} }
long et = System.nanoTime(); long et = System.nanoTime();
if (outer > 3) { if (outer > 3) {
System.out.println( System.out.println( (useOurs ? "StringUtils impl" : "Java impl")
(useOurs ? "StringUtils impl" : "Java impl") + + " #" + outer + ":" + (et - st)/1000000 + "ms, components="
" #" + outer + ":" + + components );
(et - st)/1000000 + "ms");
} }
} }
} }

View File

@ -17,6 +17,8 @@
*/ */
package org.apache.hadoop.fs.http.client; package org.apache.hadoop.fs.http.client;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.ContentSummary;
@ -86,6 +88,7 @@ public class HttpFSFileSystem extends FileSystem
public static final String PERMISSION_PARAM = "permission"; public static final String PERMISSION_PARAM = "permission";
public static final String DESTINATION_PARAM = "destination"; public static final String DESTINATION_PARAM = "destination";
public static final String RECURSIVE_PARAM = "recursive"; public static final String RECURSIVE_PARAM = "recursive";
public static final String SOURCES_PARAM = "sources";
public static final String OWNER_PARAM = "owner"; public static final String OWNER_PARAM = "owner";
public static final String GROUP_PARAM = "group"; public static final String GROUP_PARAM = "group";
public static final String MODIFICATION_TIME_PARAM = "modificationtime"; public static final String MODIFICATION_TIME_PARAM = "modificationtime";
@ -167,7 +170,7 @@ public class HttpFSFileSystem extends FileSystem
GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET), GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
GETFILECHECKSUM(HTTP_GET), GETFILEBLOCKLOCATIONS(HTTP_GET), GETFILECHECKSUM(HTTP_GET), GETFILEBLOCKLOCATIONS(HTTP_GET),
INSTRUMENTATION(HTTP_GET), INSTRUMENTATION(HTTP_GET),
APPEND(HTTP_POST), APPEND(HTTP_POST), CONCAT(HTTP_POST),
CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT), CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT),
SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT), SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
DELETE(HTTP_DELETE); DELETE(HTTP_DELETE);
@ -528,6 +531,29 @@ public class HttpFSFileSystem extends FileSystem
HttpURLConnection.HTTP_OK); HttpURLConnection.HTTP_OK);
} }
/**
* Concat existing files together.
* @param f the path to the target destination.
* @param psrcs the paths to the sources to use for the concatenation.
*
* @throws IOException
*/
@Override
public void concat(Path f, Path[] psrcs) throws IOException {
List<String> strPaths = new ArrayList<String>(psrcs.length);
for(Path psrc : psrcs) {
strPaths.add(psrc.toUri().getPath());
}
String srcs = StringUtils.join(",", strPaths);
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.CONCAT.toString());
params.put(SOURCES_PARAM, srcs);
HttpURLConnection conn = getConnection(Operation.CONCAT.getMethod(),
params, f, true);
HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
}
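For orientation, a minimal client-side sketch of the new operation, assuming an HttpFS endpoint reachable through the webhdfs scheme; the URI, host and paths are illustrative assumptions, not part of this change. On the wire the call becomes an HTTP POST with op=CONCAT and the comma-joined sources parameter built above.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HttpFSConcatSketch {
  public static void main(String[] args) throws Exception {
    // Endpoint and paths are illustrative only.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("webhdfs://httpfs-host:14000"), conf);
    // Appends the contents of the source files onto the target; as exercised by
    // the testConcat() case later in this patch, the sources no longer exist afterwards.
    fs.concat(new Path("/user/foo/part-0"),
        new Path[] { new Path("/user/foo/part-1"), new Path("/user/foo/part-2") });
    fs.close();
  }
}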
/** /**
* Renames Path src to Path dst. Can take place on local fs * Renames Path src to Path dst. Can take place on local fs
* or remote DFS. * or remote DFS.

View File

@ -198,6 +198,47 @@ public class FSOperations {
} }
/**
* Executor that performs a concat FileSystemAccess files system operation.
*/
@InterfaceAudience.Private
public static class FSConcat implements FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
private Path[] sources;
/**
* Creates a Concat executor.
*
* @param path target path to concat to.
* @param sources comma separated absolute paths to use as sources.
*/
public FSConcat(String path, String[] sources) {
this.sources = new Path[sources.length];
for(int i = 0; i < sources.length; i++) {
this.sources[i] = new Path(sources[i]);
}
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
fs.concat(path, sources);
return null;
}
}
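As a sketch, this is roughly how the executor could be driven directly against a FileSystem handle; in the server it is dispatched through fsExecute(), as shown in the HttpFSServer hunk further down. The paths are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.http.server.FSOperations;

public class FSConcatSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative paths; the server normally supplies these from the request parameters.
    FSOperations.FSConcat concat = new FSOperations.FSConcat("/user/foo/part-0",
        new String[] { "/user/foo/part-1", "/user/foo/part-2" });
    FileSystem fs = FileSystem.get(new Configuration());
    concat.execute(fs);   // issues fs.concat(target, sources) and returns null
    fs.close();
  }
}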
/** /**
* Executor that performs a content-summary FileSystemAccess files system operation. * Executor that performs a content-summary FileSystemAccess files system operation.
*/ */

View File

@ -58,6 +58,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
PARAMS_DEF.put(Operation.INSTRUMENTATION, new Class[]{DoAsParam.class}); PARAMS_DEF.put(Operation.INSTRUMENTATION, new Class[]{DoAsParam.class});
PARAMS_DEF.put(Operation.APPEND, PARAMS_DEF.put(Operation.APPEND,
new Class[]{DoAsParam.class, DataParam.class}); new Class[]{DoAsParam.class, DataParam.class});
PARAMS_DEF.put(Operation.CONCAT, new Class[]{SourcesParam.class});
PARAMS_DEF.put(Operation.CREATE, PARAMS_DEF.put(Operation.CREATE,
new Class[]{DoAsParam.class, PermissionParam.class, OverwriteParam.class, new Class[]{DoAsParam.class, PermissionParam.class, OverwriteParam.class,
ReplicationParam.class, BlockSizeParam.class, DataParam.class}); ReplicationParam.class, BlockSizeParam.class, DataParam.class});
@ -388,6 +389,25 @@ public class HttpFSParametersProvider extends ParametersProvider {
} }
} }
/**
* Class for concat sources parameter.
*/
@InterfaceAudience.Private
public static class SourcesParam extends StringParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.SOURCES_PARAM;
/**
* Constructor.
*/
public SourcesParam() {
super(NAME, null);
}
}
/** /**
* Class for to-path parameter. * Class for to-path parameter.
*/ */

View File

@ -22,22 +22,23 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem; import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OperationParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AccessTimeParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AccessTimeParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.BlockSizeParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.BlockSizeParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.RecursiveParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DoAsParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DoAsParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.FilterParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.FilterParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.GroupParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.GroupParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ModifiedTimeParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ModifiedTimeParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OffsetParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OffsetParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OperationParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OverwriteParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OverwriteParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OwnerParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OwnerParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.PermissionParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.PermissionParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.RecursiveParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ReplicationParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ReplicationParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.SourcesParam;
import org.apache.hadoop.lib.service.FileSystemAccess; import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.service.FileSystemAccessException; import org.apache.hadoop.lib.service.FileSystemAccessException;
import org.apache.hadoop.lib.service.Groups; import org.apache.hadoop.lib.service.Groups;
@ -403,9 +404,9 @@ public class HttpFSServer {
Response response; Response response;
path = makeAbsolute(path); path = makeAbsolute(path);
MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name()); MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
switch (op.value()) { switch (op.value()) {
case APPEND: { case APPEND: {
String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
Boolean hasData = params.get(DataParam.NAME, DataParam.class); Boolean hasData = params.get(DataParam.NAME, DataParam.class);
if (!hasData) { if (!hasData) {
response = Response.temporaryRedirect( response = Response.temporaryRedirect(
@ -420,6 +421,18 @@ public class HttpFSServer {
} }
break; break;
} }
case CONCAT: {
System.out.println("HTTPFS SERVER CONCAT");
String sources = params.get(SourcesParam.NAME, SourcesParam.class);
FSOperations.FSConcat command =
new FSOperations.FSConcat(path, sources.split(","));
fsExecute(user, null, command);
AUDIT_LOG.info("[{}]", path);
System.out.println("SENT RESPONSE");
response = Response.ok().build();
break;
}
default: { default: {
throw new IOException( throw new IOException(
MessageFormat.format("Invalid HTTP POST operation [{0}]", MessageFormat.format("Invalid HTTP POST operation [{0}]",

View File

@ -28,6 +28,8 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.http.server.HttpFSServerWebApp; import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.HFSTestCase; import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.HadoopUsersConfTestHelper; import org.apache.hadoop.test.HadoopUsersConfTestHelper;
@ -206,6 +208,30 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
} }
} }
private void testConcat() throws Exception {
Configuration config = getProxiedFSConf();
config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
if (!isLocalFS()) {
FileSystem fs = FileSystem.get(config);
fs.mkdirs(getProxiedFSTestDir());
Path path1 = new Path("/test/foo.txt");
Path path2 = new Path("/test/bar.txt");
Path path3 = new Path("/test/derp.txt");
DFSTestUtil.createFile(fs, path1, 1024, (short) 3, 0);
DFSTestUtil.createFile(fs, path2, 1024, (short) 3, 0);
DFSTestUtil.createFile(fs, path3, 1024, (short) 3, 0);
fs.close();
fs = getHttpFSFileSystem();
fs.concat(path1, new Path[]{path2, path3});
fs.close();
fs = FileSystem.get(config);
Assert.assertTrue(fs.exists(path1));
Assert.assertFalse(fs.exists(path2));
Assert.assertFalse(fs.exists(path3));
fs.close();
}
}
private void testRename() throws Exception { private void testRename() throws Exception {
FileSystem fs = FileSystem.get(getProxiedFSConf()); FileSystem fs = FileSystem.get(getProxiedFSConf());
Path path = new Path(getProxiedFSTestDir(), "foo"); Path path = new Path(getProxiedFSTestDir(), "foo");
@ -450,7 +476,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
} }
protected enum Operation { protected enum Operation {
GET, OPEN, CREATE, APPEND, RENAME, DELETE, LIST_STATUS, WORKING_DIRECTORY, MKDIRS, GET, OPEN, CREATE, APPEND, CONCAT, RENAME, DELETE, LIST_STATUS, WORKING_DIRECTORY, MKDIRS,
SET_TIMES, SET_PERMISSION, SET_OWNER, SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY SET_TIMES, SET_PERMISSION, SET_OWNER, SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY
} }
@ -468,6 +494,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
case APPEND: case APPEND:
testAppend(); testAppend();
break; break;
case CONCAT:
testConcat();
case RENAME: case RENAME:
testRename(); testRename();
break; break;

View File

@ -296,7 +296,28 @@ Trunk (Unreleased)
HDFS-4382. Fix typo MAX_NOT_CHANGED_INTERATIONS. (Ted Yu via suresh) HDFS-4382. Fix typo MAX_NOT_CHANGED_INTERATIONS. (Ted Yu via suresh)
Release 2.0.3-alpha - Unreleased HDFS-4340. Update addBlock() to include inode id as additional argument.
(Brandon Li via suresh)
Release 2.0.4-beta - UNRELEASED
INCOMPATIBLE CHANGES
NEW FEATURES
IMPROVEMENTS
OPTIMIZATIONS
BUG FIXES
HDFS-4470. Several HDFS tests attempt file operations on invalid HDFS
paths when running on Windows. (Chris Nauroth via suresh)
HDFS-4471. Namenode WebUI file browsing does not work with wildcard
addresses configured. (Andrew Wang via atm)
Release 2.0.3-alpha - 2013-02-06
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES
@ -317,6 +338,9 @@ Release 2.0.3-alpha - Unreleased
HDFS-4451. hdfs balancer command returns exit code 1 on success instead HDFS-4451. hdfs balancer command returns exit code 1 on success instead
of 0. (Joshua Blatt via suresh) of 0. (Joshua Blatt via suresh)
HDFS-4350. Make enabling of stale marking on read and write paths
independent. (Andrew Wang via suresh)
NEW FEATURES NEW FEATURES
@ -505,6 +529,8 @@ Release 2.0.3-alpha - Unreleased
HDFS-3598. WebHDFS support for file concat. (Plamen Jeliazkov via shv) HDFS-3598. WebHDFS support for file concat. (Plamen Jeliazkov via shv)
HDFS-4456. Add concat to HttpFS and WebHDFS REST API docs. (plamenj2003 via tucu)
OPTIMIZATIONS OPTIMIZATIONS
HDFS-3429. DataNode reads checksums even if client does not need them (todd) HDFS-3429. DataNode reads checksums even if client does not need them (todd)
@ -742,6 +768,28 @@ Release 2.0.3-alpha - Unreleased
HDFS-4428. FsDatasetImpl should disclose what the error is when a rename HDFS-4428. FsDatasetImpl should disclose what the error is when a rename
fails. (Colin Patrick McCabe via atm) fails. (Colin Patrick McCabe via atm)
HDFS-4452. getAdditionalBlock() can create multiple blocks if the client
times out and retries. (shv)
HDFS-4445. All BKJM ledgers are not checked while tailing, so failover will fail.
(Vinay via umamahesh)
HDFS-4462. 2NN will fail to checkpoint after an HDFS upgrade from a
pre-federation version of HDFS. (atm)
HDFS-4404. Create file failure when the machine of first attempted NameNode
is down. (Todd Lipcon via atm)
HDFS-4344. dfshealth.jsp throws NumberFormatException when
dfs.hosts/dfs.hosts.exclude includes port number. (Andy Isaacson via atm)
HDFS-4468. Use the new StringUtils methods added by HADOOP-9252 and fix
TestHDFSCLI and TestQuota. (szetszwo)
HDFS-4458. In DFSUtil.getNameServiceUris(..), convert default fs URI using
NetUtils.createSocketAddr(..) for being consistent with other addresses.
(Binglin Chang via szetszwo)
BREAKDOWN OF HDFS-3077 SUBTASKS BREAKDOWN OF HDFS-3077 SUBTASKS
HDFS-3077. Quorum-based protocol for reading and writing edit logs. HDFS-3077. Quorum-based protocol for reading and writing edit logs.

View File

@ -503,7 +503,8 @@ public class BookKeeperJournalManager implements JournalManager {
@Override @Override
public void selectInputStreams(Collection<EditLogInputStream> streams, public void selectInputStreams(Collection<EditLogInputStream> streams,
long fromTxId, boolean inProgressOk) throws IOException { long fromTxId, boolean inProgressOk) throws IOException {
List<EditLogLedgerMetadata> currentLedgerList = getLedgerList(inProgressOk); List<EditLogLedgerMetadata> currentLedgerList = getLedgerList(fromTxId,
inProgressOk);
try { try {
BookKeeperEditLogInputStream elis = null; BookKeeperEditLogInputStream elis = null;
for (EditLogLedgerMetadata l : currentLedgerList) { for (EditLogLedgerMetadata l : currentLedgerList) {
@ -511,6 +512,8 @@ public class BookKeeperJournalManager implements JournalManager {
if (l.isInProgress()) { if (l.isInProgress()) {
lastTxId = recoverLastTxId(l, false); lastTxId = recoverLastTxId(l, false);
} }
// Check once again; this is required for in-progress ledgers and in case
// there is a gap.
if (fromTxId >= l.getFirstTxId() && fromTxId <= lastTxId) { if (fromTxId >= l.getFirstTxId() && fromTxId <= lastTxId) {
LedgerHandle h; LedgerHandle h;
if (l.isInProgress()) { // we don't want to fence the current journal if (l.isInProgress()) { // we don't want to fence the current journal
@ -523,6 +526,8 @@ public class BookKeeperJournalManager implements JournalManager {
elis = new BookKeeperEditLogInputStream(h, l); elis = new BookKeeperEditLogInputStream(h, l);
elis.skipTo(fromTxId); elis.skipTo(fromTxId);
} else { } else {
// If it does not match, there might be a gap, so we should not
// check further.
return; return;
} }
streams.add(elis); streams.add(elis);
@ -732,6 +737,11 @@ public class BookKeeperJournalManager implements JournalManager {
*/ */
List<EditLogLedgerMetadata> getLedgerList(boolean inProgressOk) List<EditLogLedgerMetadata> getLedgerList(boolean inProgressOk)
throws IOException { throws IOException {
return getLedgerList(-1, inProgressOk);
}
private List<EditLogLedgerMetadata> getLedgerList(long fromTxId,
boolean inProgressOk) throws IOException {
List<EditLogLedgerMetadata> ledgers List<EditLogLedgerMetadata> ledgers
= new ArrayList<EditLogLedgerMetadata>(); = new ArrayList<EditLogLedgerMetadata>();
try { try {
@ -744,6 +754,12 @@ public class BookKeeperJournalManager implements JournalManager {
try { try {
EditLogLedgerMetadata editLogLedgerMetadata = EditLogLedgerMetadata EditLogLedgerMetadata editLogLedgerMetadata = EditLogLedgerMetadata
.read(zkc, legderMetadataPath); .read(zkc, legderMetadataPath);
if (editLogLedgerMetadata.getLastTxId() != HdfsConstants.INVALID_TXID
&& editLogLedgerMetadata.getLastTxId() < fromTxId) {
// Exclude closed edits that have already been read, but include
// in-progress edits, as those are handled by the caller.
continue;
}
ledgers.add(editLogLedgerMetadata); ledgers.add(editLogLedgerMetadata);
} catch (KeeperException.NoNodeException e) { } catch (KeeperException.NoNodeException e) {
LOG.warn("ZNode: " + legderMetadataPath LOG.warn("ZNode: " + legderMetadataPath

View File

@ -21,7 +21,6 @@ import static org.junit.Assert.*;
import org.junit.Test; import org.junit.Test;
import org.junit.Before; import org.junit.Before;
import org.junit.After;
import org.junit.BeforeClass; import org.junit.BeforeClass;
import org.junit.AfterClass; import org.junit.AfterClass;
@ -34,11 +33,9 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.RemoteException;
@ -352,4 +349,42 @@ public class TestBookKeeperAsHASharedDir {
} }
} }
} }
/**
* NameNode should load the edits correctly if the applicable edits are
* present in the BKJM.
*/
@Test
public void testNameNodeMultipleSwitchesUsingBKJM() throws Exception {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
.createJournalURI("/correctEditLogSelection").toString());
BKJMUtil.addJournalManagerDefinition(conf);
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)
.manageNameDfsSharedDirs(false).build();
NameNode nn1 = cluster.getNameNode(0);
NameNode nn2 = cluster.getNameNode(1);
cluster.waitActive();
cluster.transitionToActive(0);
nn1.getRpcServer().rollEditLog(); // Roll Edits from current Active.
// Gracefully transition the current active NameNode to standby.
cluster.transitionToStandby(0);
// Make the other NameNode active and roll edits multiple times
cluster.transitionToActive(1);
nn2.getRpcServer().rollEditLog();
nn2.getRpcServer().rollEditLog();
// Now one more failover; NN1 should be able to become active successfully.
cluster.transitionToStandby(1);
cluster.transitionToActive(0);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
} }

View File

@ -181,10 +181,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY = "dfs.datanode.socket.reuse.keepalive"; public static final String DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY = "dfs.datanode.socket.reuse.keepalive";
public static final int DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 1000; public static final int DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 1000;
// Whether to enable datanode's stale state detection and usage // Whether to enable datanode's stale state detection and usage for reads
public static final String DFS_NAMENODE_CHECK_STALE_DATANODE_KEY = "dfs.namenode.check.stale.datanode"; public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY = "dfs.namenode.avoid.read.stale.datanode";
public static final boolean DFS_NAMENODE_CHECK_STALE_DATANODE_DEFAULT = false; public static final boolean DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT = false;
// Whether to enable datanode's stale state detection and usage // Whether to enable datanode's stale state detection and usage for writes
public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY = "dfs.namenode.avoid.write.stale.datanode"; public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY = "dfs.namenode.avoid.write.stale.datanode";
public static final boolean DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT = false; public static final boolean DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT = false;
// The default value of the time interval for marking datanodes as stale // The default value of the time interval for marking datanodes as stale
@ -195,8 +195,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_STALE_DATANODE_MINIMUM_INTERVAL_KEY = "dfs.namenode.stale.datanode.minimum.interval"; public static final String DFS_NAMENODE_STALE_DATANODE_MINIMUM_INTERVAL_KEY = "dfs.namenode.stale.datanode.minimum.interval";
public static final int DFS_NAMENODE_STALE_DATANODE_MINIMUM_INTERVAL_DEFAULT = 3; // i.e. min_interval is 3 * heartbeat_interval = 9s public static final int DFS_NAMENODE_STALE_DATANODE_MINIMUM_INTERVAL_DEFAULT = 3; // i.e. min_interval is 3 * heartbeat_interval = 9s
// When the number stale datanodes marked as stale reached this certian ratio, // When the percentage of stale datanodes reaches this ratio,
// stop avoiding writing to stale nodes so as to prevent causing hotspots. // allow writing to stale nodes to prevent hotspots.
public static final String DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY = "dfs.namenode.write.stale.datanode.ratio"; public static final String DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY = "dfs.namenode.write.stale.datanode.ratio";
public static final float DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_DEFAULT = 0.5f; public static final float DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_DEFAULT = 0.5f;
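A minimal sketch of setting the renamed keys programmatically; the values below are illustrative (in practice these would typically go into hdfs-site.xml), and only the 0.5f ratio default is taken from the code above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class StaleDatanodeConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Avoid stale datanodes for reads and for writes (both default to false).
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
    // Once this fraction of datanodes is stale, writes fall back to stale nodes
    // to avoid hotspotting the remaining live nodes (default 0.5f).
    conf.setFloat(DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY, 0.75f);
  }
}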

View File

@ -116,6 +116,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
private volatile boolean closed = false; private volatile boolean closed = false;
private String src; private String src;
private final long fileId;
private final long blockSize; private final long blockSize;
private final DataChecksum checksum; private final DataChecksum checksum;
// both dataQueue and ackQueue are protected by dataQueue lock // both dataQueue and ackQueue are protected by dataQueue lock
@ -1149,7 +1150,8 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
long localstart = Time.now(); long localstart = Time.now();
while (true) { while (true) {
try { try {
return dfsClient.namenode.addBlock(src, dfsClient.clientName, block, excludedNodes); return dfsClient.namenode.addBlock(src, dfsClient.clientName,
block, excludedNodes, fileId);
} catch (RemoteException e) { } catch (RemoteException e) {
IOException ue = IOException ue =
e.unwrapRemoteException(FileNotFoundException.class, e.unwrapRemoteException(FileNotFoundException.class,
@ -1262,20 +1264,21 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
return value; return value;
} }
private DFSOutputStream(DFSClient dfsClient, String src, long blockSize, Progressable progress, private DFSOutputStream(DFSClient dfsClient, String src, Progressable progress,
DataChecksum checksum, short replication) throws IOException { HdfsFileStatus stat, DataChecksum checksum) throws IOException {
super(checksum, checksum.getBytesPerChecksum(), checksum.getChecksumSize()); super(checksum, checksum.getBytesPerChecksum(), checksum.getChecksumSize());
int bytesPerChecksum = checksum.getBytesPerChecksum();
this.dfsClient = dfsClient; this.dfsClient = dfsClient;
this.src = src; this.src = src;
this.blockSize = blockSize; this.fileId = stat.getFileId();
this.blockReplication = replication; this.blockSize = stat.getBlockSize();
this.blockReplication = stat.getReplication();
this.progress = progress; this.progress = progress;
if ((progress != null) && DFSClient.LOG.isDebugEnabled()) { if ((progress != null) && DFSClient.LOG.isDebugEnabled()) {
DFSClient.LOG.debug( DFSClient.LOG.debug(
"Set non-null progress callback on DFSOutputStream " + src); "Set non-null progress callback on DFSOutputStream " + src);
} }
final int bytesPerChecksum = checksum.getBytesPerChecksum();
if ( bytesPerChecksum < 1 || blockSize % bytesPerChecksum != 0) { if ( bytesPerChecksum < 1 || blockSize % bytesPerChecksum != 0) {
throw new IOException("io.bytes.per.checksum(" + bytesPerChecksum + throw new IOException("io.bytes.per.checksum(" + bytesPerChecksum +
") and blockSize(" + blockSize + ") and blockSize(" + blockSize +
@ -1287,19 +1290,27 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
} }
/** Construct a new output stream for creating a file. */ /** Construct a new output stream for creating a file. */
private DFSOutputStream(DFSClient dfsClient, String src, FsPermission masked, private DFSOutputStream(DFSClient dfsClient, String src, HdfsFileStatus stat,
EnumSet<CreateFlag> flag, boolean createParent, short replication, EnumSet<CreateFlag> flag, Progressable progress,
long blockSize, Progressable progress, int buffersize,
DataChecksum checksum) throws IOException { DataChecksum checksum) throws IOException {
this(dfsClient, src, blockSize, progress, checksum, replication); this(dfsClient, src, progress, stat, checksum);
this.shouldSyncBlock = flag.contains(CreateFlag.SYNC_BLOCK); this.shouldSyncBlock = flag.contains(CreateFlag.SYNC_BLOCK);
computePacketChunkSize(dfsClient.getConf().writePacketSize, computePacketChunkSize(dfsClient.getConf().writePacketSize,
checksum.getBytesPerChecksum()); checksum.getBytesPerChecksum());
streamer = new DataStreamer();
}
static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
short replication, long blockSize, Progressable progress, int buffersize,
DataChecksum checksum) throws IOException {
final HdfsFileStatus stat;
try { try {
dfsClient.namenode.create( stat = dfsClient.namenode.create(src, masked, dfsClient.clientName,
src, masked, dfsClient.clientName, new EnumSetWritable<CreateFlag>(flag), createParent, replication, blockSize); new EnumSetWritable<CreateFlag>(flag), createParent, replication,
blockSize);
} catch(RemoteException re) { } catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class, throw re.unwrapRemoteException(AccessControlException.class,
DSQuotaExceededException.class, DSQuotaExceededException.class,
@ -1311,30 +1322,20 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
UnresolvedPathException.class, UnresolvedPathException.class,
SnapshotAccessControlException.class); SnapshotAccessControlException.class);
} }
streamer = new DataStreamer(); final DFSOutputStream out = new DFSOutputStream(dfsClient, src, stat,
} flag, progress, checksum);
out.start();
static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
short replication, long blockSize, Progressable progress, int buffersize,
DataChecksum checksum) throws IOException {
final DFSOutputStream out = new DFSOutputStream(dfsClient, src, masked,
flag, createParent, replication, blockSize, progress, buffersize,
checksum);
out.streamer.start();
return out; return out;
} }
/** Construct a new output stream for append. */ /** Construct a new output stream for append. */
private DFSOutputStream(DFSClient dfsClient, String src, int buffersize, Progressable progress, private DFSOutputStream(DFSClient dfsClient, String src,
LocatedBlock lastBlock, HdfsFileStatus stat, Progressable progress, LocatedBlock lastBlock, HdfsFileStatus stat,
DataChecksum checksum) throws IOException { DataChecksum checksum) throws IOException {
this(dfsClient, src, stat.getBlockSize(), progress, checksum, stat.getReplication()); this(dfsClient, src, progress, stat, checksum);
initialFileSize = stat.getLen(); // length of file when opened initialFileSize = stat.getLen(); // length of file when opened
//
// The last partial block of the file has to be filled. // The last partial block of the file has to be filled.
//
if (lastBlock != null) { if (lastBlock != null) {
// indicate that we are appending to an existing block // indicate that we are appending to an existing block
bytesCurBlock = lastBlock.getBlockSize(); bytesCurBlock = lastBlock.getBlockSize();
@ -1349,9 +1350,9 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src, static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
int buffersize, Progressable progress, LocatedBlock lastBlock, int buffersize, Progressable progress, LocatedBlock lastBlock,
HdfsFileStatus stat, DataChecksum checksum) throws IOException { HdfsFileStatus stat, DataChecksum checksum) throws IOException {
final DFSOutputStream out = new DFSOutputStream(dfsClient, src, buffersize, final DFSOutputStream out = new DFSOutputStream(dfsClient, src,
progress, lastBlock, stat, checksum); progress, lastBlock, stat, checksum);
out.streamer.start(); out.start();
return out; return out;
} }
@ -1718,6 +1719,10 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
isClosed(); isClosed();
} }
private synchronized void start() {
streamer.start();
}
/** /**
* Aborts this output stream and releases any system * Aborts this output stream and releases any system
* resources associated with this stream. * resources associated with this stream.

View File

@ -134,7 +134,7 @@ public class DFSUtil {
/** /**
* Comparator for sorting DataNodeInfo[] based on decommissioned/stale states. * Comparator for sorting DataNodeInfo[] based on decommissioned/stale states.
* Decommissioned/stale nodes are moved to the end of the array on sorting * Decommissioned/stale nodes are moved to the end of the array on sorting
* with this compartor. * with this comparator.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public static class DecomStaleComparator implements Comparator<DatanodeInfo> { public static class DecomStaleComparator implements Comparator<DatanodeInfo> {
@ -144,7 +144,7 @@ public class DFSUtil {
* Constructor of DecomStaleComparator * Constructor of DecomStaleComparator
* *
* @param interval * @param interval
* The time invertal for marking datanodes as stale is passed from * The time interval for marking datanodes as stale is passed from
* outside, since the interval may be changed dynamically * outside, since the interval may be changed dynamically
*/ */
public DecomStaleComparator(long interval) { public DecomStaleComparator(long interval) {
@ -780,6 +780,13 @@ public class DFSUtil {
// Add the default URI if it is an HDFS URI. // Add the default URI if it is an HDFS URI.
URI defaultUri = FileSystem.getDefaultUri(conf); URI defaultUri = FileSystem.getDefaultUri(conf);
// If defaultUri is in ip:port format, convert it to
// hostname:port format
if (defaultUri != null && (defaultUri.getPort() != -1)) {
defaultUri = createUri(defaultUri.getScheme(),
NetUtils.createSocketAddr(defaultUri.getHost(),
defaultUri.getPort()));
}
if (defaultUri != null && if (defaultUri != null &&
HdfsConstants.HDFS_URI_SCHEME.equals(defaultUri.getScheme()) && HdfsConstants.HDFS_URI_SCHEME.equals(defaultUri.getScheme()) &&
!nonPreferredUris.contains(defaultUri)) { !nonPreferredUris.contains(defaultUri)) {
@ -939,6 +946,11 @@ public class DFSUtil {
return capacity <= 0 ? 0 : (remaining * 100.0f)/capacity; return capacity <= 0 ? 0 : (remaining * 100.0f)/capacity;
} }
/** Convert percentage to a string. */
public static String percent2String(double percentage) {
return StringUtils.format("%.2f%%", percentage);
}
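For reference, a tiny sketch of the output this helper produces (the input values are illustrative):

public class PercentFormatSketch {
  public static void main(String[] args) {
    // "%.2f%%" formatting with two decimal places:
    System.out.println(org.apache.hadoop.hdfs.DFSUtil.percent2String(12.3456)); // 12.35%
    System.out.println(org.apache.hadoop.hdfs.DFSUtil.percent2String(0));       // 0.00%
  }
}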
/** /**
* Round bytes to GiB (gibibyte) * Round bytes to GiB (gibibyte)
* @param bytes number of bytes * @param bytes number of bytes

View File

@ -151,6 +151,8 @@ public interface ClientProtocol {
* @param replication block replication factor. * @param replication block replication factor.
* @param blockSize maximum block size. * @param blockSize maximum block size.
* *
* @return the status of the created file; it may be null if the server
* does not support returning the file status
* @throws AccessControlException If access is denied * @throws AccessControlException If access is denied
* @throws AlreadyBeingCreatedException if the path does not exist. * @throws AlreadyBeingCreatedException if the path does not exist.
* @throws DSQuotaExceededException If file creation violates disk space * @throws DSQuotaExceededException If file creation violates disk space
@ -170,13 +172,14 @@ public interface ClientProtocol {
* RuntimeExceptions: * RuntimeExceptions:
* @throws InvalidPathException Path <code>src</code> is invalid * @throws InvalidPathException Path <code>src</code> is invalid
*/ */
public void create(String src, FsPermission masked, String clientName, public HdfsFileStatus create(String src, FsPermission masked,
EnumSetWritable<CreateFlag> flag, boolean createParent, String clientName, EnumSetWritable<CreateFlag> flag,
short replication, long blockSize) throws AccessControlException, boolean createParent, short replication, long blockSize)
AlreadyBeingCreatedException, DSQuotaExceededException, throws AccessControlException, AlreadyBeingCreatedException,
FileAlreadyExistsException, FileNotFoundException, DSQuotaExceededException, FileAlreadyExistsException,
NSQuotaExceededException, ParentNotDirectoryException, SafeModeException, FileNotFoundException, NSQuotaExceededException,
UnresolvedLinkException, SnapshotAccessControlException, IOException; ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
SnapshotAccessControlException, IOException;
/** /**
* Append to the end of the file. * Append to the end of the file.
@ -302,6 +305,7 @@ public interface ClientProtocol {
* @param previous previous block * @param previous previous block
* @param excludeNodes a list of nodes that should not be * @param excludeNodes a list of nodes that should not be
* allocated for the current block * allocated for the current block
* @param fileId the id uniquely identifying a file
* *
* @return LocatedBlock allocated block information. * @return LocatedBlock allocated block information.
* *
@ -316,7 +320,7 @@ public interface ClientProtocol {
*/ */
@Idempotent @Idempotent
public LocatedBlock addBlock(String src, String clientName, public LocatedBlock addBlock(String src, String clientName,
ExtendedBlock previous, DatanodeInfo[] excludeNodes) ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId)
throws AccessControlException, FileNotFoundException, throws AccessControlException, FileNotFoundException,
NotReplicatedYetException, SafeModeException, UnresolvedLinkException, NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
IOException; IOException;

View File

@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.protocol;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.util.StringUtils;
+import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.long2String;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -41,9 +41,9 @@ public class DSQuotaExceededException extends QuotaExceededException {
   public String getMessage() {
     String msg = super.getMessage();
     if (msg == null) {
-      return "The DiskSpace quota" + (pathName==null?"":(" of " + pathName)) +
-          " is exceeded: quota=" + StringUtils.humanReadableInt(quota) +
-          " diskspace consumed=" + StringUtils.humanReadableInt(count);
+      return "The DiskSpace quota" + (pathName==null?"": " of " + pathName)
+          + " is exceeded: quota = " + quota + " B = " + long2String(quota, "B", 2)
+          + " but diskspace consumed = " + count + " B = " + long2String(count, "B", 2);
     } else {
       return msg;
     }
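The new message prints both the raw byte counts and a human-readable form via StringUtils.TraditionalBinaryPrefix.long2String(value, "B", 2). A standalone approximation of that formatting; the helper below is illustrative, not the Hadoop implementation, and its rounding and trailing zeros may differ in edge cases:

    class BinaryPrefixSketch {
      static String long2String(long n, String unit, int decimals) {
        // Scale down by 1024 until the value fits under the next prefix.
        String[] prefixes = {"", "K", "M", "G", "T", "P", "E"};
        double value = n;
        int i = 0;
        while (value >= 1024 && i < prefixes.length - 1) {
          value /= 1024;
          i++;
        }
        return String.format("%." + decimals + "f %s%s", value, prefixes[i], unit);
      }

      public static void main(String[] args) {
        long quota = 10L * 1024 * 1024;          // 10 MB quota
        long consumed = 10L * 1024 * 1024 + 512; // just over the quota
        System.out.println("The DiskSpace quota is exceeded: quota = " + quota
            + " B = " + long2String(quota, "B", 2) + " but diskspace consumed = "
            + consumed + " B = " + long2String(consumed, "B", 2));
      }
    }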


@@ -17,10 +17,13 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import static org.apache.hadoop.hdfs.DFSUtil.percent2String;
+
 import java.util.Date;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
@@ -244,8 +247,8 @@ public class DatanodeInfo extends DatanodeID implements Node {
     buffer.append("DFS Used: "+u+" ("+StringUtils.byteDesc(u)+")"+"\n");
     buffer.append("Non DFS Used: "+nonDFSUsed+" ("+StringUtils.byteDesc(nonDFSUsed)+")"+"\n");
     buffer.append("DFS Remaining: " +r+ " ("+StringUtils.byteDesc(r)+")"+"\n");
-    buffer.append("DFS Used%: "+StringUtils.limitDecimalTo2(usedPercent)+"%\n");
-    buffer.append("DFS Remaining%: "+StringUtils.limitDecimalTo2(remainingPercent)+"%\n");
+    buffer.append("DFS Used%: "+percent2String(usedPercent) + "\n");
+    buffer.append("DFS Remaining%: "+percent2String(remainingPercent) + "\n");
     buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
     return buffer.toString();
   }
@@ -269,7 +272,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     }
     buffer.append(" " + c + "(" + StringUtils.byteDesc(c)+")");
     buffer.append(" " + u + "(" + StringUtils.byteDesc(u)+")");
-    buffer.append(" " + StringUtils.limitDecimalTo2(((1.0*u)/c)*100)+"%");
+    buffer.append(" " + percent2String(u/(double)c));
     buffer.append(" " + r + "(" + StringUtils.byteDesc(r)+")");
     buffer.append(" " + new Date(lastUpdate));
     return buffer.toString();
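Both report methods now delegate the two-decimal percentage formatting to DFSUtil.percent2String instead of composing limitDecimalTo2 with a literal "%". A self-contained sketch of what such a helper does; the NaN handling below is an assumption, not taken from the diff:

    class PercentFormatSketch {
      static String percent2String(double percentage) {
        // Two decimal places followed by a percent sign.
        return Double.isNaN(percentage) ? "---%"
            : String.format("%.2f%%", percentage);
      }

      public static void main(String[] args) {
        long used = 42L * 1024 * 1024 * 1024;
        long capacity = 100L * 1024 * 1024 * 1024;
        System.out.println("DFS Used%: " + percent2String(100.0 * used / capacity));
      }
    }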


@@ -40,6 +40,7 @@ public class HdfsFileStatus {
   private FsPermission permission;
   private String owner;
   private String group;
+  private long fileId;
 
   public static final byte[] EMPTY_NAME = new byte[0];
@@ -55,11 +56,12 @@
    * @param owner the owner of the path
    * @param group the group of the path
    * @param path the local name in java UTF8 encoding the same as that in-memory
+   * @param fileId the file id
    */
   public HdfsFileStatus(long length, boolean isdir, int block_replication,
       long blocksize, long modification_time, long access_time,
       FsPermission permission, String owner, String group,
-      byte[] symlink, byte[] path) {
+      byte[] symlink, byte[] path, long fileId) {
     this.length = length;
     this.isdir = isdir;
     this.block_replication = (short)block_replication;
@@ -75,6 +77,7 @@
     this.group = (group == null) ? "" : group;
     this.symlink = symlink;
     this.path = path;
+    this.fileId = fileId;
   }
 
   /**
@@ -223,4 +226,8 @@
   final public byte[] getSymlinkInBytes() {
     return symlink;
   }
+
+  final public long getFileId() {
+    return fileId;
+  }
 }


@@ -44,19 +44,19 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
    * @param group group
    * @param symlink symbolic link
    * @param path local path name in java UTF8 format
+   * @param fileId the file id
    * @param locations block locations
    */
   public HdfsLocatedFileStatus(long length, boolean isdir,
-      int block_replication,
-      long blocksize, long modification_time, long access_time,
-      FsPermission permission, String owner, String group,
-      byte[] symlink, byte[] path, LocatedBlocks locations) {
-    super(length, isdir, block_replication, blocksize, modification_time,
-        access_time, permission, owner, group, symlink, path);
+      int block_replication, long blocksize, long modification_time,
+      long access_time, FsPermission permission, String owner, String group,
+      byte[] symlink, byte[] path, long fileId, LocatedBlocks locations) {
+    super(length, isdir, block_replication, blocksize, modification_time,
+        access_time, permission, owner, group, symlink, path, fileId);
     this.locations = locations;
   }
 
   public LocatedBlocks getBlockLocations() {
     return locations;
   }
 }


@@ -40,8 +40,9 @@ public class SnapshottableDirectoryStatus {
   public SnapshottableDirectoryStatus(long modification_time, long access_time,
       FsPermission permission, String owner, String group, byte[] localName,
       int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
+    //TODO: fix fileId
     this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
-        access_time, permission, owner, group, null, localName);
+        access_time, permission, owner, group, null, localName, 0L);
     this.snapshotNumber = snapshotNumber;
     this.snapshotQuota = snapshotQuota;
     this.parentFullPath = parentFullPath;


@@ -297,14 +297,19 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public CreateResponseProto create(RpcController controller,
       CreateRequestProto req) throws ServiceException {
     try {
-      server.create(req.getSrc(), PBHelper.convert(req.getMasked()),
-          req.getClientName(), PBHelper.convert(req.getCreateFlag()),
-          req.getCreateParent(), (short) req.getReplication(),
-          req.getBlockSize());
+      HdfsFileStatus result = server.create(req.getSrc(),
+          PBHelper.convert(req.getMasked()), req.getClientName(),
+          PBHelper.convert(req.getCreateFlag()), req.getCreateParent(),
+          (short) req.getReplication(), req.getBlockSize());
+
+      if (result != null) {
+        return CreateResponseProto.newBuilder().setFs(PBHelper.convert(result))
+            .build();
+      }
+      return VOID_CREATE_RESPONSE;
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    return VOID_CREATE_RESPONSE;
   }
 
   @Override
@@ -377,13 +382,14 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     try {
       List<DatanodeInfoProto> excl = req.getExcludeNodesList();
-      LocatedBlock result = server.addBlock(req.getSrc(), req.getClientName(),
+      LocatedBlock result = server.addBlock(
+          req.getSrc(),
+          req.getClientName(),
           req.hasPrevious() ? PBHelper.convert(req.getPrevious()) : null,
-          (excl == null ||
-           excl.size() == 0) ? null :
-          PBHelper.convert(excl.toArray(new DatanodeInfoProto[excl.size()])));
-      return AddBlockResponseProto.newBuilder().setBlock(
-          PBHelper.convert(result)).build();
+          (excl == null || excl.size() == 0) ? null : PBHelper.convert(excl
+              .toArray(new DatanodeInfoProto[excl.size()])), req.getFileId());
+      return AddBlockResponseProto.newBuilder()
+          .setBlock(PBHelper.convert(result)).build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
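The create() translator above only populates the optional fs field of CreateResponseProto when the server actually returned a status, and otherwise falls back to the pre-built empty response; readers then probe hasFs() before touching the field. A protobuf-free toy sketch of that null-safe optional-field pattern; all type names below are invented for illustration:

    import java.util.Optional;

    class OptionalResponseSketch {
      static final class CreateResponse {
        final Optional<String> fs;   // stands in for the optional status field
        CreateResponse(Optional<String> fs) { this.fs = fs; }
        boolean hasFs() { return fs.isPresent(); }
      }

      static final CreateResponse VOID_CREATE_RESPONSE =
          new CreateResponse(Optional.empty());

      static CreateResponse toResponse(String resultOrNull) {
        // Only wrap a value when the server produced one.
        return resultOrNull != null
            ? new CreateResponse(Optional.of(resultOrNull))
            : VOID_CREATE_RESPONSE;
      }

      public static void main(String[] args) {
        System.out.println(toResponse("status").hasFs());  // true
        System.out.println(toResponse(null).hasFs());      // false
      }
    }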


@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Comple
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;
@ -110,6 +111,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException; import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.EnumSetWritable;
@ -204,13 +206,14 @@ public class ClientNamenodeProtocolTranslatorPB implements
} }
@Override @Override
public void create(String src, FsPermission masked, String clientName, public HdfsFileStatus create(String src, FsPermission masked,
EnumSetWritable<CreateFlag> flag, boolean createParent, String clientName, EnumSetWritable<CreateFlag> flag,
short replication, long blockSize) throws AccessControlException, boolean createParent, short replication, long blockSize)
AlreadyBeingCreatedException, DSQuotaExceededException, throws AccessControlException, AlreadyBeingCreatedException,
FileAlreadyExistsException, FileNotFoundException, DSQuotaExceededException, FileAlreadyExistsException,
NSQuotaExceededException, ParentNotDirectoryException, SafeModeException, FileNotFoundException, NSQuotaExceededException,
UnresolvedLinkException, IOException { ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
IOException {
CreateRequestProto req = CreateRequestProto.newBuilder() CreateRequestProto req = CreateRequestProto.newBuilder()
.setSrc(src) .setSrc(src)
.setMasked(PBHelper.convert(masked)) .setMasked(PBHelper.convert(masked))
@ -221,7 +224,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
.setBlockSize(blockSize) .setBlockSize(blockSize)
.build(); .build();
try { try {
rpcProxy.create(null, req); CreateResponseProto res = rpcProxy.create(null, req);
return res.hasFs() ? PBHelper.convert(res.getFs()) : null;
} catch (ServiceException e) { } catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e); throw ProtobufHelper.getRemoteException(e);
} }
@ -305,15 +309,15 @@ public class ClientNamenodeProtocolTranslatorPB implements
       throw ProtobufHelper.getRemoteException(e);
     }
   }
 
   @Override
   public LocatedBlock addBlock(String src, String clientName,
-      ExtendedBlock previous, DatanodeInfo[] excludeNodes)
+      ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId)
       throws AccessControlException, FileNotFoundException,
       NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
       IOException {
-    AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder().setSrc(src)
-        .setClientName(clientName);
+    AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder()
+        .setSrc(src).setClientName(clientName).setFileId(fileId);
     if (previous != null)
       req.setPrevious(PBHelper.convert(previous));
     if (excludeNodes != null)


@ -113,6 +113,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature; import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand; import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
@ -1052,6 +1053,7 @@ public class PBHelper {
fs.getFileType().equals(FileType.IS_SYMLINK) ? fs.getFileType().equals(FileType.IS_SYMLINK) ?
fs.getSymlink().toByteArray() : null, fs.getSymlink().toByteArray() : null,
fs.getPath().toByteArray(), fs.getPath().toByteArray(),
fs.hasFileId()? fs.getFileId(): INodeId.GRANDFATHER_INODE_ID,
fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null); fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null);
} }
@ -1091,6 +1093,7 @@ public class PBHelper {
setPermission(PBHelper.convert(fs.getPermission())). setPermission(PBHelper.convert(fs.getPermission())).
setOwner(fs.getOwner()). setOwner(fs.getOwner()).
setGroup(fs.getGroup()). setGroup(fs.getGroup()).
setFileId(fs.getFileId()).
setPath(ByteString.copyFrom(fs.getLocalNameInBytes())); setPath(ByteString.copyFrom(fs.getLocalNameInBytes()));
if (fs.isSymlink()) { if (fs.isSymlink()) {
builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes())); builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
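Note that these are the import hunks of the client-side translator and of PBHelper; the PBHelper conversion further up falls back to INodeId.GRANDFATHER_INODE_ID whenever the wire message carries no fileId, so statuses produced by older servers still convert. A small, protobuf-free sketch of that has/get-or-default compatibility idiom; the OptionalLong-based message stand-in is invented for illustration:

    import java.util.OptionalLong;

    class FileIdCompatSketch {
      static final long GRANDFATHER_INODE_ID = 0;

      static long fileIdOrDefault(OptionalLong wireFileId) {
        // Prefer the transmitted id, otherwise use the backward-compatible default.
        return wireFileId.isPresent() ? wireFileId.getAsLong()
                                      : GRANDFATHER_INODE_ID;
      }

      public static void main(String[] args) {
        System.out.println(fileIdOrDefault(OptionalLong.of(16387))); // new server
        System.out.println(fileIdOrDefault(OptionalLong.empty()));   // old server
      }
    }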


@ -126,15 +126,26 @@ public class DatanodeManager {
private final long heartbeatExpireInterval; private final long heartbeatExpireInterval;
/** Ask Datanode only up to this many blocks to delete. */ /** Ask Datanode only up to this many blocks to delete. */
final int blockInvalidateLimit; final int blockInvalidateLimit;
/** Whether or not to check stale DataNodes for read/write */
private final boolean checkForStaleDataNodes;
/** The interval for judging stale DataNodes for read/write */ /** The interval for judging stale DataNodes for read/write */
private final long staleInterval; private final long staleInterval;
/** Whether or not to avoid using stale DataNodes for writing */ /** Whether or not to avoid using stale DataNodes for reading */
private volatile boolean avoidStaleDataNodesForWrite; private final boolean avoidStaleDataNodesForRead;
/**
* Whether or not to avoid using stale DataNodes for writing.
* Note that, even if this is configured, the policy may be
* temporarily disabled when a high percentage of the nodes
* are marked as stale.
*/
private final boolean avoidStaleDataNodesForWrite;
/**
* When the ratio of stale datanodes reaches this number, stop avoiding
* writing to stale datanodes, i.e., continue using stale nodes for writing.
*/
private final float ratioUseStaleDataNodesForWrite;
/** The number of stale DataNodes */ /** The number of stale DataNodes */
private volatile int numStaleNodes; private volatile int numStaleNodes;
@ -183,14 +194,23 @@ public class DatanodeManager {
DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit); DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit);
LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
+ "=" + this.blockInvalidateLimit); + "=" + this.blockInvalidateLimit);
checkForStaleDataNodes = conf.getBoolean(
DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY,
DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_DEFAULT);
staleInterval = getStaleIntervalFromConf(conf, heartbeatExpireInterval); this.avoidStaleDataNodesForRead = conf.getBoolean(
avoidStaleDataNodesForWrite = getAvoidStaleForWriteFromConf(conf, DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY,
checkForStaleDataNodes); DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT);
this.avoidStaleDataNodesForWrite = conf.getBoolean(
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT);
this.staleInterval = getStaleIntervalFromConf(conf, heartbeatExpireInterval);
this.ratioUseStaleDataNodesForWrite = conf.getFloat(
DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY,
DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_DEFAULT);
Preconditions.checkArgument(
(ratioUseStaleDataNodesForWrite > 0 &&
ratioUseStaleDataNodesForWrite <= 1.0f),
DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY +
" = '" + ratioUseStaleDataNodesForWrite + "' is invalid. " +
"It should be a positive non-zero float value, not greater than 1.0f.");
} }
private static long getStaleIntervalFromConf(Configuration conf, private static long getStaleIntervalFromConf(Configuration conf,
@ -230,22 +250,6 @@ public class DatanodeManager {
return staleInterval; return staleInterval;
} }
static boolean getAvoidStaleForWriteFromConf(Configuration conf,
boolean checkForStale) {
boolean avoid = conf.getBoolean(
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT);
boolean avoidStaleDataNodesForWrite = checkForStale && avoid;
if (!checkForStale && avoid) {
LOG.warn("Cannot set "
+ DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY
+ " as false while setting "
+ DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY
+ " as true.");
}
return avoidStaleDataNodesForWrite;
}
void activate(final Configuration conf) { void activate(final Configuration conf) {
final DecommissionManager dm = new DecommissionManager(namesystem, blockManager); final DecommissionManager dm = new DecommissionManager(namesystem, blockManager);
this.decommissionthread = new Daemon(dm.new Monitor( this.decommissionthread = new Daemon(dm.new Monitor(
@ -299,7 +303,7 @@ public class DatanodeManager {
client = new NodeBase(rName + NodeBase.PATH_SEPARATOR_STR + targethost); client = new NodeBase(rName + NodeBase.PATH_SEPARATOR_STR + targethost);
} }
Comparator<DatanodeInfo> comparator = checkForStaleDataNodes ? Comparator<DatanodeInfo> comparator = avoidStaleDataNodesForRead ?
new DFSUtil.DecomStaleComparator(staleInterval) : new DFSUtil.DecomStaleComparator(staleInterval) :
DFSUtil.DECOM_COMPARATOR; DFSUtil.DECOM_COMPARATOR;
@ -825,32 +829,20 @@ public class DatanodeManager {
   }
 
   /* Getter and Setter for stale DataNodes related attributes */
 
   /**
-   * @return whether or not to avoid writing to stale datanodes
-   */
-  public boolean isAvoidingStaleDataNodesForWrite() {
-    return avoidStaleDataNodesForWrite;
-  }
-
-  /**
-   * Set the value of {@link DatanodeManager#avoidStaleDataNodesForWrite}.
-   * The HeartbeatManager disable avoidStaleDataNodesForWrite when more than
-   * half of the DataNodes are marked as stale.
+   * Whether stale datanodes should be avoided as targets on the write path.
+   * The result of this function may change if the number of stale datanodes
+   * eclipses a configurable threshold.
    *
-   * @param avoidStaleDataNodesForWrite
-   *          The value to set to
-   *          {@link DatanodeManager#avoidStaleDataNodesForWrite}
+   * @return whether stale datanodes should be avoided on the write path
    */
-  void setAvoidStaleDataNodesForWrite(boolean avoidStaleDataNodesForWrite) {
-    this.avoidStaleDataNodesForWrite = avoidStaleDataNodesForWrite;
-  }
-
-  /**
-   * @return Whether or not to check stale DataNodes for R/W
-   */
-  boolean isCheckingForStaleDataNodes() {
-    return checkForStaleDataNodes;
+  public boolean shouldAvoidStaleDataNodesForWrite() {
+    // If # stale exceeds maximum staleness ratio, disable stale
+    // datanode avoidance on the write path
+    return avoidStaleDataNodesForWrite &&
+        (numStaleNodes <= heartbeatManager.getLiveDatanodeCount()
+            * ratioUseStaleDataNodesForWrite);
   }
 
   /**
@ -967,7 +959,7 @@ public class DatanodeManager {
         port = DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT;
       } else {
         hostStr = hostLine.substring(0, idx);
-        port = Integer.valueOf(hostLine.substring(idx));
+        port = Integer.valueOf(hostLine.substring(idx+1));
       }
 
       if (InetAddresses.isInetAddress(hostStr)) {
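The shouldAvoidStaleDataNodesForWrite() rewrite above keeps stale-node avoidance active only while the stale count stays at or below liveNodes * ratioUseStaleDataNodesForWrite. A standalone sketch of that threshold check; parameter names follow the diff, and the 0.5f ratio in the example is illustrative rather than a claimed default:

    class StaleWriteAvoidanceSketch {
      static boolean shouldAvoidStaleForWrite(boolean avoidConfigured,
          int numStaleNodes, int liveNodes, float ratioUseStaleDataNodesForWrite) {
        // Avoid stale nodes only while their share of the live nodes is small enough.
        return avoidConfigured
            && numStaleNodes <= liveNodes * ratioUseStaleDataNodesForWrite;
      }

      public static void main(String[] args) {
        // With a ratio of 0.5f, 40 stale out of 100 live still avoids stale
        // nodes, but 60 stale out of 100 stops avoiding them.
        System.out.println(shouldAvoidStaleForWrite(true, 40, 100, 0.5f)); // true
        System.out.println(shouldAvoidStaleForWrite(true, 60, 100, 0.5f)); // false
      }
    }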


@ -30,8 +30,6 @@ import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import com.google.common.base.Preconditions;
/** /**
* Manage the heartbeats received from datanodes. * Manage the heartbeats received from datanodes.
* The datanode list and statistics are synchronized * The datanode list and statistics are synchronized
@ -56,16 +54,7 @@ class HeartbeatManager implements DatanodeStatistics {
private final long heartbeatRecheckInterval; private final long heartbeatRecheckInterval;
/** Heartbeat monitor thread */ /** Heartbeat monitor thread */
private final Daemon heartbeatThread = new Daemon(new Monitor()); private final Daemon heartbeatThread = new Daemon(new Monitor());
/**
* The initial setting of end user which indicates whether or not to avoid
* writing to stale datanodes.
*/
private final boolean initialAvoidWriteStaleNodes;
/**
* When the ratio of stale datanodes reaches this number, stop avoiding
* writing to stale datanodes, i.e., continue using stale nodes for writing.
*/
private final float ratioUseStaleDataNodesForWrite;
final Namesystem namesystem; final Namesystem namesystem;
final BlockManager blockManager; final BlockManager blockManager;
@ -74,30 +63,25 @@ class HeartbeatManager implements DatanodeStatistics {
final BlockManager blockManager, final Configuration conf) { final BlockManager blockManager, final Configuration conf) {
this.namesystem = namesystem; this.namesystem = namesystem;
this.blockManager = blockManager; this.blockManager = blockManager;
boolean checkStaleNodes = conf.getBoolean( boolean avoidStaleDataNodesForWrite = conf.getBoolean(
DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,
DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_DEFAULT); DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT);
long recheckInterval = conf.getInt( long recheckInterval = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 min DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 min
long staleInterval = conf.getLong( long staleInterval = conf.getLong(
DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);// 30s DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);// 30s
this.initialAvoidWriteStaleNodes = DatanodeManager
.getAvoidStaleForWriteFromConf(conf, checkStaleNodes); if (avoidStaleDataNodesForWrite && staleInterval < recheckInterval) {
this.ratioUseStaleDataNodesForWrite = conf.getFloat( this.heartbeatRecheckInterval = staleInterval;
DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY, LOG.info("Setting heartbeat recheck interval to " + staleInterval
DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_DEFAULT); + " since " + DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY
Preconditions.checkArgument( + " is less than "
(ratioUseStaleDataNodesForWrite > 0 && + DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY);
ratioUseStaleDataNodesForWrite <= 1.0f), } else {
DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY + this.heartbeatRecheckInterval = recheckInterval;
" = '" + ratioUseStaleDataNodesForWrite + "' is invalid. " + }
"It should be a positive non-zero float value, not greater than 1.0f.");
this.heartbeatRecheckInterval = (checkStaleNodes
&& initialAvoidWriteStaleNodes
&& staleInterval < recheckInterval) ? staleInterval : recheckInterval;
} }
void activate(Configuration conf) { void activate(Configuration conf) {
@ -242,7 +226,6 @@ class HeartbeatManager implements DatanodeStatistics {
if (namesystem.isInSafeMode()) { if (namesystem.isInSafeMode()) {
return; return;
} }
boolean checkStaleNodes = dm.isCheckingForStaleDataNodes();
boolean allAlive = false; boolean allAlive = false;
while (!allAlive) { while (!allAlive) {
// locate the first dead node. // locate the first dead node.
@ -254,29 +237,14 @@ class HeartbeatManager implements DatanodeStatistics {
if (dead == null && dm.isDatanodeDead(d)) { if (dead == null && dm.isDatanodeDead(d)) {
stats.incrExpiredHeartbeats(); stats.incrExpiredHeartbeats();
dead = d; dead = d;
if (!checkStaleNodes) {
break;
}
} }
if (checkStaleNodes && if (d.isStale(dm.getStaleInterval())) {
d.isStale(dm.getStaleInterval())) {
numOfStaleNodes++; numOfStaleNodes++;
} }
} }
// Change whether to avoid using stale datanodes for writing // Set the number of stale nodes in the DatanodeManager
// based on proportion of stale datanodes dm.setNumStaleNodes(numOfStaleNodes);
if (checkStaleNodes) {
dm.setNumStaleNodes(numOfStaleNodes);
if (numOfStaleNodes >
datanodes.size() * ratioUseStaleDataNodesForWrite) {
dm.setAvoidStaleDataNodesForWrite(false);
} else {
if (this.initialAvoidWriteStaleNodes) {
dm.setAvoidStaleDataNodesForWrite(true);
}
}
}
} }
allAlive = dead == null; allAlive = dead == null;
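Earlier in this HeartbeatManager section, the constructor now lowers the recheck interval to the stale interval whenever write-path stale avoidance is enabled and the stale interval is the smaller of the two, so staleness is re-evaluated at least that often. A standalone sketch of that selection; the millisecond values mirror the comments in the diff:

    class RecheckIntervalSketch {
      static long chooseRecheckInterval(boolean avoidStaleDataNodesForWrite,
          long staleIntervalMs, long recheckIntervalMs) {
        // Use the shorter stale interval only when avoidance actually needs it.
        return (avoidStaleDataNodesForWrite && staleIntervalMs < recheckIntervalMs)
            ? staleIntervalMs
            : recheckIntervalMs;
      }

      public static void main(String[] args) {
        long staleInterval = 30_000L;     // 30s, per the comment in the diff
        long recheckInterval = 300_000L;  // 5 min, per the comment in the diff
        System.out.println(chooseRecheckInterval(true, staleInterval, recheckInterval));  // 30000
        System.out.println(chooseRecheckInterval(false, staleInterval, recheckInterval)); // 300000
      }
    }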


@@ -905,7 +905,7 @@ public abstract class Storage extends StorageInfo {
     props.setProperty("storageType", storageType.toString());
     props.setProperty("namespaceID", String.valueOf(namespaceID));
     // Set clusterID in version with federation support
-    if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
+    if (versionSupportsFederation()) {
       props.setProperty("clusterID", clusterID);
     }
     props.setProperty("cTime", String.valueOf(cTime));


@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.common;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 
 import com.google.common.base.Joiner;
 
@@ -77,6 +79,10 @@ public class StorageInfo {
     namespaceID = from.namespaceID;
     cTime = from.cTime;
   }
+
+  public boolean versionSupportsFederation() {
+    return LayoutVersion.supports(Feature.FEDERATION, layoutVersion);
+  }
 
   @Override
   public String toString() {


@@ -123,6 +123,10 @@ public class CheckpointSignature extends StorageInfo
         blockpoolID.equals(si.getBlockPoolID());
   }
 
+  boolean namespaceIdMatches(FSImage si) {
+    return namespaceID == si.getStorage().namespaceID;
+  }
+
   void validateStorageInfo(FSImage si) throws IOException {
     if (!isSameCluster(si)
         || !storageVersionMatches(si.getStorage())) {


@@ -569,12 +569,10 @@ class ClusterJspHelper {
       toXmlItemBlock(doc, "DFS Remaining", StringUtils.byteDesc(free));
 
       // dfsUsedPercent
-      toXmlItemBlock(doc, "DFS Used%",
-          StringUtils.limitDecimalTo2(dfsUsedPercent)+ "%");
+      toXmlItemBlock(doc, "DFS Used%", DFSUtil.percent2String(dfsUsedPercent));
 
       // dfsRemainingPercent
-      toXmlItemBlock(doc, "DFS Remaining%",
-          StringUtils.limitDecimalTo2(dfsRemainingPercent) + "%");
+      toXmlItemBlock(doc, "DFS Remaining%", DFSUtil.percent2String(dfsRemainingPercent));
 
       doc.endTag(); // storage


@@ -2157,7 +2157,8 @@ public class FSDirectory implements Closeable {
         node.getUserName(snapshot),
         node.getGroupName(snapshot),
         node.isSymlink() ? ((INodeSymlink)node).getSymlink() : null,
-        path);
+        path,
+        node.getId());
   }
 
   /**
@@ -2194,6 +2195,7 @@
         node.getGroupName(snapshot),
         node.isSymlink() ? ((INodeSymlink)node).getSymlink() : null,
         path,
+        node.getId(),
         loc);
   }


@ -1791,16 +1791,18 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* Create a new file entry in the namespace. * Create a new file entry in the namespace.
* *
* For description of parameters and exceptions thrown see * For description of parameters and exceptions thrown see
* {@link ClientProtocol#create()} * {@link ClientProtocol#create()}, except it returns valid file status
* upon success
*/ */
void startFile(String src, PermissionStatus permissions, String holder, HdfsFileStatus startFile(String src, PermissionStatus permissions,
String clientMachine, EnumSet<CreateFlag> flag, boolean createParent, String holder, String clientMachine, EnumSet<CreateFlag> flag,
short replication, long blockSize) throws AccessControlException, boolean createParent, short replication, long blockSize)
SafeModeException, FileAlreadyExistsException, UnresolvedLinkException, throws AccessControlException, SafeModeException,
FileAlreadyExistsException, UnresolvedLinkException,
FileNotFoundException, ParentNotDirectoryException, IOException { FileNotFoundException, ParentNotDirectoryException, IOException {
try { try {
startFileInt(src, permissions, holder, clientMachine, flag, createParent, return startFileInt(src, permissions, holder, clientMachine, flag,
replication, blockSize); createParent, replication, blockSize);
} catch (AccessControlException e) { } catch (AccessControlException e) {
if (isAuditEnabled() && isExternalInvocation()) { if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(false, UserGroupInformation.getCurrentUser(), logAuditEvent(false, UserGroupInformation.getCurrentUser(),
@ -1811,18 +1813,21 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
} }
} }
private void startFileInt(String src, PermissionStatus permissions, String holder, private HdfsFileStatus startFileInt(String src, PermissionStatus permissions,
String clientMachine, EnumSet<CreateFlag> flag, boolean createParent, String holder, String clientMachine, EnumSet<CreateFlag> flag,
short replication, long blockSize) throws AccessControlException, boolean createParent, short replication, long blockSize)
SafeModeException, FileAlreadyExistsException, UnresolvedLinkException, throws AccessControlException, SafeModeException,
FileAlreadyExistsException, UnresolvedLinkException,
FileNotFoundException, ParentNotDirectoryException, IOException { FileNotFoundException, ParentNotDirectoryException, IOException {
boolean skipSync = false; boolean skipSync = false;
final HdfsFileStatus stat;
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
startFileInternal(src, permissions, holder, clientMachine, flag, startFileInternal(src, permissions, holder, clientMachine, flag,
createParent, replication, blockSize); createParent, replication, blockSize);
stat = dir.getFileInfo(src, false);
} catch (StandbyException se) { } catch (StandbyException se) {
skipSync = true; skipSync = true;
throw se; throw se;
@ -1836,11 +1841,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
} }
if (isAuditEnabled() && isExternalInvocation()) { if (isAuditEnabled() && isExternalInvocation()) {
final HdfsFileStatus stat = dir.getFileInfo(src, false);
logAuditEvent(UserGroupInformation.getCurrentUser(), logAuditEvent(UserGroupInformation.getCurrentUser(),
getRemoteIp(), getRemoteIp(),
"create", src, null, stat); "create", src, null, stat);
} }
return stat;
} }
/** /**
@ -2207,20 +2212,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* are replicated. Will return an empty 2-elt array if we want the * are replicated. Will return an empty 2-elt array if we want the
* client to "try again later". * client to "try again later".
*/ */
LocatedBlock getAdditionalBlock(String src, LocatedBlock getAdditionalBlock(String src, long fileId, String clientName,
String clientName, ExtendedBlock previous, HashMap<Node, Node> excludedNodes)
ExtendedBlock previous,
HashMap<Node, Node> excludedNodes
)
throws LeaseExpiredException, NotReplicatedYetException, throws LeaseExpiredException, NotReplicatedYetException,
QuotaExceededException, SafeModeException, UnresolvedLinkException, QuotaExceededException, SafeModeException, UnresolvedLinkException,
IOException { IOException {
checkBlock(previous); long blockSize;
Block previousBlock = ExtendedBlock.getLocalBlock(previous);
long fileLength, blockSize;
int replication; int replication;
DatanodeDescriptor clientNode = null; DatanodeDescriptor clientNode = null;
Block newBlock = null;
if(NameNode.stateChangeLog.isDebugEnabled()) { if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug( NameNode.stateChangeLog.debug(
@ -2228,118 +2227,61 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
+src+" for "+clientName); +src+" for "+clientName);
} }
writeLock(); // Part I. Analyze the state of the file with respect to the input data.
readLock();
try { try {
checkOperation(OperationCategory.WRITE); LocatedBlock[] onRetryBlock = new LocatedBlock[1];
final INode[] inodes = analyzeFileState(
src, fileId, clientName, previous, onRetryBlock).getINodes();
final INodeFileUnderConstruction pendingFile =
(INodeFileUnderConstruction) inodes[inodes.length - 1];
if (isInSafeMode()) { if(onRetryBlock[0] != null) {
throw new SafeModeException("Cannot add block to " + src, safeMode); // This is a retry. Just return the last block.
return onRetryBlock[0];
} }
// have we exceeded the configured limit of fs objects.
checkFsObjectLimit();
INodeFileUnderConstruction pendingFile = checkLease(src, clientName);
BlockInfo lastBlockInFile = pendingFile.getLastBlock();
if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
// The block that the client claims is the current last block
// doesn't match up with what we think is the last block. There are
// three possibilities:
// 1) This is the first block allocation of an append() pipeline
// which started appending exactly at a block boundary.
// In this case, the client isn't passed the previous block,
// so it makes the allocateBlock() call with previous=null.
// We can distinguish this since the last block of the file
// will be exactly a full block.
// 2) This is a retry from a client that missed the response of a
// prior getAdditionalBlock() call, perhaps because of a network
// timeout, or because of an HA failover. In that case, we know
// by the fact that the client is re-issuing the RPC that it
// never began to write to the old block. Hence it is safe to
// abandon it and allocate a new one.
// 3) This is an entirely bogus request/bug -- we should error out
// rather than potentially appending a new block with an empty
// one in the middle, etc
BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();
if (previous == null &&
lastBlockInFile != null &&
lastBlockInFile.getNumBytes() == pendingFile.getPreferredBlockSize() &&
lastBlockInFile.isComplete()) {
// Case 1
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug(
"BLOCK* NameSystem.allocateBlock: handling block allocation" +
" writing to a file with a complete previous block: src=" +
src + " lastBlock=" + lastBlockInFile);
}
} else if (Block.matchingIdAndGenStamp(penultimateBlock, previousBlock)) {
// Case 2
if (lastBlockInFile.getNumBytes() != 0) {
throw new IOException(
"Request looked like a retry to allocate block " +
lastBlockInFile + " but it already contains " +
lastBlockInFile.getNumBytes() + " bytes");
}
// The retry case ("b" above) -- abandon the old block.
NameNode.stateChangeLog.info("BLOCK* allocateBlock: " +
"caught retry for allocation of a new block in " +
src + ". Abandoning old block " + lastBlockInFile);
dir.removeBlock(src, pendingFile, lastBlockInFile);
dir.persistBlocks(src, pendingFile);
} else {
throw new IOException("Cannot allocate block in " + src + ": " +
"passed 'previous' block " + previous + " does not match actual " +
"last block in file " + lastBlockInFile);
}
}
// commit the last block and complete it if it has minimum replicas
commitOrCompleteLastBlock(pendingFile, previousBlock);
//
// If we fail this, bad things happen!
//
if (!checkFileProgress(pendingFile, false)) {
throw new NotReplicatedYetException("Not replicated yet:" + src);
}
fileLength = pendingFile.computeContentSummary().getLength();
blockSize = pendingFile.getPreferredBlockSize(); blockSize = pendingFile.getPreferredBlockSize();
clientNode = pendingFile.getClientNode(); clientNode = pendingFile.getClientNode();
replication = pendingFile.getFileReplication(); replication = pendingFile.getFileReplication();
} finally { } finally {
writeUnlock(); readUnlock();
} }
// choose targets for the new block to be allocated. // choose targets for the new block to be allocated.
final DatanodeDescriptor targets[] = blockManager.chooseTarget( final DatanodeDescriptor targets[] = getBlockManager().chooseTarget(
src, replication, clientNode, excludedNodes, blockSize); src, replication, clientNode, excludedNodes, blockSize);
// Allocate a new block and record it in the INode. // Part II.
// Allocate a new block, add it to the INode and the BlocksMap.
Block newBlock = null;
long offset;
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); // Run the full analysis again, since things could have changed
if (isInSafeMode()) { // while chooseTarget() was executing.
throw new SafeModeException("Cannot add block to " + src, safeMode); LocatedBlock[] onRetryBlock = new LocatedBlock[1];
INodesInPath inodesInPath =
analyzeFileState(src, fileId, clientName, previous, onRetryBlock);
INode[] inodes = inodesInPath.getINodes();
final INodeFileUnderConstruction pendingFile =
(INodeFileUnderConstruction) inodes[inodes.length - 1];
if(onRetryBlock[0] != null) {
// This is a retry. Just return the last block.
return onRetryBlock[0];
} }
final INodesInPath iip = dir.getINodesInPath4Write(src); // commit the last block and complete it if it has minimum replicas
final INodeFileUnderConstruction pendingFile commitOrCompleteLastBlock(pendingFile,
= checkLease(src, clientName, iip.getLastINode()); ExtendedBlock.getLocalBlock(previous));
if (!checkFileProgress(pendingFile, false)) { // allocate new block, record block locations in INode.
throw new NotReplicatedYetException("Not replicated yet:" + src); newBlock = createNewBlock();
} saveAllocatedBlock(src, inodesInPath, newBlock, targets);
// allocate new block record block locations in INode.
newBlock = allocateBlock(src, iip, targets);
for (DatanodeDescriptor dn : targets) {
dn.incBlocksScheduled();
}
dir.persistBlocks(src, pendingFile); dir.persistBlocks(src, pendingFile);
offset = pendingFile.computeFileSize(true);
} finally { } finally {
writeUnlock(); writeUnlock();
} }
@ -2347,10 +2289,114 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
getEditLog().logSync(); getEditLog().logSync();
} }
// Create next block // Return located block
LocatedBlock b = new LocatedBlock(getExtendedBlock(newBlock), targets, fileLength); return makeLocatedBlock(newBlock, targets, offset);
blockManager.setBlockToken(b, BlockTokenSecretManager.AccessMode.WRITE); }
return b;
INodesInPath analyzeFileState(String src,
long fileId,
String clientName,
ExtendedBlock previous,
LocatedBlock[] onRetryBlock)
throws IOException {
assert hasReadOrWriteLock();
checkBlock(previous);
onRetryBlock[0] = null;
checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) {
throw new SafeModeException("Cannot add block to " + src, safeMode);
}
// have we exceeded the configured limit of fs objects.
checkFsObjectLimit();
Block previousBlock = ExtendedBlock.getLocalBlock(previous);
final INodesInPath inodesInPath = dir.getINodesInPath4Write(src);
final INode[] inodes = inodesInPath.getINodes();
final INodeFileUnderConstruction pendingFile
= checkLease(src, fileId, clientName, inodes[inodes.length - 1]);
BlockInfo lastBlockInFile = pendingFile.getLastBlock();
if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
// The block that the client claims is the current last block
// doesn't match up with what we think is the last block. There are
// four possibilities:
// 1) This is the first block allocation of an append() pipeline
// which started appending exactly at a block boundary.
// In this case, the client isn't passed the previous block,
// so it makes the allocateBlock() call with previous=null.
// We can distinguish this since the last block of the file
// will be exactly a full block.
// 2) This is a retry from a client that missed the response of a
// prior getAdditionalBlock() call, perhaps because of a network
// timeout, or because of an HA failover. In that case, we know
// by the fact that the client is re-issuing the RPC that it
// never began to write to the old block. Hence it is safe to
// to return the existing block.
// 3) This is an entirely bogus request/bug -- we should error out
// rather than potentially appending a new block with an empty
// one in the middle, etc
// 4) This is a retry from a client that timed out while
// the prior getAdditionalBlock() is still being processed,
// currently working on chooseTarget().
// There are no means to distinguish between the first and
// the second attempts in Part I, because the first one hasn't
// changed the namesystem state yet.
// We run this analysis again in Part II where case 4 is impossible.
BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();
if (previous == null &&
lastBlockInFile != null &&
lastBlockInFile.getNumBytes() == pendingFile.getPreferredBlockSize() &&
lastBlockInFile.isComplete()) {
// Case 1
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug(
"BLOCK* NameSystem.allocateBlock: handling block allocation" +
" writing to a file with a complete previous block: src=" +
src + " lastBlock=" + lastBlockInFile);
}
} else if (Block.matchingIdAndGenStamp(penultimateBlock, previousBlock)) {
if (lastBlockInFile.getNumBytes() != 0) {
throw new IOException(
"Request looked like a retry to allocate block " +
lastBlockInFile + " but it already contains " +
lastBlockInFile.getNumBytes() + " bytes");
}
// Case 2
// Return the last block.
NameNode.stateChangeLog.info("BLOCK* allocateBlock: " +
"caught retry for allocation of a new block in " +
src + ". Returning previously allocated block " + lastBlockInFile);
long offset = pendingFile.computeFileSize(true);
onRetryBlock[0] = makeLocatedBlock(lastBlockInFile,
((BlockInfoUnderConstruction)lastBlockInFile).getExpectedLocations(),
offset);
return inodesInPath;
} else {
// Case 3
throw new IOException("Cannot allocate block in " + src + ": " +
"passed 'previous' block " + previous + " does not match actual " +
"last block in file " + lastBlockInFile);
}
}
// Check if the penultimate block is minimally replicated
if (!checkFileProgress(pendingFile, false)) {
throw new NotReplicatedYetException("Not replicated yet: " + src);
}
return inodesInPath;
}
LocatedBlock makeLocatedBlock(Block blk,
DatanodeInfo[] locs,
long offset) throws IOException {
LocatedBlock lBlk = new LocatedBlock(
getExtendedBlock(blk), locs, offset);
getBlockManager().setBlockToken(
lBlk, BlockTokenSecretManager.AccessMode.WRITE);
return lBlk;
} }
/** @see NameNode#getAdditionalDatanode(String, ExtendedBlock, DatanodeInfo[], DatanodeInfo[], int, String) */ /** @see NameNode#getAdditionalDatanode(String, ExtendedBlock, DatanodeInfo[], DatanodeInfo[], int, String) */
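The getAdditionalBlock() rewrite above splits the work into analyzeFileState() under the read lock (cheap validation plus retry detection), an unlocked chooseTarget(), and a second analyzeFileState() under the write lock before committing, because the file can change while targets are being chosen. A heavily reduced, self-contained toy of that double-checked, two-phase structure; plain strings stand in for blocks, inodes and datanode targets, and none of the real HDFS classes are used:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class TwoPhaseAllocationSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private String penultimate;   // block before the last one handed out
      private String last;          // last block handed out

      String addBlock(String client, String previous) {
        lock.readLock().lock();                 // Part I: cheap validation
        try {
          String retry = detectRetry(previous);
          if (retry != null) return retry;
        } finally {
          lock.readLock().unlock();
        }

        String targets = "dn1,dn2,dn3";         // chooseTarget() runs without the lock

        lock.writeLock().lock();                // Part II: re-check, then commit
        try {
          String retry = detectRetry(previous);
          if (retry != null) return retry;
          penultimate = last;
          last = "blk_" + System.nanoTime() + "@" + targets + " for " + client;
          return last;
        } finally {
          lock.writeLock().unlock();
        }
      }

      // A request naming the penultimate block as "previous" re-sends the call
      // that produced "last": hand back the existing block instead of a new one.
      private String detectRetry(String previous) {
        if (last == null) return null;
        boolean sameAsBefore = (previous == null && penultimate == null)
            || (previous != null && previous.equals(penultimate));
        return sameAsBefore ? last : null;
      }

      public static void main(String[] args) {
        TwoPhaseAllocationSketch ns = new TwoPhaseAllocationSketch();
        String b1 = ns.addBlock("clientA", null);        // first block
        String b1Again = ns.addBlock("clientA", null);   // retry of the same call
        String b2 = ns.addBlock("clientA", b1);          // next block
        System.out.println(b1.equals(b1Again));          // true: retry returned b1
        System.out.println(b2.equals(b1));               // false: a fresh block
      }
    }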
@ -2438,13 +2484,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
} }
/** make sure that we still have the lease on this file. */ /** make sure that we still have the lease on this file. */
private INodeFileUnderConstruction checkLease(String src, String holder) private INodeFileUnderConstruction checkLease(String src, String holder)
throws LeaseExpiredException, UnresolvedLinkException { throws LeaseExpiredException, UnresolvedLinkException,
return checkLease(src, holder, dir.getINode(src)); FileNotFoundException {
return checkLease(src, INodeId.GRANDFATHER_INODE_ID, holder,
dir.getINode(src));
} }
private INodeFileUnderConstruction checkLease(String src, String holder, private INodeFileUnderConstruction checkLease(String src, long fileId,
INode file) throws LeaseExpiredException { String holder, INode file) throws LeaseExpiredException,
FileNotFoundException {
assert hasReadOrWriteLock(); assert hasReadOrWriteLock();
if (file == null || !(file instanceof INodeFile)) { if (file == null || !(file instanceof INodeFile)) {
Lease lease = leaseManager.getLease(holder); Lease lease = leaseManager.getLease(holder);
@ -2465,6 +2514,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
throw new LeaseExpiredException("Lease mismatch on " + src + " owned by " throw new LeaseExpiredException("Lease mismatch on " + src + " owned by "
+ pendingFile.getClientName() + " but is accessed by " + holder); + pendingFile.getClientName() + " but is accessed by " + holder);
} }
INodeId.checkId(fileId, pendingFile);
return pendingFile; return pendingFile;
} }
@ -2506,7 +2556,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
final INodesInPath iip = dir.getLastINodeInPath(src); final INodesInPath iip = dir.getLastINodeInPath(src);
final INodeFileUnderConstruction pendingFile; final INodeFileUnderConstruction pendingFile;
try { try {
pendingFile = checkLease(src, holder, iip.getINode(0)); pendingFile = checkLease(src, INodeId.GRANDFATHER_INODE_ID,
holder, iip.getINode(0));
} catch (LeaseExpiredException lee) { } catch (LeaseExpiredException lee) {
final INode inode = dir.getINode(src); final INode inode = dir.getINode(src);
if (inode != null && inode instanceof INodeFile && !inode.isUnderConstruction()) { if (inode != null && inode instanceof INodeFile && !inode.isUnderConstruction()) {
@ -2543,22 +2594,33 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
} }
/** /**
* Allocate a block at the given pending filename * Save allocated block at the given pending filename
* *
* @param src path to the file * @param src path to the file
* @param inodesInPath representing each of the components of src. * @param inodesInPath representing each of the components of src.
* The last INode is the INode for the file. * The last INode is the INode for the file.
* @throws QuotaExceededException If addition of block exceeds space quota * @throws QuotaExceededException If addition of block exceeds space quota
*/ */
private Block allocateBlock(String src, INodesInPath inodesInPath, BlockInfo saveAllocatedBlock(String src, INodesInPath inodesInPath,
DatanodeDescriptor targets[]) throws IOException { Block newBlock, DatanodeDescriptor targets[]) throws IOException {
assert hasWriteLock();
BlockInfo b = dir.addBlock(src, inodesInPath, newBlock, targets);
NameNode.stateChangeLog.info("BLOCK* allocateBlock: " + src + ". "
+ getBlockPoolId() + " " + b);
for (DatanodeDescriptor dn : targets) {
dn.incBlocksScheduled();
}
return b;
}
/**
* Create new block with a unique block id and a new generation stamp.
*/
Block createNewBlock() throws IOException {
assert hasWriteLock(); assert hasWriteLock();
Block b = new Block(getFSImage().getUniqueBlockId(), 0, 0); Block b = new Block(getFSImage().getUniqueBlockId(), 0, 0);
// Increment the generation stamp for every new block. // Increment the generation stamp for every new block.
b.setGenerationStamp(nextGenerationStamp()); b.setGenerationStamp(nextGenerationStamp());
b = dir.addBlock(src, inodesInPath, b, targets);
NameNode.stateChangeLog.info("BLOCK* allocateBlock: " + src + ". "
+ blockPoolId + " " + b);
return b; return b;
} }
@ -5623,7 +5685,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   @Override
   public boolean isAvoidingStaleDataNodesForWrite() {
     return this.blockManager.getDatanodeManager()
-        .isAvoidingStaleDataNodesForWrite();
+        .shouldAvoidStaleDataNodesForWrite();
   }
 
   public SnapshotManager getSnapshotManager() {


@ -17,18 +17,21 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.FileNotFoundException;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.SequentialNumber;
 
 /**
- * An id which uniquely identifies an inode
+ * An id which uniquely identifies an inode. Id 1 to 1000 are reserved for
+ * potential future usage. The id won't be recycled and is not expected to wrap
+ * around in a very long time. Root inode id is always 1001. Id 0 is used for
+ * backward compatibility support.
  */
 @InterfaceAudience.Private
-class INodeId extends SequentialNumber {
+public class INodeId extends SequentialNumber {
   /**
-   * The last reserved inode id. Reserve id 1 to 1000 for potential future
-   * usage. The id won't be recycled and is not expected to wrap around in a
-   * very long time. Root inode id will be 1001.
+   * The last reserved inode id.
    */
   public static final long LAST_RESERVED_ID = 1000L;
 
@@ -38,6 +41,19 @@ class INodeId extends SequentialNumber {
    */
   public static final long GRANDFATHER_INODE_ID = 0;
 
+  /**
+   * To check if the request id is the same as saved id. Don't check fileId
+   * with GRANDFATHER_INODE_ID for backward compatibility.
+   */
+  public static void checkId(long requestId, INode inode)
+      throws FileNotFoundException {
+    if (requestId != GRANDFATHER_INODE_ID && requestId != inode.getId()) {
+      throw new FileNotFoundException(
+          "ID mismatch. Request id and saved id: " + requestId + " , "
+          + inode.getId());
+    }
+  }
+
   INodeId() {
     super(LAST_RESERVED_ID);
   }
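The new checkId() accepts either the legacy id 0 (GRANDFATHER_INODE_ID, sent by clients that predate file ids) or the exact saved id, and rejects everything else. A standalone sketch of the same check, with a plain long standing in for the INode argument:

    import java.io.FileNotFoundException;

    class InodeIdCheckSketch {
      static final long GRANDFATHER_INODE_ID = 0;

      static void checkId(long requestId, long savedId) throws FileNotFoundException {
        if (requestId != GRANDFATHER_INODE_ID && requestId != savedId) {
          throw new FileNotFoundException(
              "ID mismatch. Request id and saved id: " + requestId + " , " + savedId);
        }
      }

      public static void main(String[] args) throws FileNotFoundException {
        checkId(0, 16387);      // legacy client: accepted
        checkId(16387, 16387);  // matching id: accepted
        try {
          checkId(16388, 16387);
        } catch (FileNotFoundException e) {
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }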


@@ -587,7 +587,7 @@ public class NNStorage extends Storage implements Closeable,
     }
 
     // Set Block pool ID in version with federation support
-    if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
+    if (versionSupportsFederation()) {
       String sbpid = props.getProperty("blockpoolID");
       setBlockPoolID(sd.getRoot(), sbpid);
     }
@@ -634,7 +634,7 @@
       ) throws IOException {
     super.setPropertiesFromFields(props, sd);
     // Set blockpoolID in version with federation support
-    if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
+    if (versionSupportsFederation()) {
       props.setProperty("blockpoolID", blockpoolID);
     }
   }


@ -424,13 +424,10 @@ class NameNodeRpcServer implements NamenodeProtocols {
} }
@Override // ClientProtocol @Override // ClientProtocol
public void create(String src, public HdfsFileStatus create(String src, FsPermission masked,
FsPermission masked, String clientName, EnumSetWritable<CreateFlag> flag,
String clientName, boolean createParent, short replication, long blockSize)
EnumSetWritable<CreateFlag> flag, throws IOException {
boolean createParent,
short replication,
long blockSize) throws IOException {
String clientMachine = getClientMachine(); String clientMachine = getClientMachine();
if (stateChangeLog.isDebugEnabled()) { if (stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*DIR* NameNode.create: file " stateChangeLog.debug("*DIR* NameNode.create: file "
@ -440,12 +437,13 @@ class NameNodeRpcServer implements NamenodeProtocols {
throw new IOException("create: Pathname too long. Limit " throw new IOException("create: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels."); + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
} }
namesystem.startFile(src, HdfsFileStatus fileStatus = namesystem.startFile(src, new PermissionStatus(
new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(), UserGroupInformation.getCurrentUser().getShortUserName(), null, masked),
null, masked), clientName, clientMachine, flag.get(), createParent, replication,
clientName, clientMachine, flag.get(), createParent, replication, blockSize); blockSize);
metrics.incrFilesCreated(); metrics.incrFilesCreated();
metrics.incrCreateFileOps(); metrics.incrCreateFileOps();
return fileStatus;
} }
@Override // ClientProtocol @Override // ClientProtocol
@ -484,26 +482,24 @@ class NameNodeRpcServer implements NamenodeProtocols {
throws IOException { throws IOException {
namesystem.setOwner(src, username, groupname); namesystem.setOwner(src, username, groupname);
} }
@Override // ClientProtocol @Override
public LocatedBlock addBlock(String src, public LocatedBlock addBlock(String src, String clientName,
String clientName, ExtendedBlock previous, DatanodeInfo[] excludedNodes, long fileId)
ExtendedBlock previous,
DatanodeInfo[] excludedNodes)
throws IOException { throws IOException {
if(stateChangeLog.isDebugEnabled()) { if (stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " + src
+src+" for "+clientName); + " fileId=" + fileId + " for " + clientName);
} }
HashMap<Node, Node> excludedNodesSet = null; HashMap<Node, Node> excludedNodesSet = null;
if (excludedNodes != null) { if (excludedNodes != null) {
excludedNodesSet = new HashMap<Node, Node>(excludedNodes.length); excludedNodesSet = new HashMap<Node, Node>(excludedNodes.length);
for (Node node:excludedNodes) { for (Node node : excludedNodes) {
excludedNodesSet.put(node, node); excludedNodesSet.put(node, node);
} }
} }
LocatedBlock locatedBlock = LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, fileId,
namesystem.getAdditionalBlock(src, clientName, previous, excludedNodesSet); clientName, previous, excludedNodesSet);
if (locatedBlock != null) if (locatedBlock != null)
metrics.incrAddBlockOps(); metrics.incrAddBlockOps();
return locatedBlock; return locatedBlock;

View File

@ -17,12 +17,15 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSUtil.percent2String;
import java.io.IOException; import java.io.IOException;
import java.lang.management.ManagementFactory; import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean; import java.lang.management.MemoryMXBean;
import java.lang.management.MemoryUsage; import java.lang.management.MemoryUsage;
import java.net.InetAddress; import java.net.InetAddress;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URLEncoder; import java.net.URLEncoder;
import java.security.PrivilegedExceptionAction; import java.security.PrivilegedExceptionAction;
import java.util.ArrayList; import java.util.ArrayList;
@ -64,6 +67,14 @@ import org.znerd.xmlenc.XMLOutputter;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
class NamenodeJspHelper { class NamenodeJspHelper {
static String fraction2String(double value) {
return StringUtils.format("%.2f", value);
}
static String fraction2String(long numerator, long denominator) {
return fraction2String(numerator/(double)denominator);
}
static String getSafeModeText(FSNamesystem fsn) { static String getSafeModeText(FSNamesystem fsn) {
if (!fsn.isInSafeMode()) if (!fsn.isInSafeMode())
return ""; return "";
@ -361,20 +372,20 @@ class NamenodeJspHelper {
+ "DFS Remaining" + colTxt() + ":" + colTxt() + "DFS Remaining" + colTxt() + ":" + colTxt()
+ StringUtils.byteDesc(remaining) + rowTxt() + colTxt() + "DFS Used%" + StringUtils.byteDesc(remaining) + rowTxt() + colTxt() + "DFS Used%"
+ colTxt() + ":" + colTxt() + colTxt() + ":" + colTxt()
+ StringUtils.limitDecimalTo2(percentUsed) + " %" + rowTxt() + percent2String(percentUsed) + rowTxt()
+ colTxt() + "DFS Remaining%" + colTxt() + ":" + colTxt() + colTxt() + "DFS Remaining%" + colTxt() + ":" + colTxt()
+ StringUtils.limitDecimalTo2(percentRemaining) + " %" + percent2String(percentRemaining)
+ rowTxt() + colTxt() + "Block Pool Used" + colTxt() + ":" + colTxt() + rowTxt() + colTxt() + "Block Pool Used" + colTxt() + ":" + colTxt()
+ StringUtils.byteDesc(bpUsed) + rowTxt() + StringUtils.byteDesc(bpUsed) + rowTxt()
+ colTxt() + "Block Pool Used%"+ colTxt() + ":" + colTxt() + colTxt() + "Block Pool Used%"+ colTxt() + ":" + colTxt()
+ StringUtils.limitDecimalTo2(percentBpUsed) + " %" + percent2String(percentBpUsed)
+ rowTxt() + colTxt() + "DataNodes usages" + colTxt() + ":" + colTxt() + rowTxt() + colTxt() + "DataNodes usages" + colTxt() + ":" + colTxt()
+ "Min %" + colTxt() + "Median %" + colTxt() + "Max %" + colTxt() + "Min %" + colTxt() + "Median %" + colTxt() + "Max %" + colTxt()
+ "stdev %" + rowTxt() + colTxt() + colTxt() + colTxt() + "stdev %" + rowTxt() + colTxt() + colTxt() + colTxt()
+ StringUtils.limitDecimalTo2(min) + " %" + percent2String(min)
+ colTxt() + StringUtils.limitDecimalTo2(median) + " %" + colTxt() + percent2String(median)
+ colTxt() + StringUtils.limitDecimalTo2(max) + " %" + colTxt() + percent2String(max)
+ colTxt() + StringUtils.limitDecimalTo2(dev) + " %" + colTxt() + percent2String(dev)
+ rowTxt() + colTxt() + rowTxt() + colTxt()
+ "<a href=\"dfsnodelist.jsp?whatNodes=LIVE\">Live Nodes</a> " + "<a href=\"dfsnodelist.jsp?whatNodes=LIVE\">Live Nodes</a> "
+ colTxt() + ":" + colTxt() + live.size() + colTxt() + ":" + colTxt() + live.size()
@ -443,7 +454,13 @@ class NamenodeJspHelper {
nodeToRedirect = nn.getHttpAddress().getHostName(); nodeToRedirect = nn.getHttpAddress().getHostName();
redirectPort = nn.getHttpAddress().getPort(); redirectPort = nn.getHttpAddress().getPort();
} }
String addr = nn.getNameNodeAddressHostPortString();
InetSocketAddress rpcAddr = nn.getNameNodeAddress();
String rpcHost = rpcAddr.getAddress().isAnyLocalAddress()
? URI.create(request.getRequestURL().toString()).getHost()
: rpcAddr.getAddress().getHostAddress();
String addr = rpcHost + ":" + rpcAddr.getPort();
String fqdn = InetAddress.getByName(nodeToRedirect).getCanonicalHostName(); String fqdn = InetAddress.getByName(nodeToRedirect).getCanonicalHostName();
redirectLocation = HttpConfig.getSchemePrefix() + fqdn + ":" + redirectPort redirectLocation = HttpConfig.getSchemePrefix() + fqdn + ":" + redirectPort
+ "/browseDirectory.jsp?namenodeInfoPort=" + "/browseDirectory.jsp?namenodeInfoPort="
@ -562,9 +579,9 @@ class NamenodeJspHelper {
long u = d.getDfsUsed(); long u = d.getDfsUsed();
long nu = d.getNonDfsUsed(); long nu = d.getNonDfsUsed();
long r = d.getRemaining(); long r = d.getRemaining();
String percentUsed = StringUtils.limitDecimalTo2(d.getDfsUsedPercent()); final double percentUsedValue = d.getDfsUsedPercent();
String percentRemaining = StringUtils.limitDecimalTo2(d String percentUsed = fraction2String(percentUsedValue);
.getRemainingPercent()); String percentRemaining = fraction2String(d.getRemainingPercent());
String adminState = d.getAdminState().toString(); String adminState = d.getAdminState().toString();
@ -572,32 +589,30 @@ class NamenodeJspHelper {
long currentTime = Time.now(); long currentTime = Time.now();
long bpUsed = d.getBlockPoolUsed(); long bpUsed = d.getBlockPoolUsed();
String percentBpUsed = StringUtils.limitDecimalTo2(d String percentBpUsed = fraction2String(d.getBlockPoolUsedPercent());
.getBlockPoolUsedPercent());
out.print("<td class=\"lastcontact\"> " out.print("<td class=\"lastcontact\"> "
+ ((currentTime - timestamp) / 1000) + ((currentTime - timestamp) / 1000)
+ "<td class=\"adminstate\">" + "<td class=\"adminstate\">"
+ adminState + adminState
+ "<td align=\"right\" class=\"capacity\">" + "<td align=\"right\" class=\"capacity\">"
+ StringUtils.limitDecimalTo2(c * 1.0 / diskBytes) + fraction2String(c, diskBytes)
+ "<td align=\"right\" class=\"used\">" + "<td align=\"right\" class=\"used\">"
+ StringUtils.limitDecimalTo2(u * 1.0 / diskBytes) + fraction2String(u, diskBytes)
+ "<td align=\"right\" class=\"nondfsused\">" + "<td align=\"right\" class=\"nondfsused\">"
+ StringUtils.limitDecimalTo2(nu * 1.0 / diskBytes) + fraction2String(nu, diskBytes)
+ "<td align=\"right\" class=\"remaining\">" + "<td align=\"right\" class=\"remaining\">"
+ StringUtils.limitDecimalTo2(r * 1.0 / diskBytes) + fraction2String(r, diskBytes)
+ "<td align=\"right\" class=\"pcused\">" + "<td align=\"right\" class=\"pcused\">"
+ percentUsed + percentUsed
+ "<td class=\"pcused\">" + "<td class=\"pcused\">"
+ ServletUtil.percentageGraph((int) Double.parseDouble(percentUsed), + ServletUtil.percentageGraph((int)percentUsedValue, 100)
100)
+ "<td align=\"right\" class=\"pcremaining\">" + "<td align=\"right\" class=\"pcremaining\">"
+ percentRemaining + percentRemaining
+ "<td title=" + "\"blocks scheduled : " + "<td title=" + "\"blocks scheduled : "
+ d.getBlocksScheduled() + "\" class=\"blocks\">" + d.numBlocks()+"\n" + d.getBlocksScheduled() + "\" class=\"blocks\">" + d.numBlocks()+"\n"
+ "<td align=\"right\" class=\"bpused\">" + "<td align=\"right\" class=\"bpused\">"
+ StringUtils.limitDecimalTo2(bpUsed * 1.0 / diskBytes) + fraction2String(bpUsed, diskBytes)
+ "<td align=\"right\" class=\"pcbpused\">" + "<td align=\"right\" class=\"pcbpused\">"
+ percentBpUsed + percentBpUsed
+ "<td align=\"right\" class=\"volfails\">" + "<td align=\"right\" class=\"volfails\">"

View File

@ -475,14 +475,20 @@ public class SecondaryNameNode implements Runnable {
// Returns a token that would be used to upload the merged image. // Returns a token that would be used to upload the merged image.
CheckpointSignature sig = namenode.rollEditLog(); CheckpointSignature sig = namenode.rollEditLog();
if ((checkpointImage.getNamespaceID() == 0) || boolean loadImage = false;
(sig.isSameCluster(checkpointImage) && boolean isFreshCheckpointer = (checkpointImage.getNamespaceID() == 0);
boolean isSameCluster =
(dstStorage.versionSupportsFederation() && sig.isSameCluster(checkpointImage)) ||
(!dstStorage.versionSupportsFederation() && sig.namespaceIdMatches(checkpointImage));
if (isFreshCheckpointer ||
(isSameCluster &&
!sig.storageVersionMatches(checkpointImage.getStorage()))) { !sig.storageVersionMatches(checkpointImage.getStorage()))) {
// if we're a fresh 2NN, or if we're on the same cluster and our storage // if we're a fresh 2NN, or if we're on the same cluster and our storage
// needs an upgrade, just take the storage info from the server. // needs an upgrade, just take the storage info from the server.
dstStorage.setStorageInfo(sig); dstStorage.setStorageInfo(sig);
dstStorage.setClusterID(sig.getClusterID()); dstStorage.setClusterID(sig.getClusterID());
dstStorage.setBlockPoolID(sig.getBlockpoolID()); dstStorage.setBlockPoolID(sig.getBlockpoolID());
loadImage = true;
} }
sig.validateStorageInfo(checkpointImage); sig.validateStorageInfo(checkpointImage);
@ -492,7 +498,7 @@ public class SecondaryNameNode implements Runnable {
RemoteEditLogManifest manifest = RemoteEditLogManifest manifest =
namenode.getEditLogManifest(sig.mostRecentCheckpointTxId + 1); namenode.getEditLogManifest(sig.mostRecentCheckpointTxId + 1);
boolean loadImage = downloadCheckpointFiles( loadImage |= downloadCheckpointFiles(
fsName, checkpointImage, sig, manifest); // Fetch fsimage and edits fsName, checkpointImage, sig, manifest); // Fetch fsimage and edits
doMerge(sig, manifest, loadImage, checkpointImage, namesystem); doMerge(sig, manifest, loadImage, checkpointImage, namesystem);

View File

@ -316,8 +316,7 @@ public class DFSAdmin extends FsShell {
System.out.println("DFS Used: " + used System.out.println("DFS Used: " + used
+ " (" + StringUtils.byteDesc(used) + ")"); + " (" + StringUtils.byteDesc(used) + ")");
System.out.println("DFS Used%: " System.out.println("DFS Used%: "
+ StringUtils.limitDecimalTo2(((1.0 * used) / presentCapacity) * 100) + StringUtils.formatPercent(used/(double)presentCapacity, 2));
+ "%");
/* These counts are not always up to date. They are updated after /* These counts are not always up to date. They are updated after
* iteration of an internal list. Should be updated in a few seconds to * iteration of an internal list. Should be updated in a few seconds to

View File

@ -219,6 +219,7 @@ public class JsonUtil {
m.put("modificationTime", status.getModificationTime()); m.put("modificationTime", status.getModificationTime());
m.put("blockSize", status.getBlockSize()); m.put("blockSize", status.getBlockSize());
m.put("replication", status.getReplication()); m.put("replication", status.getReplication());
m.put("fileId", status.getFileId());
return includeType ? toJsonString(FileStatus.class, m): JSON.toString(m); return includeType ? toJsonString(FileStatus.class, m): JSON.toString(m);
} }
@ -243,9 +244,10 @@ public class JsonUtil {
final long mTime = (Long) m.get("modificationTime"); final long mTime = (Long) m.get("modificationTime");
final long blockSize = (Long) m.get("blockSize"); final long blockSize = (Long) m.get("blockSize");
final short replication = (short) (long) (Long) m.get("replication"); final short replication = (short) (long) (Long) m.get("replication");
final long fileId = (Long) m.get("fileId");
return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication, return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
blockSize, mTime, aTime, permission, owner, group, blockSize, mTime, aTime, permission, owner, group,
symlink, DFSUtil.string2Bytes(localName)); symlink, DFSUtil.string2Bytes(localName), fileId);
} }
/** Convert an ExtendedBlock to a Json map. */ /** Convert an ExtendedBlock to a Json map. */

View File

@ -21,7 +21,7 @@ package org.apache.hadoop.hdfs.web.resources;
/** The concat source paths parameter. */ /** The concat source paths parameter. */
public class ConcatSourcesParam extends StringParam { public class ConcatSourcesParam extends StringParam {
/** Parameter name. */ /** Parameter name. */
public static final String NAME = "srcs"; public static final String NAME = "sources";
public static final String DEFAULT = NULL; public static final String DEFAULT = NULL;

View File

@ -67,7 +67,8 @@ message CreateRequestProto {
required uint64 blockSize = 7; required uint64 blockSize = 7;
} }
message CreateResponseProto { // void response message CreateResponseProto {
optional HdfsFileStatusProto fs = 1;
} }
message AppendRequestProto { message AppendRequestProto {
@ -119,6 +120,7 @@ message AddBlockRequestProto {
required string clientName = 2; required string clientName = 2;
optional ExtendedBlockProto previous = 3; optional ExtendedBlockProto previous = 3;
repeated DatanodeInfoProto excludeNodes = 4; repeated DatanodeInfoProto excludeNodes = 4;
optional uint64 fileId = 5 [default = 0]; // default as a bogus id
} }
message AddBlockResponseProto { message AddBlockResponseProto {

View File

@ -170,6 +170,9 @@ message HdfsFileStatusProto {
optional uint32 block_replication = 10 [default = 0]; // only 16bits used optional uint32 block_replication = 10 [default = 0]; // only 16bits used
optional uint64 blocksize = 11 [default = 0]; optional uint64 blocksize = 11 [default = 0];
optional LocatedBlocksProto locations = 12; // supplied only if asked by client optional LocatedBlocksProto locations = 12; // supplied only if asked by client
// Optional field for fileId
optional uint64 fileId = 13 [default = 0]; // default as an invalid id
} }
/** /**

View File

@ -999,17 +999,14 @@
</property> </property>
<property> <property>
<name>dfs.namenode.check.stale.datanode</name> <name>dfs.namenode.avoid.read.stale.datanode</name>
<value>false</value> <value>false</value>
<description> <description>
Indicate whether or not to check "stale" datanodes whose Indicate whether or not to avoid reading from &quot;stale&quot; datanodes whose
heartbeat messages have not been received by the namenode heartbeat messages have not been received by the namenode
for more than a specified time interval. If this configuration for more than a specified time interval. Stale datanodes will be
parameter is set as true, the system will keep track
of the number of stale datanodes. The stale datanodes will be
moved to the end of the node list returned for reading. See moved to the end of the node list returned for reading. See
dfs.namenode.avoid.write.stale.datanode for details on how this dfs.namenode.avoid.write.stale.datanode for a similar setting for writes.
affects writes.
</description> </description>
</property> </property>
@ -1017,13 +1014,13 @@
<name>dfs.namenode.avoid.write.stale.datanode</name> <name>dfs.namenode.avoid.write.stale.datanode</name>
<value>false</value> <value>false</value>
<description> <description>
Indicate whether or not to avoid writing to "stale" datanodes whose Indicate whether or not to avoid writing to &quot;stale&quot; datanodes whose
heartbeat messages have not been received by the namenode heartbeat messages have not been received by the namenode
for more than a specified time interval. If this configuration for more than a specified time interval. Writes will avoid using
parameter and dfs.namenode.check.stale.datanode are both set as true, stale datanodes unless more than a configured ratio
the writing will avoid using stale datanodes unless a high number (dfs.namenode.write.stale.datanode.ratio) of datanodes are marked as
of datanodes are marked as stale. See stale. See dfs.namenode.avoid.read.stale.datanode for a similar setting
dfs.namenode.write.stale.datanode.ratio for details. for reads.
</description> </description>
</property> </property>
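The two descriptions above pair together: the read setting only reorders the datanode list returned to clients, while the write setting skips stale datanodes outright until the configured ratio is reached. As a minimal sketch (not part of this patch; class and method names are illustrative), a client or test could enable both settings programmatically using the property names documented in this hunk:
import org.apache.hadoop.conf.Configuration;

public class StaleDatanodeConfigSketch {
  // Returns a Configuration with both stale-datanode protections enabled.
  public static Configuration staleAwareConf() {
    Configuration conf = new Configuration();
    // Stale datanodes are moved to the end of the list handed to readers.
    conf.setBoolean("dfs.namenode.avoid.read.stale.datanode", true);
    // Writes avoid stale datanodes unless too many nodes are marked stale
    // (the threshold is governed by dfs.namenode.write.stale.datanode.ratio).
    conf.setBoolean("dfs.namenode.avoid.write.stale.datanode", true);
    return conf;
  }
}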

View File

@ -109,6 +109,9 @@ WebHDFS REST API
* {{{Append to a File}<<<APPEND>>>}} * {{{Append to a File}<<<APPEND>>>}}
(see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.append) (see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.append)
* {{{Concat File(s)}<<<CONCAT>>>}}
(see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.concat)
* HTTP DELETE * HTTP DELETE
* {{{Delete a File/Directory}<<<DELETE>>>}} * {{{Delete a File/Directory}<<<DELETE>>>}}
@ -299,6 +302,32 @@ Content-Length: 0
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.append {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.append
** {Concat File(s)}
* Submit a HTTP POST request.
+---------------------------------
curl -i -X POST "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CONCAT&sources=<SOURCES>"
+---------------------------------
The client receives a response with zero content length:
+---------------------------------
HTTP/1.1 200 OK
Content-Length: 0
+---------------------------------
[]
This REST API call is available as of Hadoop version 2.0.3.
Please note that <SOURCES> is a comma separated list of absolute paths.
(Example: sources=/test/file1,/test/file2,/test/file3)
See also:
{{{Sources}<<<sources>>>}},
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.concat
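For programmatic use, the same operation is reachable through the FileSystem API linked above. A minimal, hypothetical sketch (the class name, host and port are illustrative; it assumes a client configuration where the webhdfs:// scheme resolves to WebHdfsFileSystem):
+---------------------------------
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsConcatSketch {
  public static void main(String[] args) throws Exception {
    // Connect to the NameNode's WebHDFS endpoint (host/port are examples).
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://namenode.example.com:50070"), new Configuration());
    // Appends /test/file2 and /test/file3 onto /test/file1, in that order,
    // matching the sources list in the curl example above.
    fs.concat(new Path("/test/file1"),
        new Path[] { new Path("/test/file2"), new Path("/test/file3") });
  }
}
+---------------------------------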
** {Open and Read a File} ** {Open and Read a File}
* Submit a HTTP GET request with automatically following redirects. * Submit a HTTP GET request with automatically following redirects.
@ -1727,6 +1756,29 @@ var tokenProperties =
{{{Set Replication Factor}<<<SETREPLICATION>>>}} {{{Set Replication Factor}<<<SETREPLICATION>>>}}
** {Sources}
*----------------+-------------------------------------------------------------------+
|| Name | <<<sources>>> |
*----------------+-------------------------------------------------------------------+
|| Description | The comma separated absolute paths used for concatenation. |
*----------------+-------------------------------------------------------------------+
|| Type | String |
*----------------+-------------------------------------------------------------------+
|| Default Value | \<empty\> |
*----------------+-------------------------------------------------------------------+
|| Valid Values | A list of comma separated absolute FileSystem paths without scheme and authority. |
*----------------+-------------------------------------------------------------------+
|| Syntax | See the note in {{Delegation}}. |
*----------------+-------------------------------------------------------------------+
<<Note>> that sources are absolute FileSystem paths.
See also:
{{{Concat File(s)}<<<CONCAT>>>}}
** {Token} ** {Token}
*----------------+-------------------------------------------------------------------+ *----------------+-------------------------------------------------------------------+

View File

@ -33,6 +33,7 @@ import java.io.FileInputStream;
import java.io.FileReader; import java.io.FileReader;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
import java.net.Socket; import java.net.Socket;
import java.net.URL; import java.net.URL;
@ -639,6 +640,9 @@ public class DFSTestUtil {
*/ */
public static byte[] urlGetBytes(URL url) throws IOException { public static byte[] urlGetBytes(URL url) throws IOException {
URLConnection conn = url.openConnection(); URLConnection conn = url.openConnection();
HttpURLConnection hc = (HttpURLConnection)conn;
assertEquals(HttpURLConnection.HTTP_OK, hc.getResponseCode());
ByteArrayOutputStream out = new ByteArrayOutputStream(); ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true); IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
return out.toByteArray(); return out.toByteArray();

View File

@ -23,22 +23,34 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail; import static org.junit.Assert.fail;
import java.io.IOException; import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.URI; import java.net.URI;
import java.net.URISyntaxException; import java.net.URISyntaxException;
import javax.net.SocketFactory;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.net.StandardSocketFactory;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils;
import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import org.mockito.Mockito;
public class TestDFSClientFailover { public class TestDFSClientFailover {
@ -91,6 +103,63 @@ public class TestDFSClientFailover {
fs.close(); fs.close();
} }
/**
* Test that even a non-idempotent method will properly fail-over if the
* first IPC attempt times out trying to connect. Regression test for
* HDFS-4404.
*/
@Test
public void testFailoverOnConnectTimeout() throws Exception {
conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
InjectingSocketFactory.class, SocketFactory.class);
// Set up the InjectingSocketFactory to throw a ConnectTimeoutException
// when connecting to the first NN.
InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
// Make the second NN the active one.
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
// Call a non-idempotent method, and ensure the failover of the call proceeds
// successfully.
IOUtils.closeStream(fs.create(TEST_FILE));
}
private static class InjectingSocketFactory extends StandardSocketFactory {
static SocketFactory defaultFactory = SocketFactory.getDefault();
static int portToInjectOn;
@Override
public Socket createSocket() throws IOException {
Socket spy = Mockito.spy(defaultFactory.createSocket());
// Simplify our spying job by not having to also spy on the channel
Mockito.doReturn(null).when(spy).getChannel();
// Throw a ConnectTimeoutException when connecting to our target "bad"
// host.
Mockito.doThrow(new ConnectTimeoutException("injected"))
.when(spy).connect(
Mockito.argThat(new MatchesPort()),
Mockito.anyInt());
return spy;
}
private class MatchesPort extends BaseMatcher<SocketAddress> {
@Override
public boolean matches(Object arg0) {
return ((InetSocketAddress)arg0).getPort() == portToInjectOn;
}
@Override
public void describeTo(Description desc) {
desc.appendText("matches port " + portToInjectOn);
}
}
}
/** /**
* Regression test for HDFS-2683. * Regression test for HDFS-2683.
*/ */

View File

@ -23,7 +23,10 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail; import static org.junit.Assert.fail;
import static org.mockito.Matchers.any; import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyLong; import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyShort;
import static org.mockito.Matchers.anyString; import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mock;
@ -49,13 +52,13 @@ import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger; import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsUtils; import org.apache.hadoop.hdfs.client.HdfsUtils;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
@ -64,12 +67,14 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil; import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.Writable;
@ -208,7 +213,7 @@ public class TestDFSClientRetries {
* Verify that client will correctly give up after the specified number * Verify that client will correctly give up after the specified number
* of times trying to add a block * of times trying to add a block
*/ */
@SuppressWarnings("serial") @SuppressWarnings({ "serial", "unchecked" })
@Test @Test
public void testNotYetReplicatedErrors() throws IOException public void testNotYetReplicatedErrors() throws IOException
{ {
@ -235,7 +240,22 @@ public class TestDFSClientRetries {
when(mockNN.addBlock(anyString(), when(mockNN.addBlock(anyString(),
anyString(), anyString(),
any(ExtendedBlock.class), any(ExtendedBlock.class),
any(DatanodeInfo[].class))).thenAnswer(answer); any(DatanodeInfo[].class),
anyLong())).thenAnswer(answer);
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010)).when(mockNN).getFileInfo(anyString());
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010))
.when(mockNN)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
anyShort(), anyLong());
final DFSClient client = new DFSClient(null, mockNN, conf, null); final DFSClient client = new DFSClient(null, mockNN, conf, null);
OutputStream os = client.create("testfile", true); OutputStream os = client.create("testfile", true);
@ -369,7 +389,8 @@ public class TestDFSClientRetries {
return ret2; return ret2;
} }
}).when(spyNN).addBlock(Mockito.anyString(), Mockito.anyString(), }).when(spyNN).addBlock(Mockito.anyString(), Mockito.anyString(),
Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any()); Mockito.<ExtendedBlock> any(), Mockito.<DatanodeInfo[]> any(),
Mockito.anyLong());
doAnswer(new Answer<Boolean>() { doAnswer(new Answer<Boolean>() {
@ -410,7 +431,8 @@ public class TestDFSClientRetries {
// Make sure the mock was actually properly injected. // Make sure the mock was actually properly injected.
Mockito.verify(spyNN, Mockito.atLeastOnce()).addBlock( Mockito.verify(spyNN, Mockito.atLeastOnce()).addBlock(
Mockito.anyString(), Mockito.anyString(), Mockito.anyString(), Mockito.anyString(),
Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any()); Mockito.<ExtendedBlock> any(), Mockito.<DatanodeInfo[]> any(),
Mockito.anyLong());
Mockito.verify(spyNN, Mockito.atLeastOnce()).complete( Mockito.verify(spyNN, Mockito.atLeastOnce()).complete(
Mockito.anyString(), Mockito.anyString(), Mockito.anyString(), Mockito.anyString(),
Mockito.<ExtendedBlock>any()); Mockito.<ExtendedBlock>any());

View File

@ -619,6 +619,16 @@ public class TestDFSUtil {
assertEquals(1, uris.size()); assertEquals(1, uris.size());
assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR))); assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
// Make sure when config FS_DEFAULT_NAME_KEY using IP address,
// it will automatically convert it to hostname
conf = new HdfsConfiguration();
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
uris = DFSUtil.getNameServiceUris(conf);
assertEquals(1, uris.size());
for (URI uri : uris) {
assertFalse(uri.getHost().equals("127.0.0.1"));
}
} }
@Test @Test

View File

@ -71,6 +71,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.EnumSetWritable;
@ -517,8 +518,8 @@ public class TestFileCreation {
+ "The file has " + locations.locatedBlockCount() + " blocks."); + "The file has " + locations.locatedBlockCount() + " blocks.");
// add one block to the file // add one block to the file
LocatedBlock location = client.getNamenode().addBlock(file1.toString(), LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
client.clientName, null, null); client.clientName, null, null, INodeId.GRANDFATHER_INODE_ID);
System.out.println("testFileCreationError2: " System.out.println("testFileCreationError2: "
+ "Added block " + location.getBlock()); + "Added block " + location.getBlock());
@ -568,8 +569,8 @@ public class TestFileCreation {
final Path f = new Path("/foo.txt"); final Path f = new Path("/foo.txt");
createFile(dfs, f, 3); createFile(dfs, f, 3);
try { try {
cluster.getNameNodeRpc().addBlock(f.toString(), cluster.getNameNodeRpc().addBlock(f.toString(), client.clientName,
client.clientName, null, null); null, null, INodeId.GRANDFATHER_INODE_ID);
fail(); fail();
} catch(IOException ioe) { } catch(IOException ioe) {
FileSystem.LOG.info("GOOD!", ioe); FileSystem.LOG.info("GOOD!", ioe);

View File

@ -43,7 +43,7 @@ public class TestFileLengthOnClusterRestart {
.numDataNodes(2).build(); .numDataNodes(2).build();
HdfsDataInputStream in = null; HdfsDataInputStream in = null;
try { try {
Path path = new Path(MiniDFSCluster.getBaseDirectory(), "test"); Path path = new Path("/tmp/TestFileLengthOnClusterRestart", "test");
DistributedFileSystem dfs = (DistributedFileSystem) cluster DistributedFileSystem dfs = (DistributedFileSystem) cluster
.getFileSystem(); .getFileSystem();
FSDataOutputStream out = dfs.create(path); FSDataOutputStream out = dfs.create(path);

View File

@ -88,7 +88,7 @@ public class TestGetBlocks {
@Test @Test
public void testReadSelectNonStaleDatanode() throws Exception { public void testReadSelectNonStaleDatanode() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration(); HdfsConfiguration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
long staleInterval = 30 * 1000 * 60; long staleInterval = 30 * 1000 * 60;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
staleInterval); staleInterval);

View File

@ -183,8 +183,7 @@ public class TestLargeBlock {
try { try {
// create a new file in test data directory // create a new file in test data directory
Path file1 = new Path(System.getProperty("test.build.data") + "/" + Path file1 = new Path("/tmp/TestLargeBlock", blockSize + ".dat");
Long.toString(blockSize) + ".dat");
FSDataOutputStream stm = createFile(fs, file1, 1, blockSize); FSDataOutputStream stm = createFile(fs, file1, 1, blockSize);
LOG.info("File " + file1 + " created with file size " + LOG.info("File " + file1 + " created with file size " +
fileSize + fileSize +

View File

@ -18,6 +18,10 @@
package org.apache.hadoop.hdfs; package org.apache.hadoop.hdfs;
import static org.mockito.Matchers.anyString; import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.anyShort;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy; import static org.mockito.Mockito.spy;
@ -29,14 +33,18 @@ import java.security.PrivilegedExceptionAction;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@ -256,6 +264,7 @@ public class TestLease {
} }
} }
@SuppressWarnings("unchecked")
@Test @Test
public void testFactory() throws Exception { public void testFactory() throws Exception {
final String[] groups = new String[]{"supergroup"}; final String[] groups = new String[]{"supergroup"};
@ -264,6 +273,20 @@ public class TestLease {
ugi[i] = UserGroupInformation.createUserForTesting("user" + i, groups); ugi[i] = UserGroupInformation.createUserForTesting("user" + i, groups);
} }
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010)).when(mcp).getFileInfo(anyString());
Mockito
.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010))
.when(mcp)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
anyShort(), anyLong());
final Configuration conf = new Configuration(); final Configuration conf = new Configuration();
final DFSClient c1 = createDFSClientAs(ugi[0], conf); final DFSClient c1 = createDFSClientAs(ugi[0], conf);
FSDataOutputStream out1 = createFsOut(c1, "/out1"); FSDataOutputStream out1 = createFsOut(c1, "/out1");

View File

@ -38,6 +38,7 @@ public class TestListFilesInDFS extends TestListFiles {
@BeforeClass @BeforeClass
public static void testSetUp() throws Exception { public static void testSetUp() throws Exception {
setTestPaths(new Path("/tmp/TestListFilesInDFS"));
cluster = new MiniDFSCluster.Builder(conf).build(); cluster = new MiniDFSCluster.Builder(conf).build();
fs = cluster.getFileSystem(); fs = cluster.getFileSystem();
fs.delete(TEST_DIR, true); fs.delete(TEST_DIR, true);

View File

@ -70,8 +70,8 @@ public class TestQuota {
throw new DSQuotaExceededException(bytes, bytes); throw new DSQuotaExceededException(bytes, bytes);
} catch(DSQuotaExceededException e) { } catch(DSQuotaExceededException e) {
assertEquals("The DiskSpace quota is exceeded: quota=1.0k " + assertEquals("The DiskSpace quota is exceeded: quota = 1024 B = 1 KB"
"diskspace consumed=1.0k", e.getMessage()); + " but diskspace consumed = 1024 B = 1 KB", e.getMessage());
} }
} }

View File

@ -67,7 +67,7 @@ public class TestRBWBlockInvalidation {
try { try {
final FSNamesystem namesystem = cluster.getNamesystem(); final FSNamesystem namesystem = cluster.getNamesystem();
FileSystem fs = cluster.getFileSystem(); FileSystem fs = cluster.getFileSystem();
Path testPath = new Path(MiniDFSCluster.getBaseDirectory(), "foo1"); Path testPath = new Path("/tmp/TestRBWBlockInvalidation", "foo1");
out = fs.create(testPath, (short) 2); out = fs.create(testPath, (short) 2);
out.writeBytes("HDFS-3157: " + testPath); out.writeBytes("HDFS-3157: " + testPath);
out.hsync(); out.hsync();

View File

@ -88,9 +88,11 @@ public class TestReplicationPolicy {
"test.build.data", "build/test/data"), "dfs/"); "test.build.data", "build/test/data"), "dfs/");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
new File(baseDir, "name").getPath()); new File(baseDir, "name").getPath());
// Enable the checking for stale datanodes in the beginning
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
DFSTestUtil.formatNameNode(conf); DFSTestUtil.formatNameNode(conf);
namenode = new NameNode(conf); namenode = new NameNode(conf);
@ -100,6 +102,8 @@ public class TestReplicationPolicy {
// construct network topology // construct network topology
for (int i=0; i < NUM_OF_DATANODES; i++) { for (int i=0; i < NUM_OF_DATANODES; i++) {
cluster.add(dataNodes[i]); cluster.add(dataNodes[i]);
bm.getDatanodeManager().getHeartbeatManager().addDatanode(
dataNodes[i]);
} }
for (int i=0; i < NUM_OF_DATANODES; i++) { for (int i=0; i < NUM_OF_DATANODES; i++) {
dataNodes[i].updateHeartbeat( dataNodes[i].updateHeartbeat(
@ -393,11 +397,11 @@ public class TestReplicationPolicy {
throws Exception { throws Exception {
try { try {
namenode.getNamesystem().getBlockManager().getDatanodeManager() namenode.getNamesystem().getBlockManager().getDatanodeManager()
.setAvoidStaleDataNodesForWrite(true); .setNumStaleNodes(NUM_OF_DATANODES);
testChooseTargetWithMoreThanAvailableNodes(); testChooseTargetWithMoreThanAvailableNodes();
} finally { } finally {
namenode.getNamesystem().getBlockManager().getDatanodeManager() namenode.getNamesystem().getBlockManager().getDatanodeManager()
.setAvoidStaleDataNodesForWrite(false); .setNumStaleNodes(0);
} }
} }
@ -479,12 +483,12 @@ public class TestReplicationPolicy {
@Test @Test
public void testChooseTargetWithStaleNodes() throws Exception { public void testChooseTargetWithStaleNodes() throws Exception {
// Enable avoiding writing to stale datanodes
namenode.getNamesystem().getBlockManager().getDatanodeManager()
.setAvoidStaleDataNodesForWrite(true);
// Set dataNodes[0] as stale // Set dataNodes[0] as stale
dataNodes[0].setLastUpdate(Time.now() - staleInterval - 1); dataNodes[0].setLastUpdate(Time.now() - staleInterval - 1);
namenode.getNamesystem().getBlockManager()
.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
assertTrue(namenode.getNamesystem().getBlockManager()
.getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
DatanodeDescriptor[] targets; DatanodeDescriptor[] targets;
// We set the datanode[0] as stale, thus should choose datanode[1] since // We set the datanode[0] as stale, thus should choose datanode[1] since
// datanode[1] is on the same rack with datanode[0] (writer) // datanode[1] is on the same rack with datanode[0] (writer)
@ -503,9 +507,9 @@ public class TestReplicationPolicy {
assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0])); assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
// reset // reset
namenode.getNamesystem().getBlockManager().getDatanodeManager()
.setAvoidStaleDataNodesForWrite(false);
dataNodes[0].setLastUpdate(Time.now()); dataNodes[0].setLastUpdate(Time.now());
namenode.getNamesystem().getBlockManager()
.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
} }
/** /**
@ -518,20 +522,20 @@ public class TestReplicationPolicy {
*/ */
@Test @Test
public void testChooseTargetWithHalfStaleNodes() throws Exception { public void testChooseTargetWithHalfStaleNodes() throws Exception {
// Enable stale datanodes checking
namenode.getNamesystem().getBlockManager().getDatanodeManager()
.setAvoidStaleDataNodesForWrite(true);
// Set dataNodes[0], dataNodes[1], and dataNodes[2] as stale // Set dataNodes[0], dataNodes[1], and dataNodes[2] as stale
for (int i = 0; i < 3; i++) { for (int i = 0; i < 3; i++) {
dataNodes[i].setLastUpdate(Time.now() - staleInterval - 1); dataNodes[i].setLastUpdate(Time.now() - staleInterval - 1);
} }
namenode.getNamesystem().getBlockManager()
.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
DatanodeDescriptor[] targets; DatanodeDescriptor[] targets;
targets = replicator.chooseTarget(filename, 0, dataNodes[0], targets = replicator.chooseTarget(filename, 0, dataNodes[0],
new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE); new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 0); assertEquals(targets.length, 0);
// We set the datanode[0] as stale, thus should choose datanode[1] // Since we have 6 datanodes total, stale nodes should
// not be returned until we ask for more than 3 targets
targets = replicator.chooseTarget(filename, 1, dataNodes[0], targets = replicator.chooseTarget(filename, 1, dataNodes[0],
new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE); new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 1); assertEquals(targets.length, 1);
@ -557,18 +561,16 @@ public class TestReplicationPolicy {
assertTrue(containsWithinRange(dataNodes[4], targets, 0, 3)); assertTrue(containsWithinRange(dataNodes[4], targets, 0, 3));
assertTrue(containsWithinRange(dataNodes[5], targets, 0, 3)); assertTrue(containsWithinRange(dataNodes[5], targets, 0, 3));
// reset
namenode.getNamesystem().getBlockManager().getDatanodeManager()
.setAvoidStaleDataNodesForWrite(false);
for (int i = 0; i < dataNodes.length; i++) { for (int i = 0; i < dataNodes.length; i++) {
dataNodes[i].setLastUpdate(Time.now()); dataNodes[i].setLastUpdate(Time.now());
} }
namenode.getNamesystem().getBlockManager()
.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
} }
@Test @Test
public void testChooseTargetWithMoreThanHalfStaleNodes() throws Exception { public void testChooseTargetWithMoreThanHalfStaleNodes() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration(); HdfsConfiguration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
conf.setBoolean( conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true); DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
String[] hosts = new String[]{"host1", "host2", "host3", String[] hosts = new String[]{"host1", "host2", "host3",
@ -598,7 +600,7 @@ public class TestReplicationPolicy {
.getBlockManager().getDatanodeManager().getNumStaleNodes(); .getBlockManager().getDatanodeManager().getNumStaleNodes();
assertEquals(numStaleNodes, 2); assertEquals(numStaleNodes, 2);
assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager() assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().isAvoidingStaleDataNodesForWrite()); .getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
// Call chooseTarget // Call chooseTarget
DatanodeDescriptor staleNodeInfo = miniCluster.getNameNode() DatanodeDescriptor staleNodeInfo = miniCluster.getNameNode()
.getNamesystem().getBlockManager().getDatanodeManager() .getNamesystem().getBlockManager().getDatanodeManager()
@ -627,7 +629,7 @@ public class TestReplicationPolicy {
// According to our strategy, stale datanodes will be included for writing // According to our strategy, stale datanodes will be included for writing
// to avoid hotspots // to avoid hotspots
assertFalse(miniCluster.getNameNode().getNamesystem().getBlockManager() assertFalse(miniCluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().isAvoidingStaleDataNodesForWrite()); .getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
// Call chooseTarget // Call chooseTarget
targets = replicator.chooseTarget(filename, 3, targets = replicator.chooseTarget(filename, 3,
staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE); staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
@ -650,7 +652,7 @@ public class TestReplicationPolicy {
.getBlockManager().getDatanodeManager().getNumStaleNodes(); .getBlockManager().getDatanodeManager().getNumStaleNodes();
assertEquals(numStaleNodes, 2); assertEquals(numStaleNodes, 2);
assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager() assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().isAvoidingStaleDataNodesForWrite()); .getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
// Call chooseTarget // Call chooseTarget
targets = replicator.chooseTarget(filename, 3, targets = replicator.chooseTarget(filename, 3,
staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE); staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);

View File

@ -506,7 +506,11 @@ public abstract class FSImageTestUtil {
props.load(fis); props.load(fis);
IOUtils.closeStream(fis); IOUtils.closeStream(fis);
props.setProperty(key, value); if (value == null || value.isEmpty()) {
props.remove(key);
} else {
props.setProperty(key, value);
}
out = new FileOutputStream(versionFile); out = new FileOutputStream(versionFile);
props.store(out, null); props.store(out, null);

View File

@ -1058,7 +1058,8 @@ public class NNThroughputBenchmark {
throws IOException { throws IOException {
ExtendedBlock prevBlock = null; ExtendedBlock prevBlock = null;
for(int jdx = 0; jdx < blocksPerFile; jdx++) { for(int jdx = 0; jdx < blocksPerFile; jdx++) {
LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName, prevBlock, null); LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName,
prevBlock, null, INodeId.GRANDFATHER_INODE_ID);
prevBlock = loc.getBlock(); prevBlock = loc.getBlock();
for(DatanodeInfo dnInfo : loc.getLocations()) { for(DatanodeInfo dnInfo : loc.getLocations()) {
int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr()); int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());

View File

@ -0,0 +1,141 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;
import java.lang.reflect.Field;
import java.util.EnumSet;
import java.util.HashMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.net.Node;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
/**
* Race between two threads simultaneously calling
* FSNamesystem.getAdditionalBlock().
*/
public class TestAddBlockRetry {
public static final Log LOG = LogFactory.getLog(TestAddBlockRetry.class);
private static final short REPLICATION = 3;
private Configuration conf;
private MiniDFSCluster cluster;
private int count = 0;
private LocatedBlock lb1;
private LocatedBlock lb2;
@Before
public void setUp() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(REPLICATION)
.build();
cluster.waitActive();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Retry addBlock() while another thread is in chooseTarget().
* See HDFS-4452.
*/
@Test
public void testRetryAddBlockWhileInChooseTarget() throws Exception {
final String src = "/testRetryAddBlockWhileInChooseTarget";
FSNamesystem ns = cluster.getNamesystem();
BlockManager spyBM = spy(ns.getBlockManager());
final NamenodeProtocols nn = cluster.getNameNodeRpc();
// substitute mocked BlockManager into FSNamesystem
Class<? extends FSNamesystem> nsClass = ns.getClass();
Field bmField = nsClass.getDeclaredField("blockManager");
bmField.setAccessible(true);
bmField.set(ns, spyBM);
doAnswer(new Answer<DatanodeDescriptor[]>() {
@Override
public DatanodeDescriptor[] answer(InvocationOnMock invocation)
throws Throwable {
LOG.info("chooseTarget for " + src);
DatanodeDescriptor[] ret =
(DatanodeDescriptor[]) invocation.callRealMethod();
count++;
if(count == 1) { // run second addBlock()
LOG.info("Starting second addBlock for " + src);
nn.addBlock(src, "clientName", null, null,
INodeId.GRANDFATHER_INODE_ID);
LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
lb2 = lbs.get(0);
assertEquals("Wrong replication",
REPLICATION, lb2.getLocations().length);
}
return ret;
}
}).when(spyBM).chooseTarget(Mockito.anyString(), Mockito.anyInt(),
Mockito.<DatanodeDescriptor>any(), Mockito.<HashMap<Node, Node>>any(),
Mockito.anyLong());
// create file
nn.create(src, FsPermission.getFileDefault(),
"clientName",
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
true, (short)3, 1024);
// start first addBlock()
LOG.info("Starting first addBlock for " + src);
nn.addBlock(src, "clientName", null, null, INodeId.GRANDFATHER_INODE_ID);
// check locations
LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
lb1 = lbs.get(0);
assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
}
}

View File

@ -0,0 +1,134 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertTrue;
import java.net.InetSocketAddress;
import java.net.URL;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.junit.Test;
/**
* DFS_HOSTS and DFS_HOSTS_EXCLUDE tests
*
*/
public class TestHostsFiles {
private static final Log LOG =
LogFactory.getLog(TestHostsFiles.class.getName());
/*
* Return a configuration object with low timeouts for testing and
* a topology script set (which enables rack awareness).
*/
private Configuration getConf() {
Configuration conf = new HdfsConfiguration();
// Lower the heart beat interval so the NN quickly learns of dead
// or decommissioned DNs and the NN issues replication and invalidation
// commands quickly (as replies to heartbeats)
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
// Have the NN ReplicationMonitor compute the replication and
// invalidation commands to send DNs every second.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
// Have the NN check for pending replications every second so it
// quickly schedules additional replicas as they are identified.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
// The DNs report blocks every second.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
// Indicates we have multiple racks
conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "xyz");
return conf;
}
@Test
public void testHostsExcludeDfshealthJsp() throws Exception {
Configuration conf = getConf();
short REPLICATION_FACTOR = 2;
final Path filePath = new Path("/testFile");
// Configure an excludes file
FileSystem localFileSys = FileSystem.getLocal(conf);
Path workingDir = localFileSys.getWorkingDirectory();
Path dir = new Path(workingDir, "build/test/data/temp/decommission");
Path excludeFile = new Path(dir, "exclude");
Path includeFile = new Path(dir, "include");
assertTrue(localFileSys.mkdirs(dir));
DFSTestUtil.writeFile(localFileSys, excludeFile, "");
DFSTestUtil.writeFile(localFileSys, includeFile, "");
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
// Two blocks and four racks
String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns = cluster.getNameNode().getNamesystem();
try {
// Create a file with one block
final FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
// Decommission one of the hosts with the block. This should cause
// the block to get replicated to another host on the same rack;
// otherwise the rack policy is violated.
BlockLocation locs[] = fs.getFileBlockLocations(
fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
String name = locs[0].getNames()[0];
String names = name + "\n" + "localhost:42\n";
LOG.info("adding '" + names + "' to exclude file " + excludeFile.toUri().getPath());
DFSTestUtil.writeFile(localFileSys, excludeFile, name);
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
DFSTestUtil.waitForDecommission(fs, name);
// Check the block still has sufficient # replicas across racks
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
InetSocketAddress nnHttpAddress = cluster.getNameNode().getHttpAddress();
LOG.info("nnaddr = '" + nnHttpAddress + "'");
URL nnjsp = new URL("http://" + nnHttpAddress.getHostName() + ":" + nnHttpAddress.getPort() + "/dfshealth.jsp");
LOG.info("fetching " + nnjsp);
String dfshealthPage = StringEscapeUtils.unescapeHtml(DFSTestUtil.urlGet(nnjsp));
LOG.info("got " + dfshealthPage);
assertTrue("dfshealth should contain localhost, got:" + dfshealthPage,
dfshealthPage.contains("localhost"));
} finally {
cluster.shutdown();
}
}
}

View File

@ -24,8 +24,10 @@ import static org.junit.Assert.fail;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Options;
@ -39,6 +41,8 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.EnumSetWritable;
import org.junit.Test; import org.junit.Test;
public class TestINodeFile { public class TestINodeFile {
@ -376,7 +380,7 @@ public class TestINodeFile {
* @throws IOException * @throws IOException
*/ */
@Test @Test
public void TestInodeId() throws IOException { public void testInodeId() throws IOException {
Configuration conf = new Configuration(); Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
@ -396,9 +400,14 @@ public class TestINodeFile {
assertTrue(fs.mkdirs(path)); assertTrue(fs.mkdirs(path));
assertTrue(fsn.getLastInodeId() == 1002); assertTrue(fsn.getLastInodeId() == 1002);
Path filePath = new Path("/test1/file"); // Use namenode rpc to create a file
fs.create(filePath); NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
HdfsFileStatus fileStatus = nnrpc.create("/test1/file", new FsPermission(
(short) 0755), "client",
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
(short) 1, 128 * 1024 * 1024L);
assertTrue(fsn.getLastInodeId() == 1003); assertTrue(fsn.getLastInodeId() == 1003);
assertTrue(fileStatus.getFileId() == 1003);
// Rename doesn't increase inode id // Rename doesn't increase inode id
Path renamedPath = new Path("/test2"); Path renamedPath = new Path("/test2");
@ -412,4 +421,44 @@ public class TestINodeFile {
cluster.waitActive(); cluster.waitActive();
assertTrue(fsn.getLastInodeId() == 1003); assertTrue(fsn.getLastInodeId() == 1003);
} }
@Test
public void testWriteToRenamedFile() throws IOException {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
Path path = new Path("/test1");
assertTrue(fs.mkdirs(path));
int size = conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
byte[] data = new byte[size];
// Create one file
Path filePath = new Path("/test1/file");
FSDataOutputStream fos = fs.create(filePath);
// Rename /test1 to test2, and recreate /test1/file
Path renamedPath = new Path("/test2");
fs.rename(path, renamedPath);
fs.create(filePath, (short) 1);
// Adding a new block should fail since /test1/file has a different fileId
try {
fos.write(data, 0, data.length);
// make sure addBlock() request gets to NN immediately
fos.hflush();
fail("Write should fail after rename");
} catch (Exception e) {
/* Ignore */
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
} }

View File

@ -17,9 +17,12 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.collect.ImmutableMap;
import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.util.List; import java.util.List;
import java.util.Map;
import org.junit.Test; import org.junit.Test;
import org.junit.Before; import org.junit.Before;
@ -51,7 +54,7 @@ public class TestSecondaryNameNodeUpgrade {
} }
} }
private void doIt(String param, String val) throws IOException { private void doIt(Map<String, String> paramsToCorrupt) throws IOException {
MiniDFSCluster cluster = null; MiniDFSCluster cluster = null;
FileSystem fs = null; FileSystem fs = null;
SecondaryNameNode snn = null; SecondaryNameNode snn = null;
@ -76,8 +79,12 @@ public class TestSecondaryNameNodeUpgrade {
snn.shutdown(); snn.shutdown();
for (File versionFile : versionFiles) { for (File versionFile : versionFiles) {
System.out.println("Changing '" + param + "' to '" + val + "' in " + versionFile); for (Map.Entry<String, String> paramToCorrupt : paramsToCorrupt.entrySet()) {
FSImageTestUtil.corruptVersionFile(versionFile, param, val); String param = paramToCorrupt.getKey();
String val = paramToCorrupt.getValue();
System.out.println("Changing '" + param + "' to '" + val + "' in " + versionFile);
FSImageTestUtil.corruptVersionFile(versionFile, param, val);
}
} }
snn = new SecondaryNameNode(conf); snn = new SecondaryNameNode(conf);
@ -94,13 +101,19 @@ public class TestSecondaryNameNodeUpgrade {
@Test @Test
public void testUpgradeLayoutVersionSucceeds() throws IOException { public void testUpgradeLayoutVersionSucceeds() throws IOException {
doIt("layoutVersion", "-39"); doIt(ImmutableMap.of("layoutVersion", "-39"));
}
@Test
public void testUpgradePreFedSucceeds() throws IOException {
doIt(ImmutableMap.of("layoutVersion", "-19", "clusterID", "",
"blockpoolID", ""));
} }
@Test @Test
public void testChangeNsIDFails() throws IOException { public void testChangeNsIDFails() throws IOException {
try { try {
doIt("namespaceID", "2"); doIt(ImmutableMap.of("namespaceID", "2"));
Assert.fail("Should throw InconsistentFSStateException"); Assert.fail("Should throw InconsistentFSStateException");
} catch(IOException e) { } catch(IOException e) {
GenericTestUtils.assertExceptionContains("Inconsistent checkpoint fields", e); GenericTestUtils.assertExceptionContains("Inconsistent checkpoint fields", e);

View File

@ -82,7 +82,7 @@ public class TestNameNodeMetrics {
CONF.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, CONF.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY,
"" + PERCENTILES_INTERVAL); "" + PERCENTILES_INTERVAL);
// Enable stale DataNodes checking // Enable stale DataNodes checking
CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true); CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
((Log4JLogger)LogFactory.getLog(MetricsAsserts.class)) ((Log4JLogger)LogFactory.getLog(MetricsAsserts.class))
.getLogger().setLevel(Level.DEBUG); .getLogger().setLevel(Level.DEBUG);
} }

View File

@ -24,6 +24,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Test; import org.junit.Test;
@ -42,9 +43,10 @@ public class TestJsonUtil {
public void testHdfsFileStatus() { public void testHdfsFileStatus() {
final long now = Time.now(); final long now = Time.now();
final String parent = "/dir"; final String parent = "/dir";
final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L<<26, final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
now, now + 10, new FsPermission((short)0644), "user", "group", now, now + 10, new FsPermission((short) 0644), "user", "group",
DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo")); DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
INodeId.GRANDFATHER_INODE_ID);
final FileStatus fstatus = toFileStatus(status, parent); final FileStatus fstatus = toFileStatus(status, parent);
System.out.println("status = " + status); System.out.println("status = " + status);
System.out.println("fstatus = " + fstatus); System.out.println("fstatus = " + fstatus);

View File

@ -1182,7 +1182,7 @@
</comparator> </comparator>
<comparator> <comparator>
<type>RegexpComparator</type> <type>RegexpComparator</type>
<expected-output>^1\.0k\s+hdfs:///dir0/data1k</expected-output> <expected-output>^1\.0 K\s+hdfs:///dir0/data1k</expected-output>
</comparator> </comparator>
</comparators> </comparators>
</test> </test>
@ -15590,7 +15590,7 @@
<comparators> <comparators>
<comparator> <comparator>
<type>RegexpComparator</type> <type>RegexpComparator</type>
<expected-output>put: The DiskSpace quota of /dir1 is exceeded: quota=1.0k diskspace consumed=[0-9.]+[kmg]*</expected-output> <expected-output>put: The DiskSpace quota of /dir1 is exceeded: quota = 1024 B = 1 KB but diskspace consumed = [0-9]+ B = [0-9.]+ [KMG]B*</expected-output>
</comparator> </comparator>
</comparators> </comparators>
</test> </test>
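
The two expected-output updates above reflect a change in how sizes are rendered in CLI output: the du column now prints "1.0 K" rather than "1.0k", and the quota message spells out both the raw byte count and a human-readable value. As an illustration only, not the formatter Hadoop itself uses, a small binary-prefix renderer producing that "1.0 K" style could look like this:

    public class HumanReadableSketch {
      // Illustrative only: one binary-prefix letter with one decimal place.
      static String format(long bytes) {
        final String[] prefixes = {"", "K", "M", "G", "T", "P", "E"};
        double value = bytes;
        int i = 0;
        while (value >= 1024 && i < prefixes.length - 1) {
          value /= 1024;
          i++;
        }
        return i == 0 ? String.valueOf(bytes)
                      : String.format("%.1f %s", value, prefixes[i]);
      }

      public static void main(String[] args) {
        System.out.println(format(1024L));            // 1.0 K
        System.out.println(format(1536L));            // 1.5 K
        System.out.println(format(5L * 1024 * 1024)); // 5.0 M
      }
    }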

View File

@ -152,7 +152,22 @@ Trunk (Unreleased)
MAPREDUCE-4884. Streaming tests fail to start MiniMRCluster due to missing MAPREDUCE-4884. Streaming tests fail to start MiniMRCluster due to missing
queue configuration. (Chris Nauroth via suresh) queue configuration. (Chris Nauroth via suresh)
Release 2.0.3-alpha - Unreleased Release 2.0.4-beta - UNRELEASED
INCOMPATIBLE CHANGES
NEW FEATURES
IMPROVEMENTS
OPTIMIZATIONS
BUG FIXES
MAPREDUCE-4671. AM does not tell the RM about container requests which are
no longer needed. (Bikas Saha via sseth)
Release 2.0.3-alpha - 2013-02-06
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES
@ -215,6 +230,12 @@ Release 2.0.3-alpha - Unreleased
MAPREDUCE-4838. Add additional fields like Locality, Avataar to the MAPREDUCE-4838. Add additional fields like Locality, Avataar to the
JobHistory logs. (Zhijie Shen via sseth) JobHistory logs. (Zhijie Shen via sseth)
MAPREDUCE-4971. Minor extensibility enhancements to Counters &
FileOutputFormat. (Arun C Murthy via sseth)
MAPREDUCE-4977. Documentation for pluggable shuffle and pluggable sort.
(tucu)
OPTIMIZATIONS OPTIMIZATIONS
MAPREDUCE-4893. Fixed MR ApplicationMaster to do optimal assignment of MAPREDUCE-4893. Fixed MR ApplicationMaster to do optimal assignment of
@ -284,6 +305,8 @@ Release 2.0.3-alpha - Unreleased
MAPREDUCE-4969. TestKeyValueTextInputFormat test fails with Open JDK 7. MAPREDUCE-4969. TestKeyValueTextInputFormat test fails with Open JDK 7.
(Arpit Agarwal via suresh) (Arpit Agarwal via suresh)
MAPREDUCE-4953. HadoopPipes misuses fprintf. (Andy Isaacson via atm)
Release 2.0.2-alpha - 2012-09-07 Release 2.0.2-alpha - 2012-09-07
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES
@ -668,11 +691,17 @@ Release 0.23.7 - UNRELEASED
IMPROVEMENTS IMPROVEMENTS
MAPREDUCE-4905. test org.apache.hadoop.mapred.pipes
(Aleksey Gorshkov via bobby)
OPTIMIZATIONS OPTIMIZATIONS
MAPREDUCE-4946. Fix a performance problem for large jobs by reducing the MAPREDUCE-4946. Fix a performance problem for large jobs by reducing the
number of map completion event type conversions. (Jason Lowe via sseth) number of map completion event type conversions. (Jason Lowe via sseth)
MAPREDUCE-4822. Unnecessary conversions in History Events. (Chu Tong via
jlowe)
BUG FIXES BUG FIXES
MAPREDUCE-4458. Warn if java.library.path is used for AM or Task MAPREDUCE-4458. Warn if java.library.path is used for AM or Task

View File

@ -72,7 +72,10 @@ public abstract class RMContainerRequestor extends RMCommunicator {
remoteRequestsTable = remoteRequestsTable =
new TreeMap<Priority, Map<String, Map<Resource, ResourceRequest>>>(); new TreeMap<Priority, Map<String, Map<Resource, ResourceRequest>>>();
private final Set<ResourceRequest> ask = new TreeSet<ResourceRequest>(); // use custom comparator to make sure ResourceRequest objects differing only in
// numContainers don't end up as duplicates
private final Set<ResourceRequest> ask = new TreeSet<ResourceRequest>(
new org.apache.hadoop.yarn.util.BuilderUtils.ResourceRequestComparator());
private final Set<ContainerId> release = new TreeSet<ContainerId>(); private final Set<ContainerId> release = new TreeSet<ContainerId>();
private boolean nodeBlacklistingEnabled; private boolean nodeBlacklistingEnabled;
@ -235,7 +238,7 @@ public abstract class RMContainerRequestor extends RMCommunicator {
ResourceRequest zeroedRequest = BuilderUtils.newResourceRequest(req); ResourceRequest zeroedRequest = BuilderUtils.newResourceRequest(req);
zeroedRequest.setNumContainers(0); zeroedRequest.setNumContainers(0);
// to be sent to RM on next heartbeat // to be sent to RM on next heartbeat
ask.add(zeroedRequest); addResourceRequestToAsk(zeroedRequest);
} }
} }
// if all requests were still in ask queue // if all requests were still in ask queue
@ -320,7 +323,7 @@ public abstract class RMContainerRequestor extends RMCommunicator {
remoteRequest.setNumContainers(remoteRequest.getNumContainers() + 1); remoteRequest.setNumContainers(remoteRequest.getNumContainers() + 1);
// Note this down for next interaction with ResourceManager // Note this down for next interaction with ResourceManager
ask.add(remoteRequest); addResourceRequestToAsk(remoteRequest);
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("addResourceRequest:" + " applicationId=" LOG.debug("addResourceRequest:" + " applicationId="
+ applicationId.getId() + " priority=" + priority.getPriority() + applicationId.getId() + " priority=" + priority.getPriority()
@ -353,7 +356,12 @@ public abstract class RMContainerRequestor extends RMCommunicator {
+ remoteRequest.getNumContainers() + " #asks=" + ask.size()); + remoteRequest.getNumContainers() + " #asks=" + ask.size());
} }
remoteRequest.setNumContainers(remoteRequest.getNumContainers() -1); if(remoteRequest.getNumContainers() > 0) {
// Based on the blacklisting comments above we can end up decrementing more
// than requested, so guard for that.
remoteRequest.setNumContainers(remoteRequest.getNumContainers() -1);
}
if (remoteRequest.getNumContainers() == 0) { if (remoteRequest.getNumContainers() == 0) {
reqMap.remove(capability); reqMap.remove(capability);
if (reqMap.size() == 0) { if (reqMap.size() == 0) {
@ -362,13 +370,12 @@ public abstract class RMContainerRequestor extends RMCommunicator {
if (remoteRequests.size() == 0) { if (remoteRequests.size() == 0) {
remoteRequestsTable.remove(priority); remoteRequestsTable.remove(priority);
} }
//remove from ask if it may have
ask.remove(remoteRequest);
} else {
ask.add(remoteRequest);//this will override the request if ask doesn't
//already have it.
} }
// send the updated resource request to RM
// send 0 container count requests also to cancel previous requests
addResourceRequestToAsk(remoteRequest);
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.info("AFTER decResourceRequest:" + " applicationId=" LOG.info("AFTER decResourceRequest:" + " applicationId="
+ applicationId.getId() + " priority=" + priority.getPriority() + applicationId.getId() + " priority=" + priority.getPriority()
@ -376,6 +383,16 @@ public abstract class RMContainerRequestor extends RMCommunicator {
+ remoteRequest.getNumContainers() + " #asks=" + ask.size()); + remoteRequest.getNumContainers() + " #asks=" + ask.size());
} }
} }
private void addResourceRequestToAsk(ResourceRequest remoteRequest) {
// Because objects inside the resource map can be deleted, ask can end up
// containing an object that matches a new resource object but with a different
// numContainers. So existing values must be replaced explicitly.
if(ask.contains(remoteRequest)) {
ask.remove(remoteRequest);
}
ask.add(remoteRequest);
}
protected void release(ContainerId containerId) { protected void release(ContainerId containerId) {
release.add(containerId); release.add(containerId);
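
Both comments in this hunk come down to a java.util.TreeSet detail: add() is a no-op when the comparator already reports an equal element, so an ask whose numContainers changed would silently keep its stale count unless the entry is removed and re-added. A minimal standalone sketch of that behaviour, using a hypothetical Ask class in place of ResourceRequest and a comparator that deliberately ignores the container count:

    import java.util.Comparator;
    import java.util.TreeSet;

    public class AskSetSketch {
      // Hypothetical stand-in for a resource request: only the identifying
      // fields take part in the comparator, the container count does not.
      static final class Ask {
        final int priority;
        final String host;
        final int memoryMb;
        int numContainers;

        Ask(int priority, String host, int memoryMb, int numContainers) {
          this.priority = priority;
          this.host = host;
          this.memoryMb = memoryMb;
          this.numContainers = numContainers;
        }
      }

      static final Comparator<Ask> IGNORE_COUNT = Comparator
          .<Ask>comparingInt(a -> a.priority)
          .thenComparing(a -> a.host)
          .thenComparingInt(a -> a.memoryMb);

      public static void main(String[] args) {
        TreeSet<Ask> ask = new TreeSet<>(IGNORE_COUNT);
        ask.add(new Ask(1, "host1", 1024, 3));

        // add() keeps the existing element when the comparator says an equal
        // entry is already present, so the stale count of 3 survives.
        boolean added = ask.add(new Ask(1, "host1", 1024, 5));
        System.out.println(added + " " + ask.first().numContainers); // false 3

        // Replace explicitly, as addResourceRequestToAsk() in the patch does.
        Ask updated = new Ask(1, "host1", 1024, 5);
        ask.remove(updated);
        ask.add(updated);
        System.out.println(ask.first().numContainers);               // 5
      }
    }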

View File

@ -167,6 +167,7 @@ public class TestRMContainerAllocator {
List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule(); List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
dispatcher.await(); dispatcher.await();
Assert.assertEquals("No of assignments must be 0", 0, assigned.size()); Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
Assert.assertEquals(4, rm.getMyFifoScheduler().lastAsk.size());
// send another request with different resource and priority // send another request with different resource and priority
ContainerRequestEvent event3 = createReq(jobId, 3, 1024, ContainerRequestEvent event3 = createReq(jobId, 3, 1024,
@ -178,7 +179,8 @@ public class TestRMContainerAllocator {
assigned = allocator.schedule(); assigned = allocator.schedule();
dispatcher.await(); dispatcher.await();
Assert.assertEquals("No of assignments must be 0", 0, assigned.size()); Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
Assert.assertEquals(3, rm.getMyFifoScheduler().lastAsk.size());
// update resources in scheduler // update resources in scheduler
nodeManager1.nodeHeartbeat(true); // Node heartbeat nodeManager1.nodeHeartbeat(true); // Node heartbeat
nodeManager2.nodeHeartbeat(true); // Node heartbeat nodeManager2.nodeHeartbeat(true); // Node heartbeat
@ -187,8 +189,14 @@ public class TestRMContainerAllocator {
assigned = allocator.schedule(); assigned = allocator.schedule();
dispatcher.await(); dispatcher.await();
Assert.assertEquals(0, rm.getMyFifoScheduler().lastAsk.size());
checkAssignments(new ContainerRequestEvent[] { event1, event2, event3 }, checkAssignments(new ContainerRequestEvent[] { event1, event2, event3 },
assigned, false); assigned, false);
// check that the assigned container requests are cancelled
assigned = allocator.schedule();
dispatcher.await();
Assert.assertEquals(5, rm.getMyFifoScheduler().lastAsk.size());
} }
@Test @Test
@ -422,7 +430,7 @@ public class TestRMContainerAllocator {
} }
private static class MyResourceManager extends MockRM { private static class MyResourceManager extends MockRM {
public MyResourceManager(Configuration conf) { public MyResourceManager(Configuration conf) {
super(conf); super(conf);
} }
@ -446,6 +454,10 @@ public class TestRMContainerAllocator {
protected ResourceScheduler createScheduler() { protected ResourceScheduler createScheduler() {
return new MyFifoScheduler(this.getRMContext()); return new MyFifoScheduler(this.getRMContext());
} }
MyFifoScheduler getMyFifoScheduler() {
return (MyFifoScheduler) scheduler;
}
} }
@Test @Test
@ -1194,7 +1206,9 @@ public class TestRMContainerAllocator {
assert (false); assert (false);
} }
} }
List<ResourceRequest> lastAsk = null;
// override this to copy the objects, otherwise FifoScheduler updates the
// numContainers in the same objects as kept by RMContainerAllocator
@Override @Override
@ -1208,6 +1222,7 @@ public class TestRMContainerAllocator {
.getNumContainers()); .getNumContainers());
askCopy.add(reqCopy); askCopy.add(reqCopy);
} }
lastAsk = ask;
return super.allocate(applicationAttemptId, askCopy, release); return super.allocate(applicationAttemptId, askCopy, release);
} }
} }

View File

@ -60,7 +60,7 @@ import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.SystemClock; import org.apache.hadoop.yarn.SystemClock;
import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.InlineDispatcher; import org.apache.hadoop.yarn.event.InlineDispatcher;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;

View File

@ -230,6 +230,10 @@ public class Counters
public static class Group implements CounterGroupBase<Counter> { public static class Group implements CounterGroupBase<Counter> {
private CounterGroupBase<Counter> realGroup; private CounterGroupBase<Counter> realGroup;
protected Group() {
realGroup = null;
}
Group(GenericGroup group) { Group(GenericGroup group) {
this.realGroup = group; this.realGroup = group;
} }

View File

@ -92,7 +92,7 @@ public class FileOutputCommitter extends OutputCommitter {
} }
@Private @Private
Path getTaskAttemptPath(TaskAttemptContext context) throws IOException { public Path getTaskAttemptPath(TaskAttemptContext context) throws IOException {
Path out = getOutputPath(context); Path out = getOutputPath(context);
return out == null ? null : getTaskAttemptPath(context, out); return out == null ? null : getTaskAttemptPath(context, out);
} }

View File

@ -22,6 +22,7 @@ import java.io.IOException;
import java.text.NumberFormat; import java.text.NumberFormat;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
@ -152,8 +153,8 @@ public abstract class FileOutputFormat<K, V> implements OutputFormat<K, V> {
* @param outputDir the {@link Path} of the output directory * @param outputDir the {@link Path} of the output directory
* for the map-reduce job. * for the map-reduce job.
*/ */
@Private
static void setWorkOutputPath(JobConf conf, Path outputDir) { public static void setWorkOutputPath(JobConf conf, Path outputDir) {
outputDir = new Path(conf.getWorkingDirectory(), outputDir); outputDir = new Path(conf.getWorkingDirectory(), outputDir);
conf.set(JobContext.TASK_OUTPUT_DIR, outputDir.toString()); conf.set(JobContext.TASK_OUTPUT_DIR, outputDir.toString());
} }

View File

@ -28,6 +28,7 @@ import java.util.regex.Pattern;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
@ -419,7 +420,8 @@ public class JobConf extends Configuration {
return credentials; return credentials;
} }
void setCredentials(Credentials credentials) { @Private
public void setCredentials(Credentials credentials) {
this.credentials = credentials; this.credentials = credentials;
} }

View File

@ -58,6 +58,7 @@ import org.apache.hadoop.mapred.lib.LazyOutputFormat;
import org.apache.hadoop.mapred.lib.NullOutputFormat; import org.apache.hadoop.mapred.lib.NullOutputFormat;
import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.filecache.DistributedCache; import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.Tool;
@ -515,7 +516,7 @@ public class Submitter extends Configured implements Tool {
*/ */
public static void main(String[] args) throws Exception { public static void main(String[] args) throws Exception {
int exitCode = new Submitter().run(args); int exitCode = new Submitter().run(args);
System.exit(exitCode); ExitUtil.terminate(exitCode);
} }
} }
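
Switching from System.exit() to ExitUtil.terminate() makes the exit interceptable in tests instead of killing the JVM. A small sketch of how that is typically exercised; the helper names used here (ExitUtil.disableSystemExit, ExitUtil.ExitException) are recalled from Hadoop's util package rather than taken from this patch, so treat them as assumptions to verify against the version in use.

    import org.apache.hadoop.util.ExitUtil;

    public class ExitUtilSketch {
      public static void main(String[] args) {
        // In tests, JVM exit is turned into a catchable exception so a failing
        // tool invocation does not tear down the test runner.
        ExitUtil.disableSystemExit();
        try {
          ExitUtil.terminate(1);          // would be System.exit(1) in production
        } catch (ExitUtil.ExitException e) {
          System.out.println("intercepted exit: " + e);
        }
      }
    }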

View File

@ -35,6 +35,7 @@ import com.google.common.collect.Iterators;
import com.google.common.collect.Maps; import com.google.common.collect.Maps;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.FileSystemCounter; import org.apache.hadoop.mapreduce.FileSystemCounter;
@ -72,6 +73,16 @@ public abstract class FileSystemCounterGroup<C extends Counter>
this.scheme = scheme; this.scheme = scheme;
key = ref; key = ref;
} }
@Private
public String getScheme() {
return scheme;
}
@Private
public FileSystemCounter getFileSystemCounter() {
return key;
}
@Override @Override
public String getName() { public String getName() {

View File

@ -29,6 +29,7 @@ import java.util.Iterator;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.util.ResourceBundles; import org.apache.hadoop.mapreduce.util.ResourceBundles;
@ -66,7 +67,17 @@ public abstract class FrameworkCounterGroup<T extends Enum<T>,
key = ref; key = ref;
this.groupName = groupName; this.groupName = groupName;
} }
@Private
public T getKey() {
return key;
}
@Private
public String getGroupName() {
return groupName;
}
@Override @Override
public String getName() { public String getName() {
return key.name(); return key.name();

Some files were not shown because too many files have changed in this diff