Merge r1441206 through r1444434 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1444439 13f79535-47bb-0310-9956-ffa450edef68
commit fa8bafbd46
@@ -91,13 +91,19 @@
<includes>
<include>${project.artifactId}-${project.version}.jar</include>
<include>${project.artifactId}-${project.version}-tests.jar</include>
<include>${project.artifactId}-${project.version}-sources.jar</include>
<include>${project.artifactId}-${project.version}-test-sources.jar</include>
</includes>
<excludes>
<exclude>hadoop-tools-dist-*.jar</exclude>
</excludes>
</fileSet>
<fileSet>
<directory>${project.build.directory}</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>
<includes>
<include>${project.artifactId}-${project.version}-sources.jar</include>
<include>${project.artifactId}-${project.version}-test-sources.jar</include>
</includes>
</fileSet>
<fileSet>
<directory>${basedir}/dev-support/jdiff</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/jdiff</outputDirectory>

@@ -149,6 +149,8 @@ Trunk (Unreleased)
    HADOOP-8924. Add maven plugin alternative to shell script to save
    package-info.java. (Chris Nauroth via suresh)

    HADOOP-9277. Improve javadoc for FileContext. (Andrew Wang via suresh)

  BUG FIXES

    HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang)

@@ -339,7 +341,26 @@ Trunk (Unreleased)

    HADOOP-9190. packaging docs is broken. (Andy Isaacson via atm)

Release 2.0.3-alpha - Unreleased
Release 2.0.4-beta - UNRELEASED

  INCOMPATIBLE CHANGES

  NEW FEATURES

    HADOOP-9283. Add support for running the Hadoop client on AIX. (atm)

  IMPROVEMENTS

    HADOOP-9253. Capture ulimit info in the logs at service start time.
    (Arpit Gupta via suresh)

  OPTIMIZATIONS

  BUG FIXES

    HADOOP-9294. GetGroupsTestBase fails on Windows. (Chris Nauroth via suresh)

Release 2.0.3-alpha - 2013-02-06

  INCOMPATIBLE CHANGES

@@ -467,6 +488,9 @@ Release 2.0.3-alpha - Unreleased
    HADOOP-9231. Parametrize staging URL for the uniformity of
    distributionManagement. (Konstantin Boudnik via suresh)

    HADOOP-9276. Allow BoundedByteArrayOutputStream to be resettable.
    (Arun Murthy via hitesh)

  OPTIMIZATIONS

    HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang

@@ -592,6 +616,22 @@ Release 2.0.3-alpha - Unreleased
    HADOOP-9221. Convert remaining xdocs to APT. (Andy Isaacson via atm)

    HADOOP-8981. TestMetricsSystemImpl fails on Windows. (Xuan Gong via suresh)

    HADOOP-9124. SortedMapWritable violates contract of Map interface for
    equals() and hashCode(). (Surenkumar Nihalani via tomwhite)

    HADOOP-9252. In StringUtils, humanReadableInt(..) has a race condition and
    the synchronization of limitDecimalTo2(double) can be avoided. (szetszwo)

    HADOOP-9260. Hadoop version may be not correct when starting name node or
    data node. (Chris Nauroth via jlowe)

    HADOOP-9278. Fix the file handle leak in HarMetaData.parseMetaData() in
    HarFileSystem. (Chris Nauroth via szetszwo)

    HADOOP-9289. FsShell rm -f fails for non-matching globs. (Daryn Sharp via
    suresh)

Release 2.0.2-alpha - 2012-09-07

@@ -1294,6 +1334,9 @@ Release 0.23.7 - UNRELEASED
    HADOOP-8849. FileUtil#fullyDelete should grant the target directories +rwx
    permissions (Ivan A. Veselovsky via bobby)

    HADOOP-9067. provide test for LocalFileSystem.reportChecksumFailure
    (Ivan A. Veselovsky via bobby)

  OPTIMIZATIONS

  BUG FIXES

@@ -83,7 +83,8 @@ fi
if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
  export HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
  export HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
  export HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
  starting_secure_dn="true"
fi

if [ "$HADOOP_IDENT_STRING" = "" ]; then

@@ -154,7 +155,17 @@ case $startStop in
    ;;
esac
echo $! > $pid
sleep 1; head "$log"
sleep 1
# capture the ulimit output
if [ "true" = "$starting_secure_dn" ]; then
  echo "ulimit -a for secure datanode user $HADOOP_SECURE_DN_USER" >> $log
  # capture the ulimit info for the appropriate user
  su --shell=/bin/bash $HADOOP_SECURE_DN_USER -c 'ulimit -a' >> $log 2>&1
else
  echo "ulimit -a for user $USER" >> $log
  ulimit -a >> $log 2>&1
fi
head -30 "$log"
sleep 3;
if ! ps -p $! > /dev/null ; then
  exit 1

@@ -57,70 +57,60 @@ import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ShutdownHookManager;

/**
 * The FileContext class provides an interface to the application writer for
 * using the Hadoop file system.
 * It provides a set of methods for the usual operation: create, open,
 * list, etc
 * The FileContext class provides an interface for users of the Hadoop
 * file system. It exposes a number of file system operations, e.g. create,
 * open, list.
 *
 * <p>
 * <b> *** Path Names *** </b>
 * <p>
 * <h2>Path Names</h2>
 *
 * The Hadoop file system supports a URI name space and URI names.
 * It offers a forest of file systems that can be referenced using fully
 * qualified URIs.
 * Two common Hadoop file systems implementations are
 * The Hadoop file system supports a URI namespace and URI names. This enables
 * multiple types of file systems to be referenced using fully-qualified URIs.
 * Two common Hadoop file system implementations are
 * <ul>
 * <li> the local file system: file:///path
 * <li> the hdfs file system hdfs://nnAddress:nnPort/path
 * <li>the local file system: file:///path
 * <li>the HDFS file system: hdfs://nnAddress:nnPort/path
 * </ul>
 *
 * While URI names are very flexible, it requires knowing the name or address
 * of the server. For convenience one often wants to access the default system
 * in one's environment without knowing its name/address. This has an
 * additional benefit that it allows one to change one's default fs
 *  (e.g. admin moves application from cluster1 to cluster2).
 * The Hadoop file system also supports additional naming schemes besides URIs.
 * Hadoop has the concept of a <i>default file system</i>, which implies a
 * default URI scheme and authority. This enables <i>slash-relative names</i>
 * relative to the default FS, which are more convenient for users and
 * application writers. The default FS is typically set by the user's
 * environment, though it can also be manually specified.
 * <p>
 *
 * To facilitate this, Hadoop supports a notion of a default file system.
 * The user can set his default file system, although this is
 * typically set up for you in your environment via your default config.
 * A default file system implies a default scheme and authority; slash-relative
 * names (such as /for/bar) are resolved relative to that default FS.
 * Similarly a user can also have working-directory-relative names (i.e. names
 * not starting with a slash). While the working directory is generally in the
 * same default FS, the wd can be in a different FS.
 * Hadoop also supports <i>working-directory-relative</i> names, which are paths
 * relative to the current working directory (similar to Unix). The working
 * directory can be in a different file system than the default FS.
 * <p>
 * Hence Hadoop path names can be one of:
 * <ul>
 * <li> fully qualified URI: scheme://authority/path
 * <li> slash relative names: /path relative to the default file system
 * <li> wd-relative names: path relative to the working dir
 * </ul>
 * Thus, Hadoop path names can be specified as one of the following:
 * <ul>
 * <li>a fully-qualified URI: scheme://authority/path (e.g.
 * hdfs://nnAddress:nnPort/foo/bar)
 * <li>a slash-relative name: path relative to the default file system (e.g.
 * /foo/bar)
 * <li>a working-directory-relative name: path relative to the working dir (e.g.
 * foo/bar)
 * </ul>
 * Relative paths with scheme (scheme:foo/bar) are illegal.
 *
 * <p>
 * <b>****The Role of the FileContext and configuration defaults****</b>
 * <p>
 * The FileContext provides file namespace context for resolving file names;
 * it also contains the umask for permissions, In that sense it is like the
 * per-process file-related state in Unix system.
 * These two properties
 * <ul>
 * <li> default file system i.e your slash)
 * <li> umask
 * </ul>
 * in general, are obtained from the default configuration file
 * in your environment, (@see {@link Configuration}).
 *
 * No other configuration parameters are obtained from the default config as
 * far as the file context layer is concerned. All file system instances
 * (i.e. deployments of file systems) have default properties; we call these
 * server side (SS) defaults. Operation like create allow one to select many
 * properties: either pass them in as explicit parameters or use
 * the SS properties.
 * <p>
 * The file system related SS defaults are
 * <h2>Role of FileContext and Configuration Defaults</h2>
 *
 * The FileContext is the analogue of per-process file-related state in Unix. It
 * contains two properties:
 *
 * <ul>
 * <li>the default file system (for resolving slash-relative names)
 * <li>the umask (for file permissions)
 * </ul>
 * In general, these properties are obtained from the default configuration file
 * in the user's environment (see {@link Configuration}).
 *
 * Further file system properties are specified on the server-side. File system
 * operations default to using these server-side defaults unless otherwise
 * specified.
 * <p>
 * The file system related server-side defaults are:
 * <ul>
 * <li> the home directory (default is "/user/userName")
 * <li> the initial wd (only for local fs)

@@ -131,34 +121,34 @@ import org.apache.hadoop.util.ShutdownHookManager;
 * <li> checksum option. (checksumType and bytesPerChecksum)
 * </ul>
 *
 * <p>
 * <b> *** Usage Model for the FileContext class *** </b>
 * <p>
 * <h2>Example Usage</h2>
 *
 * Example 1: use the default config read from the $HADOOP_CONFIG/core.xml.
 *   Unspecified values come from core-defaults.xml in the release jar.
 * <ul>
 * <li> myFContext = FileContext.getFileContext(); // uses the default config
 * // which has your default FS
 * <li> myFContext.create(path, ...);
 * <li> myFContext.setWorkingDir(path)
 * <li> myFContext.setWorkingDir(path);
 * <li> myFContext.open (path, ...);
 * <li>...
 * </ul>
 * Example 2: Get a FileContext with a specific URI as the default FS
 * <ul>
 * <li> myFContext = FileContext.getFileContext(URI)
 * <li> myFContext = FileContext.getFileContext(URI);
 * <li> myFContext.create(path, ...);
 * ...
 * </ul>
 * <li>...
 * </ul>
 * Example 3: FileContext with local file system as the default
 * <ul>
 * <li> myFContext = FileContext.getLocalFSFileContext()
 * <li> myFContext = FileContext.getLocalFSFileContext();
 * <li> myFContext.create(path, ...);
 * <li> ...
 * </ul>
 * Example 4: Use a specific config, ignoring $HADOOP_CONFIG
 * Generally you should not need use a config unless you are doing
 * <ul>
 * <li> configX = someConfigSomeOnePassedToYou.
 * <li> configX = someConfigSomeOnePassedToYou;
 * <li> myFContext = getFileContext(configX); // configX is not changed,
 * // is passed down
 * <li> myFContext.create(path, ...);

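As a reading aid for the usage model described in the rewritten javadoc above, here is a minimal, hypothetical sketch of Examples 1 and 2. It is not code from the patch; the paths and the hdfs://nnAddress:9000 authority are placeholders.

import java.net.URI;
import java.util.EnumSet;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class FileContextUsageSketch {
  public static void main(String[] args) throws Exception {
    // Example 1: the default config supplies the default (slash-relative) file system.
    FileContext fc = FileContext.getFileContext();
    fc.setWorkingDirectory(new Path("/tmp"));
    // working-directory-relative name, resolved against /tmp
    FSDataOutputStream out = fc.create(new Path("foo/bar"),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));
    out.writeUTF("hello");
    out.close();

    // Example 2: pick a specific default FS by URI (placeholder authority).
    FileContext hdfsFc = FileContext.getFileContext(URI.create("hdfs://nnAddress:9000"));
    System.out.println(hdfsFc.getWorkingDirectory());
  }
}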
@@ -30,8 +30,11 @@ import java.util.TreeMap;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.Progressable;

@@ -50,6 +53,9 @@ import org.apache.hadoop.util.Progressable;
 */

public class HarFileSystem extends FilterFileSystem {

  private static final Log LOG = LogFactory.getLog(HarFileSystem.class);

  public static final int VERSION = 3;

  private static final Map<URI, HarMetaData> harMetaCache =

@@ -1025,68 +1031,69 @@ public class HarFileSystem extends FilterFileSystem {
    }

    private void parseMetaData() throws IOException {
      FSDataInputStream in = fs.open(masterIndexPath);
      FileStatus masterStat = fs.getFileStatus(masterIndexPath);
      masterIndexTimestamp = masterStat.getModificationTime();
      LineReader lin = new LineReader(in, getConf());
      Text line = new Text();
      long read = lin.readLine(line);
      Text line;
      long read;
      FSDataInputStream in = null;
      LineReader lin = null;

      // the first line contains the version of the index file
      String versionLine = line.toString();
      String[] arr = versionLine.split(" ");
      version = Integer.parseInt(arr[0]);
      // make it always backwards-compatible
      if (this.version > HarFileSystem.VERSION) {
        throw new IOException("Invalid version " +
            this.version + " expected " + HarFileSystem.VERSION);
      }

      // each line contains a hashcode range and the index file name
      String[] readStr = null;
      while(read < masterStat.getLen()) {
        int b = lin.readLine(line);
        read += b;
        readStr = line.toString().split(" ");
        int startHash = Integer.parseInt(readStr[0]);
        int endHash = Integer.parseInt(readStr[1]);
        stores.add(new Store(Long.parseLong(readStr[2]),
            Long.parseLong(readStr[3]), startHash,
            endHash));
        line.clear();
      }
      try {
        // close the master index
        lin.close();
      } catch(IOException io){
        // do nothing just a read.
        in = fs.open(masterIndexPath);
        FileStatus masterStat = fs.getFileStatus(masterIndexPath);
        masterIndexTimestamp = masterStat.getModificationTime();
        lin = new LineReader(in, getConf());
        line = new Text();
        read = lin.readLine(line);

        // the first line contains the version of the index file
        String versionLine = line.toString();
        String[] arr = versionLine.split(" ");
        version = Integer.parseInt(arr[0]);
        // make it always backwards-compatible
        if (this.version > HarFileSystem.VERSION) {
          throw new IOException("Invalid version " +
              this.version + " expected " + HarFileSystem.VERSION);
        }

        // each line contains a hashcode range and the index file name
        String[] readStr = null;
        while(read < masterStat.getLen()) {
          int b = lin.readLine(line);
          read += b;
          readStr = line.toString().split(" ");
          int startHash = Integer.parseInt(readStr[0]);
          int endHash = Integer.parseInt(readStr[1]);
          stores.add(new Store(Long.parseLong(readStr[2]),
              Long.parseLong(readStr[3]), startHash,
              endHash));
          line.clear();
        }
      } finally {
        IOUtils.cleanup(LOG, lin, in);
      }

      FSDataInputStream aIn = fs.open(archiveIndexPath);
      FileStatus archiveStat = fs.getFileStatus(archiveIndexPath);
      archiveIndexTimestamp = archiveStat.getModificationTime();
      LineReader aLin;

      // now start reading the real index file
      for (Store s: stores) {
        read = 0;
        aIn.seek(s.begin);
        aLin = new LineReader(aIn, getConf());
        while (read + s.begin < s.end) {
          int tmp = aLin.readLine(line);
          read += tmp;
          String lineFeed = line.toString();
          String[] parsed = lineFeed.split(" ");
          parsed[0] = decodeFileName(parsed[0]);
          archive.put(new Path(parsed[0]), new HarStatus(lineFeed));
          line.clear();
        }
      }
      try {
        // close the archive index
        aIn.close();
      } catch(IOException io) {
        // do nothing just a read.
        FileStatus archiveStat = fs.getFileStatus(archiveIndexPath);
        archiveIndexTimestamp = archiveStat.getModificationTime();
        LineReader aLin;

        // now start reading the real index file
        for (Store s: stores) {
          read = 0;
          aIn.seek(s.begin);
          aLin = new LineReader(aIn, getConf());
          while (read + s.begin < s.end) {
            int tmp = aLin.readLine(line);
            read += tmp;
            String lineFeed = line.toString();
            String[] parsed = lineFeed.split(" ");
            parsed[0] = decodeFileName(parsed[0]);
            archive.put(new Path(parsed[0]), new HarStatus(lineFeed));
            line.clear();
          }
        }
      } finally {
        IOUtils.cleanup(LOG, aIn);
      }
    }
  }

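The parseMetaData() change above (HADOOP-9278) moves the index reads inside try/finally so the file handles are always released. A standalone, hypothetical sketch of that open-use-cleanup pattern (not the Hadoop code itself) is shown below; it assumes the patched LineReader, which now implements Closeable.

import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;

public class CleanupPatternSketch {
  private static final Log LOG = LogFactory.getLog(CleanupPatternSketch.class);

  // Read every line of a file, guaranteeing the handles are released even on error.
  static void readAll(FileSystem fs, Path path, Configuration conf) throws IOException {
    FSDataInputStream in = null;
    LineReader reader = null;
    try {
      in = fs.open(path);
      reader = new LineReader(in, conf);
      Text line = new Text();
      while (reader.readLine(line) > 0) {
        // process the line here
      }
    } finally {
      // closes both and logs (rather than throws) secondary close failures
      IOUtils.cleanup(LOG, reader, in);
    }
  }
}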
@@ -21,6 +21,7 @@ package org.apache.hadoop.fs.shell;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@@ -28,6 +29,7 @@ import org.apache.hadoop.fs.PathIOException;
import org.apache.hadoop.fs.PathIsDirectoryException;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.PathNotFoundException;
import org.apache.hadoop.fs.Trash;

/**

@@ -71,6 +73,19 @@ class Delete {
      skipTrash = cf.getOpt("skipTrash");
    }

    @Override
    protected List<PathData> expandArgument(String arg) throws IOException {
      try {
        return super.expandArgument(arg);
      } catch (PathNotFoundException e) {
        if (!ignoreFNF) {
          throw e;
        }
        // prevent -f on a non-existent glob from failing
        return new LinkedList<PathData>();
      }
    }

    @Override
    protected void processNonexistentPath(PathData item) throws IOException {
      if (!ignoreFNF) super.processNonexistentPath(item);

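The expandArgument() override above is what makes "hadoop fs -rm -f nomatch*" exit 0 (HADOOP-9289). A small, hypothetical sketch of the same "force flag turns a non-matching glob into an empty work list" idea, using placeholder helper names rather than the shell's internal API:

import java.io.FileNotFoundException;
import java.util.Collections;
import java.util.List;

public class ForceFlagSketch {
  // With the force flag, a glob that matches nothing becomes an empty work list
  // instead of an error; without it, the caller still sees the failure.
  static List<String> expand(String glob, boolean force) throws FileNotFoundException {
    List<String> matches = globMatches(glob);   // placeholder for real glob expansion
    if (matches.isEmpty()) {
      if (!force) {
        throw new FileNotFoundException(glob + ": No such file or directory");
      }
      return Collections.emptyList();           // "-f": silently succeed with nothing to do
    }
    return matches;
  }

  private static List<String> globMatches(String glob) {
    return Collections.emptyList();             // stub for illustration only
  }
}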
@@ -48,7 +48,7 @@ class FsUsage extends FsCommand {

  protected String formatSize(long size) {
    return humanReadable
      ? StringUtils.humanReadableInt(size)
      ? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
      : String.valueOf(size);
  }

@@ -67,7 +67,7 @@ class Ls extends FsCommand {
  protected boolean humanReadable = false;
  protected String formatSize(long size) {
    return humanReadable
      ? StringUtils.humanReadableInt(size)
      ? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
      : String.valueOf(size);
  }

@@ -32,9 +32,10 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class BoundedByteArrayOutputStream extends OutputStream {
  private final byte[] buffer;
  private byte[] buffer;
  private int startOffset;
  private int limit;
  private int count;
  private int currentPointer;

  /**
   * Create a BoundedByteArrayOutputStream with the specified

@@ -52,20 +53,30 @@ public class BoundedByteArrayOutputStream extends OutputStream {
   * @param limit The maximum limit upto which data can be written
   */
  public BoundedByteArrayOutputStream(int capacity, int limit) {
    this(new byte[capacity], 0, limit);
  }

  protected BoundedByteArrayOutputStream(byte[] buf, int offset, int limit) {
    resetBuffer(buf, offset, limit);
  }

  protected void resetBuffer(byte[] buf, int offset, int limit) {
    int capacity = buf.length - offset;
    if ((capacity < limit) || (capacity | limit) < 0) {
      throw new IllegalArgumentException("Invalid capacity/limit");
    }
    this.buffer = new byte[capacity];
    this.limit = limit;
    this.count = 0;
    this.buffer = buf;
    this.startOffset = offset;
    this.currentPointer = offset;
    this.limit = offset + limit;
  }

  @Override
  public void write(int b) throws IOException {
    if (count >= limit) {
    if (currentPointer >= limit) {
      throw new EOFException("Reaching the limit of the buffer.");
    }
    buffer[count++] = (byte) b;
    buffer[currentPointer++] = (byte) b;
  }

  @Override

@@ -77,12 +88,12 @@ public class BoundedByteArrayOutputStream extends OutputStream {
      return;
    }

    if (count + len > limit) {
    if (currentPointer + len > limit) {
      throw new EOFException("Reach the limit of the buffer");
    }

    System.arraycopy(b, off, buffer, count, len);
    count += len;
    System.arraycopy(b, off, buffer, currentPointer, len);
    currentPointer += len;
  }

  /**

@@ -90,17 +101,17 @@ public class BoundedByteArrayOutputStream extends OutputStream {
   * @param newlim New Limit
   */
  public void reset(int newlim) {
    if (newlim > buffer.length) {
    if (newlim > (buffer.length - startOffset)) {
      throw new IndexOutOfBoundsException("Limit exceeds buffer size");
    }
    this.limit = newlim;
    this.count = 0;
    this.currentPointer = startOffset;
  }

  /** Reset the buffer */
  public void reset() {
    this.limit = buffer.length;
    this.count = 0;
    this.limit = buffer.length - startOffset;
    this.currentPointer = startOffset;
  }

  /** Return the current limit */

@@ -119,6 +130,10 @@ public class BoundedByteArrayOutputStream extends OutputStream {
   * currently in the buffer.
   */
  public int size() {
    return count;
    return currentPointer - startOffset;
  }

  public int available() {
    return limit - currentPointer;
  }
}

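A brief, hypothetical usage sketch of the resettable stream (HADOOP-9276), assuming the patched class above; the buffer sizes and byte values are arbitrary:

import java.io.IOException;
import org.apache.hadoop.io.BoundedByteArrayOutputStream;

public class BoundedBufferSketch {
  public static void main(String[] args) throws IOException {
    // The public constructor behaves as before: capacity 8, limit 8.
    BoundedByteArrayOutputStream out = new BoundedByteArrayOutputStream(8);
    out.write(new byte[]{1, 2, 3}, 0, 3);
    System.out.println(out.size());        // 3 bytes written so far
    System.out.println(out.available());   // 5 bytes of room left before the limit

    out.reset();                           // rewind to the start offset, limit back to capacity
    out.write(new byte[]{9}, 0, 1);
    System.out.println(out.size());        // 1
  }
}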
@@ -203,4 +203,27 @@ public class SortedMapWritable extends AbstractMapWritable
      e.getValue().write(out);
    }
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }

    if (obj instanceof SortedMapWritable) {
      Map map = (Map) obj;
      if (size() != map.size()) {
        return false;
      }

      return entrySet().equals(map.entrySet());
    }

    return false;
  }

  @Override
  public int hashCode() {
    return instance.hashCode();
  }
}

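The point of the equals()/hashCode() overrides above (HADOOP-9124) is to honour the java.util.Map contract: two equal maps must report equal hash codes. A minimal, hypothetical illustration, assuming the patched class:

import org.apache.hadoop.io.SortedMapWritable;
import org.apache.hadoop.io.Text;

public class MapContractSketch {
  public static void main(String[] args) {
    SortedMapWritable a = new SortedMapWritable();
    SortedMapWritable b = new SortedMapWritable();
    a.put(new Text("k"), new Text("v"));
    b.put(new Text("k"), new Text("v"));

    // With the overrides, equal maps now also agree on their hash codes.
    System.out.println(a.equals(b));                   // true
    System.out.println(a.hashCode() == b.hashCode());  // true
  }
}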
@@ -35,6 +35,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.net.ConnectTimeoutException;

/**
 * <p>

@@ -543,6 +544,7 @@ public class RetryPolicies {
          e instanceof NoRouteToHostException ||
          e instanceof UnknownHostException ||
          e instanceof StandbyException ||
          e instanceof ConnectTimeoutException ||
          isWrappedStandbyException(e)) {
        return new RetryAction(
            RetryAction.RetryDecision.FAILOVER_AND_RETRY,

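With the added instanceof check, a connect timeout is treated like the other network faults in that list and triggers failover rather than an immediate failure. The following is only a rough, hypothetical sketch of exercising that behaviour; the exact shouldRetry() signature and RetryAction fields are assumed from the RetryPolicy API of this branch and may differ:

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.net.ConnectTimeoutException;

public class FailoverRetrySketch {
  public static void main(String[] args) throws Exception {
    // Fail over up to 3 times on network-level exceptions, otherwise try once.
    RetryPolicy policy = RetryPolicies.failoverOnNetworkException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, 3);
    RetryPolicy.RetryAction action =
        policy.shouldRetry(new ConnectTimeoutException("20s timeout"), 0, 0, true);
    System.out.println(action.action);   // expected: FAILOVER_AND_RETRY
  }
}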
@@ -67,6 +67,7 @@ import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto.OperationProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;
import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SaslRpcClient;

@@ -511,14 +512,14 @@ public class Client {
        }
        this.socket.setSoTimeout(pingInterval);
        return;
      } catch (SocketTimeoutException toe) {
      } catch (ConnectTimeoutException toe) {
        /* Check for an address change and update the local reference.
         * Reset the failure counter if the address was changed
         */
        if (updateAddress()) {
          timeoutFailures = ioFailures = 0;
        }
        handleConnectionFailure(timeoutFailures++,
        handleConnectionTimeout(timeoutFailures++,
            maxRetriesOnSocketTimeouts, toe);
      } catch (IOException ie) {
        if (updateAddress()) {

@@ -680,7 +681,7 @@ public class Client {
      socket = null;
    }

    /* Handle connection failures
    /* Handle connection failures due to timeout on connect
     *
     * If the current number of retries is equal to the max number of retries,
     * stop retrying and throw the exception; Otherwise backoff 1 second and

@@ -694,7 +695,7 @@ public class Client {
     * @param ioe failure reason
     * @throws IOException if max number of retries is reached
     */
    private void handleConnectionFailure(
    private void handleConnectionTimeout(
        int curRetries, int maxRetries, IOException ioe) throws IOException {

      closeConnection();

@@ -0,0 +1,37 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.net;

import java.net.SocketTimeoutException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
 * Thrown by {@link NetUtils#connect(java.net.Socket, java.net.SocketAddress, int)}
 * if it times out while connecting to the remote host.
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ConnectTimeoutException extends SocketTimeoutException {
  private static final long serialVersionUID = 1L;

  public ConnectTimeoutException(String msg) {
    super(msg);
  }
}
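Because ConnectTimeoutException extends SocketTimeoutException, callers can distinguish a connect-phase timeout from other socket timeouts by catching the subtype first. A small, hypothetical sketch (placeholder host and port):

import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketTimeoutException;
import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.net.NetUtils;

public class ConnectTimeoutSketch {
  public static void main(String[] args) throws Exception {
    Socket socket = new Socket();
    try {
      // Placeholder endpoint; 3000 ms connect timeout.
      NetUtils.connect(socket, new InetSocketAddress("example.com", 8020), 3000);
    } catch (ConnectTimeoutException e) {
      // The connect itself timed out; a failover-aware caller can retry elsewhere.
      System.out.println("connect timed out: " + e.getMessage());
    } catch (SocketTimeoutException e) {
      // Other timeouts still surface as the parent type.
      System.out.println("socket timeout: " + e.getMessage());
    } finally {
      socket.close();
    }
  }
}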
@@ -20,6 +20,7 @@ package org.apache.hadoop.net;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.reflect.Constructor;
import java.net.BindException;
import java.net.InetAddress;
import java.net.InetSocketAddress;

@@ -517,11 +518,15 @@ public class NetUtils {
      socket.bind(localAddr);
    }

    if (ch == null) {
      // let the default implementation handle it.
      socket.connect(endpoint, timeout);
    } else {
      SocketIOWithTimeout.connect(ch, endpoint, timeout);
    try {
      if (ch == null) {
        // let the default implementation handle it.
        socket.connect(endpoint, timeout);
      } else {
        SocketIOWithTimeout.connect(ch, endpoint, timeout);
      }
    } catch (SocketTimeoutException ste) {
      throw new ConnectTimeoutException(ste.getMessage());
    }

    // There is a very rare case allowed by the TCP specification, such that

@@ -719,7 +724,7 @@ public class NetUtils {
          + see("BindException"));
    } else if (exception instanceof ConnectException) {
      // connection refused; include the host:port in the error
      return (ConnectException) new ConnectException(
      return wrapWithMessage(exception,
          "Call From "
          + localHost
          + " to "

@@ -729,32 +734,28 @@ public class NetUtils {
          + " failed on connection exception: "
          + exception
          + ";"
          + see("ConnectionRefused"))
          .initCause(exception);
          + see("ConnectionRefused"));
    } else if (exception instanceof UnknownHostException) {
      return (UnknownHostException) new UnknownHostException(
      return wrapWithMessage(exception,
          "Invalid host name: "
          + getHostDetailsAsString(destHost, destPort, localHost)
          + exception
          + ";"
          + see("UnknownHost"))
          .initCause(exception);
          + see("UnknownHost"));
    } else if (exception instanceof SocketTimeoutException) {
      return (SocketTimeoutException) new SocketTimeoutException(
      return wrapWithMessage(exception,
          "Call From "
          + localHost + " to " + destHost + ":" + destPort
          + " failed on socket timeout exception: " + exception
          + ";"
          + see("SocketTimeout"))
          .initCause(exception);
          + see("SocketTimeout"));
    } else if (exception instanceof NoRouteToHostException) {
      return (NoRouteToHostException) new NoRouteToHostException(
      return wrapWithMessage(exception,
          "No Route to Host from "
          + localHost + " to " + destHost + ":" + destPort
          + " failed on socket timeout exception: " + exception
          + ";"
          + see("NoRouteToHost"))
          .initCause(exception);
          + see("NoRouteToHost"));
    }
    else {
      return (IOException) new IOException("Failed on local exception: "

@@ -769,6 +770,21 @@ public class NetUtils {
  private static String see(final String entry) {
    return FOR_MORE_DETAILS_SEE + HADOOP_WIKI + entry;
  }

  @SuppressWarnings("unchecked")
  private static <T extends IOException> T wrapWithMessage(
      T exception, String msg) {
    Class<? extends Throwable> clazz = exception.getClass();
    try {
      Constructor<? extends Throwable> ctor = clazz.getConstructor(String.class);
      Throwable t = ctor.newInstance(msg);
      return (T)(t.initCause(exception));
    } catch (Throwable e) {
      LOG.warn("Unable to wrap exception of type " +
          clazz + ": it has no (String) constructor", e);
      return exception;
    }
  }

  /**
   * Get the host details as a string

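The new wrapWithMessage() helper above is private and relies on the wrapped exception class having a (String) constructor. A standalone, hypothetical illustration of that reflective re-wrapping pattern, with placeholder host names in the message:

import java.io.IOException;
import java.lang.reflect.Constructor;
import java.net.ConnectException;

public class WrapWithMessageSketch {
  // Same idea as the private helper: rebuild the exception with a richer message
  // while keeping its concrete type and chaining the original as the cause.
  @SuppressWarnings("unchecked")
  static <T extends IOException> T wrap(T exception, String msg) {
    try {
      Constructor<? extends Throwable> ctor =
          exception.getClass().getConstructor(String.class);
      Throwable t = ctor.newInstance(msg);
      return (T) t.initCause(exception);
    } catch (Throwable e) {
      return exception;    // no (String) constructor: fall back to the original
    }
  }

  public static void main(String[] args) {
    ConnectException wrapped =
        wrap(new ConnectException("Connection refused"),
             "Call From clientHost to serverHost:8020 failed on connection exception");
    System.out.println(wrapped.getMessage());
    System.out.println(wrapped.getCause());   // the original ConnectException
  }
}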
@@ -301,17 +301,25 @@ public class UserGroupInformation {

  private static String OS_LOGIN_MODULE_NAME;
  private static Class<? extends Principal> OS_PRINCIPAL_CLASS;

  private static final boolean windows =
      System.getProperty("os.name").startsWith("Windows");
  private static final boolean is64Bit =
      System.getProperty("os.arch").contains("64");
  private static final boolean ibmJava = System.getProperty("java.vendor").contains("IBM");
  private static final boolean aix = System.getProperty("os.name").equals("AIX");

  /* Return the OS login module class name */
  private static String getOSLoginModuleName() {
    if (System.getProperty("java.vendor").contains("IBM")) {
      return windows ? (is64Bit
          ? "com.ibm.security.auth.module.Win64LoginModule"
          : "com.ibm.security.auth.module.NTLoginModule")
        : "com.ibm.security.auth.module.LinuxLoginModule";
    if (ibmJava) {
      if (windows) {
        return is64Bit ? "com.ibm.security.auth.module.Win64LoginModule"
            : "com.ibm.security.auth.module.NTLoginModule";
      } else if (aix) {
        return "com.ibm.security.auth.module.AIXLoginModule";
      } else {
        return "com.ibm.security.auth.module.LinuxLoginModule";
      }
    } else {
      return windows ? "com.sun.security.auth.module.NTLoginModule"
        : "com.sun.security.auth.module.UnixLoginModule";

@@ -323,11 +331,14 @@ public class UserGroupInformation {
  private static Class<? extends Principal> getOsPrincipalClass() {
    ClassLoader cl = ClassLoader.getSystemClassLoader();
    try {
      if (System.getProperty("java.vendor").contains("IBM")) {
      if (ibmJava) {
        if (windows) {
          return (Class<? extends Principal>) (is64Bit
            ? cl.loadClass("com.ibm.security.auth.UsernamePrincipal")
            : cl.loadClass("com.ibm.security.auth.NTUserPrincipal"));
        } else if (aix) {
          return (Class<? extends Principal>)
              cl.loadClass("com.ibm.security.auth.AIXPrincipal");
        } else {
          return (Class<? extends Principal>) (is64Bit
            ? cl.loadClass("com.ibm.security.auth.UsernamePrincipal")

@@ -418,12 +429,21 @@ public class UserGroupInformation {
  private static final Map<String,String> USER_KERBEROS_OPTIONS =
    new HashMap<String,String>();
  static {
    USER_KERBEROS_OPTIONS.put("doNotPrompt", "true");
    USER_KERBEROS_OPTIONS.put("useTicketCache", "true");
    USER_KERBEROS_OPTIONS.put("renewTGT", "true");
    if (ibmJava) {
      USER_KERBEROS_OPTIONS.put("useDefaultCcache", "true");
    } else {
      USER_KERBEROS_OPTIONS.put("doNotPrompt", "true");
      USER_KERBEROS_OPTIONS.put("useTicketCache", "true");
      USER_KERBEROS_OPTIONS.put("renewTGT", "true");
    }
    String ticketCache = System.getenv("KRB5CCNAME");
    if (ticketCache != null) {
      USER_KERBEROS_OPTIONS.put("ticketCache", ticketCache);
      if (ibmJava) {
        // The first value searched when "useDefaultCcache" is used.
        System.setProperty("KRB5CCNAME", ticketCache);
      } else {
        USER_KERBEROS_OPTIONS.put("ticketCache", ticketCache);
      }
    }
    USER_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS);
  }

@@ -434,10 +454,14 @@ public class UserGroupInformation {
  private static final Map<String,String> KEYTAB_KERBEROS_OPTIONS =
    new HashMap<String,String>();
  static {
    KEYTAB_KERBEROS_OPTIONS.put("doNotPrompt", "true");
    KEYTAB_KERBEROS_OPTIONS.put("useKeyTab", "true");
    KEYTAB_KERBEROS_OPTIONS.put("storeKey", "true");
    KEYTAB_KERBEROS_OPTIONS.put("refreshKrb5Config", "true");
    if (ibmJava) {
      KEYTAB_KERBEROS_OPTIONS.put("credsType", "both");
    } else {
      KEYTAB_KERBEROS_OPTIONS.put("doNotPrompt", "true");
      KEYTAB_KERBEROS_OPTIONS.put("useKeyTab", "true");
      KEYTAB_KERBEROS_OPTIONS.put("storeKey", "true");
      KEYTAB_KERBEROS_OPTIONS.put("refreshKrb5Config", "true");
    }
    KEYTAB_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS);
  }
  private static final AppConfigurationEntry KEYTAB_KERBEROS_LOGIN =

@@ -462,7 +486,12 @@ public class UserGroupInformation {
      } else if (USER_KERBEROS_CONFIG_NAME.equals(appName)) {
        return USER_KERBEROS_CONF;
      } else if (KEYTAB_KERBEROS_CONFIG_NAME.equals(appName)) {
        KEYTAB_KERBEROS_OPTIONS.put("keyTab", keytabFile);
        if (ibmJava) {
          KEYTAB_KERBEROS_OPTIONS.put("useKeytab",
              prependFileAuthority(keytabFile));
        } else {
          KEYTAB_KERBEROS_OPTIONS.put("keyTab", keytabFile);
        }
        KEYTAB_KERBEROS_OPTIONS.put("principal", keytabPrincipal);
        return KEYTAB_KERBEROS_CONF;
      }

@@ -470,6 +499,11 @@ public class UserGroupInformation {
    }
  }

  private static String prependFileAuthority(String keytabPath) {
    return keytabPath.startsWith("file://") ? keytabPath
        : "file://" + keytabPath;
  }

  /**
   * Represents a javax.security configuration that is created at runtime.
   */

@@ -666,6 +700,7 @@ public class UserGroupInformation {
      }
      loginUser.spawnAutoRenewalThreadForUserCreds();
    } catch (LoginException le) {
      LOG.debug("failure to login", le);
      throw new IOException("failure to login", le);
    }
    if (LOG.isDebugEnabled()) {

@@ -18,6 +18,7 @@

package org.apache.hadoop.util;

import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;

@@ -39,7 +40,7 @@ import org.apache.hadoop.io.Text;
 */
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public class LineReader {
public class LineReader implements Closeable {
  private static final int DEFAULT_BUFFER_SIZE = 64 * 1024;
  private int bufferSize = DEFAULT_BUFFER_SIZE;
  private InputStream in;

@@ -23,8 +23,6 @@ import java.io.StringWriter;
import java.net.URI;
import java.net.URISyntaxException;
import java.text.DateFormat;
import java.text.DecimalFormat;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;

@@ -34,12 +32,13 @@ import java.util.List;
import java.util.Locale;
import java.util.StringTokenizer;

import com.google.common.net.InetAddresses;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.net.NetUtils;

import com.google.common.net.InetAddresses;

/**
 * General string utils
 */

@@ -52,13 +51,6 @@ public class StringUtils {
   */
  public static final int SHUTDOWN_HOOK_PRIORITY = 0;

  private static final DecimalFormat decimalFormat;
  static {
    NumberFormat numberFormat = NumberFormat.getNumberInstance(Locale.ENGLISH);
    decimalFormat = (DecimalFormat) numberFormat;
    decimalFormat.applyPattern("#.##");
  }

  /**
   * Make a string representation of the exception.
   * @param e The exception to stringify

@@ -87,50 +79,33 @@ public class StringUtils {
    }
    return fullHostname;
  }

  private static DecimalFormat oneDecimal = new DecimalFormat("0.0");

  /**
   * Given an integer, return a string that is in an approximate, but human
   * readable format.
   * It uses the bases 'k', 'm', and 'g' for 1024, 1024**2, and 1024**3.
   * @param number the number to format
   * @return a human readable form of the integer
   *
   * @deprecated use {@link TraditionalBinaryPrefix#long2String(long, String, int)}.
   */
  @Deprecated
  public static String humanReadableInt(long number) {
    long absNumber = Math.abs(number);
    double result = number;
    String suffix = "";
    if (absNumber < 1024) {
      // since no division has occurred, don't format with a decimal point
      return String.valueOf(number);
    } else if (absNumber < 1024 * 1024) {
      result = number / 1024.0;
      suffix = "k";
    } else if (absNumber < 1024 * 1024 * 1024) {
      result = number / (1024.0 * 1024);
      suffix = "m";
    } else {
      result = number / (1024.0 * 1024 * 1024);
      suffix = "g";
    }
    return oneDecimal.format(result) + suffix;
    return TraditionalBinaryPrefix.long2String(number, "", 1);
  }

  /** The same as String.format(Locale.ENGLISH, format, objects). */
  public static String format(final String format, final Object... objects) {
    return String.format(Locale.ENGLISH, format, objects);
  }

  /**
   * Format a percentage for presentation to the user.
   * @param done the percentage to format (0.0 to 1.0)
   * @param digits the number of digits past the decimal point
   * @param fraction the percentage as a fraction, e.g. 0.1 = 10%
   * @param decimalPlaces the number of decimal places
   * @return a string representation of the percentage
   */
  public static String formatPercent(double done, int digits) {
    DecimalFormat percentFormat = new DecimalFormat("0.00%");
    double scale = Math.pow(10.0, digits+2);
    double rounded = Math.floor(done * scale);
    percentFormat.setDecimalSeparatorAlwaysShown(false);
    percentFormat.setMinimumFractionDigits(digits);
    percentFormat.setMaximumFractionDigits(digits);
    return percentFormat.format(rounded / scale);
  public static String formatPercent(double fraction, int decimalPlaces) {
    return format("%." + decimalPlaces + "f%%", fraction*100);
  }

  /**

@@ -165,7 +140,7 @@ public class StringUtils {
    }
    StringBuilder s = new StringBuilder();
    for(int i = start; i < end; i++) {
      s.append(String.format("%02x", bytes[i]));
      s.append(format("%02x", bytes[i]));
    }
    return s.toString();
  }

@@ -630,18 +605,22 @@ public class StringUtils {
   * TraditionalBinaryPrefix symbol are case insensitive.
   */
  public static enum TraditionalBinaryPrefix {
    KILO(1024),
    MEGA(KILO.value << 10),
    GIGA(MEGA.value << 10),
    TERA(GIGA.value << 10),
    PETA(TERA.value << 10),
    EXA(PETA.value << 10);
    KILO(10),
    MEGA(KILO.bitShift + 10),
    GIGA(MEGA.bitShift + 10),
    TERA(GIGA.bitShift + 10),
    PETA(TERA.bitShift + 10),
    EXA (PETA.bitShift + 10);

    public final long value;
    public final char symbol;
    public final int bitShift;
    public final long bitMask;

    TraditionalBinaryPrefix(long value) {
      this.value = value;
    private TraditionalBinaryPrefix(int bitShift) {
      this.bitShift = bitShift;
      this.value = 1L << bitShift;
      this.bitMask = this.value - 1L;
      this.symbol = toString().charAt(0);
    }

@@ -692,8 +671,58 @@ public class StringUtils {
        return num * prefix;
      }
    }

    /**
     * Convert a long integer to a string with traditional binary prefix.
     *
     * @param n the value to be converted
     * @param unit The unit, e.g. "B" for bytes.
     * @param decimalPlaces The number of decimal places.
     * @return a string with traditional binary prefix.
     */
    public static String long2String(long n, String unit, int decimalPlaces) {
      if (unit == null) {
        unit = "";
      }
      //take care a special case
      if (n == Long.MIN_VALUE) {
        return "-8 " + EXA.symbol + unit;
      }

      final StringBuilder b = new StringBuilder();
      //take care negative numbers
      if (n < 0) {
        b.append('-');
        n = -n;
      }
      if (n < KILO.value) {
        //no prefix
        b.append(n);
        return (unit.isEmpty()? b: b.append(" ").append(unit)).toString();
      } else {
        //find traditional binary prefix
        int i = 0;
        for(; i < values().length && n >= values()[i].value; i++);
        TraditionalBinaryPrefix prefix = values()[i - 1];

        if ((n & prefix.bitMask) == 0) {
          //exact division
          b.append(n >> prefix.bitShift);
        } else {
          final String format = "%." + decimalPlaces + "f";
          String s = format(format, n/(double)prefix.value);
          //check a special rounding up case
          if (s.startsWith("1024")) {
            prefix = values()[i];
            s = format(format, n/(double)prefix.value);
          }
          b.append(s);
        }
        return b.append(' ').append(prefix.symbol).append(unit).toString();
      }
    }
  }

  /**
   * Escapes HTML Special characters present in the string.
   * @param string

@@ -731,32 +760,16 @@ public class StringUtils {
  }

  /**
   * Return an abbreviated English-language desc of the byte length
   * @return a byte description of the given long interger value.
   */
  public static String byteDesc(long len) {
    double val = 0.0;
    String ending = "";
    if (len < 1024 * 1024) {
      val = (1.0 * len) / 1024;
      ending = " KB";
    } else if (len < 1024 * 1024 * 1024) {
      val = (1.0 * len) / (1024 * 1024);
      ending = " MB";
    } else if (len < 1024L * 1024 * 1024 * 1024) {
      val = (1.0 * len) / (1024 * 1024 * 1024);
      ending = " GB";
    } else if (len < 1024L * 1024 * 1024 * 1024 * 1024) {
      val = (1.0 * len) / (1024L * 1024 * 1024 * 1024);
      ending = " TB";
    } else {
      val = (1.0 * len) / (1024L * 1024 * 1024 * 1024 * 1024);
      ending = " PB";
    }
    return limitDecimalTo2(val) + ending;
    return TraditionalBinaryPrefix.long2String(len, "B", 2);
  }

  public static synchronized String limitDecimalTo2(double d) {
    return decimalFormat.format(d);
  /** @deprecated use StringUtils.format("%.2f", d). */
  @Deprecated
  public static String limitDecimalTo2(double d) {
    return format("%.2f", d);
  }

  /**

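A few illustrative values for the new helper, assuming the patched StringUtils above; exact multiples of a prefix print without a decimal point, everything else honours the requested number of decimal places:

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;

public class BinaryPrefixSketch {
  public static void main(String[] args) {
    System.out.println(TraditionalBinaryPrefix.long2String(1024L, "B", 2));            // "1 KB"
    System.out.println(TraditionalBinaryPrefix.long2String(1536L, "B", 2));            // "1.50 KB"
    System.out.println(TraditionalBinaryPrefix.long2String(10L * 1024 * 1024, "", 1)); // "10 M"
    // byteDesc(len) now simply delegates to long2String(len, "B", 2), and the
    // deprecated humanReadableInt(n) delegates to long2String(n, "", 1).
  }
}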
@@ -325,6 +325,13 @@ Hadoop MapReduce Next Generation - Cluster Setup
| | | How long to keep aggregation logs before deleting them. -1 disables. |
| | | Be careful, set this too small and you will spam the name node. |
*-------------------------+-------------------------+------------------------+
| <<<yarn.log-aggregation.retain-check-interval-seconds>>> | | |
| | <-1> | |
| | | Time between checks for aggregated log retention. If set to 0 or a |
| | | negative value then the value is computed as one-tenth of the |
| | | aggregated log retention time. |
| | | Be careful, set this too small and you will spam the name node. |
*-------------------------+-------------------------+------------------------+

@@ -303,6 +303,46 @@ public class TestFsShellReturnCode {
    }
  }

  @Test
  public void testRmWithNonexistentGlob() throws Exception {
    Configuration conf = new Configuration();
    FsShell shell = new FsShell();
    shell.setConf(conf);
    final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    final PrintStream err = new PrintStream(bytes);
    final PrintStream oldErr = System.err;
    System.setErr(err);
    final String results;
    try {
      int exit = shell.run(new String[]{"-rm", "nomatch*"});
      assertEquals(1, exit);
      results = bytes.toString();
      assertTrue(results.contains("rm: `nomatch*': No such file or directory"));
    } finally {
      IOUtils.closeStream(err);
      System.setErr(oldErr);
    }
  }

  @Test
  public void testRmForceWithNonexistentGlob() throws Exception {
    Configuration conf = new Configuration();
    FsShell shell = new FsShell();
    shell.setConf(conf);
    final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    final PrintStream err = new PrintStream(bytes);
    final PrintStream oldErr = System.err;
    System.setErr(err);
    try {
      int exit = shell.run(new String[]{"-rm", "-f", "nomatch*"});
      assertEquals(0, exit);
      assertTrue(bytes.toString().isEmpty());
    } finally {
      IOUtils.closeStream(err);
      System.setErr(oldErr);
    }
  }

  @Test
  public void testInvalidDefaultFS() throws Exception {
    // if default fs doesn't exist or is invalid, but the path provided in

@@ -28,6 +28,7 @@ import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Shell;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;

@@ -46,8 +47,18 @@ public class TestHarFileSystemBasics {

  private static final String ROOT_PATH = System.getProperty("test.build.data",
      "build/test/data");
  private static final Path rootPath = new Path(
      new File(ROOT_PATH).getAbsolutePath() + "/localfs");
  private static final Path rootPath;
  static {
    String root = new Path(new File(ROOT_PATH).getAbsolutePath(), "localfs")
      .toUri().getPath();
    // Strip drive specifier on Windows, which would make the HAR URI invalid and
    // cause tests to fail.
    if (Shell.WINDOWS) {
      root = root.substring(root.indexOf(':') + 1);
    }
    rootPath = new Path(root);
  }

  // NB: .har suffix is necessary
  private static final Path harPath = new Path(rootPath, "path1/path2/my.har");

@@ -45,19 +45,39 @@ public class TestListFiles {

  final protected static Configuration conf = new Configuration();
  protected static FileSystem fs;
  final protected static Path TEST_DIR = getTestDir();
  protected static Path TEST_DIR;
  final private static int FILE_LEN = 10;
  final private static Path FILE1 = new Path(TEST_DIR, "file1");
  final private static Path DIR1 = new Path(TEST_DIR, "dir1");
  final private static Path FILE2 = new Path(DIR1, "file2");
  final private static Path FILE3 = new Path(DIR1, "file3");
  private static Path FILE1;
  private static Path DIR1;
  private static Path FILE2;
  private static Path FILE3;

  static {
    setTestPaths(new Path(
      System.getProperty("test.build.data", "build/test/data/work-dir/localfs"),
      "main_"));
  }

  protected static Path getTestDir() {
    return new Path(
      System.getProperty("test.build.data","build/test/data/work-dir/localfs"),
      "main_");
    return TEST_DIR;
  }

  /**
   * Sets the root testing directory and reinitializes any additional test paths
   * that are under the root. This method is intended to be called from a
   * subclass's @BeforeClass method if there is a need to override the testing
   * directory.
   *
   * @param testDir Path root testing directory
   */
  protected static void setTestPaths(Path testDir) {
    TEST_DIR = testDir;
    FILE1 = new Path(TEST_DIR, "file1");
    DIR1 = new Path(TEST_DIR, "dir1");
    FILE2 = new Path(DIR1, "file2");
    FILE3 = new Path(DIR1, "file3");
  }

  @BeforeClass
  public static void testSetUp() throws Exception {
    fs = FileSystem.getLocal(conf);

@ -28,6 +28,7 @@ import java.io.*;
|
|||
import static org.junit.Assert.*;
|
||||
import static org.junit.Assume.assumeTrue;
|
||||
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
|
@ -38,8 +39,9 @@ public class TestLocalFileSystem {
|
|||
private static final String TEST_ROOT_DIR
|
||||
= System.getProperty("test.build.data","build/test/data") + "/work-dir/localfs";
|
||||
|
||||
private final File base = new File(TEST_ROOT_DIR);
|
||||
private Configuration conf;
|
||||
private FileSystem fileSys;
|
||||
private LocalFileSystem fileSys;
|
||||
|
||||
private void cleanupFile(FileSystem fs, Path name) throws IOException {
|
||||
assertTrue(fs.exists(name));
|
||||
|
@ -53,6 +55,13 @@ public class TestLocalFileSystem {
|
|||
fileSys = FileSystem.getLocal(conf);
|
||||
fileSys.delete(new Path(TEST_ROOT_DIR), true);
|
||||
}
|
||||
|
||||
@After
|
||||
public void after() throws IOException {
|
||||
base.setWritable(true);
|
||||
FileUtil.fullyDelete(base);
|
||||
assertTrue(!base.exists());
|
||||
}
|
||||
|
||||
/**
|
||||
* Test the capability of setting the working directory.
|
||||
|
@ -269,10 +278,83 @@ public class TestLocalFileSystem {
|
|||
LocalFileSystem fs = FileSystem.getLocal(conf);
|
||||
File colonFile = new File(TEST_ROOT_DIR, "foo:bar");
|
||||
colonFile.mkdirs();
|
||||
colonFile.createNewFile();
|
||||
FileStatus[] stats = fs.listStatus(new Path(TEST_ROOT_DIR));
|
||||
assertEquals("Unexpected number of stats", 1, stats.length);
|
||||
assertEquals("Bad path from stat", colonFile.getAbsolutePath(),
|
||||
stats[0].getPath().toUri().getPath());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testReportChecksumFailure() throws IOException {
|
||||
base.mkdirs();
|
||||
assertTrue(base.exists() && base.isDirectory());
|
||||
|
||||
final File dir1 = new File(base, "dir1");
|
||||
final File dir2 = new File(dir1, "dir2");
|
||||
dir2.mkdirs();
|
||||
assertTrue(dir2.exists() && dir2.canWrite());
|
||||
|
||||
final String dataFileName = "corruptedData";
|
||||
final Path dataPath = new Path(new File(dir2, dataFileName).toURI());
|
||||
final Path checksumPath = fileSys.getChecksumFile(dataPath);
|
||||
final FSDataOutputStream fsdos = fileSys.create(dataPath);
|
||||
try {
|
||||
fsdos.writeUTF("foo");
|
||||
} finally {
|
||||
fsdos.close();
|
||||
}
|
||||
assertTrue(fileSys.pathToFile(dataPath).exists());
|
||||
final long dataFileLength = fileSys.getFileStatus(dataPath).getLen();
|
||||
assertTrue(dataFileLength > 0);
|
||||
|
||||
// check the the checksum file is created and not empty:
|
||||
assertTrue(fileSys.pathToFile(checksumPath).exists());
|
||||
final long checksumFileLength = fileSys.getFileStatus(checksumPath).getLen();
|
||||
assertTrue(checksumFileLength > 0);
|
||||
|
||||
// this is a hack to force the #reportChecksumFailure() method to stop
|
||||
// climbing up at the 'base' directory and use 'dir1/bad_files' as the
|
||||
// corrupted files storage:
|
||||
base.setWritable(false);
|
||||
|
||||
FSDataInputStream dataFsdis = fileSys.open(dataPath);
|
||||
FSDataInputStream checksumFsdis = fileSys.open(checksumPath);
|
||||
|
||||
boolean retryIsNecessary = fileSys.reportChecksumFailure(dataPath, dataFsdis, 0, checksumFsdis, 0);
|
||||
assertTrue(!retryIsNecessary);
|
||||
|
||||
// the data file should be moved:
|
||||
assertTrue(!fileSys.pathToFile(dataPath).exists());
|
||||
// the checksum file should be moved:
|
||||
assertTrue(!fileSys.pathToFile(checksumPath).exists());
|
||||
|
||||
// check that the files exist in the new location where they were moved:
|
||||
File[] dir1files = dir1.listFiles(new FileFilter() {
|
||||
@Override
|
||||
public boolean accept(File pathname) {
|
||||
return pathname != null && !pathname.getName().equals("dir2");
|
||||
}
|
||||
});
|
||||
assertTrue(dir1files != null);
|
||||
assertTrue(dir1files.length == 1);
|
||||
File badFilesDir = dir1files[0];
|
||||
|
||||
File[] badFiles = badFilesDir.listFiles();
|
||||
assertTrue(badFiles != null);
|
||||
assertTrue(badFiles.length == 2);
|
||||
boolean dataFileFound = false;
|
||||
boolean checksumFileFound = false;
|
||||
for (File badFile: badFiles) {
|
||||
if (badFile.getName().startsWith(dataFileName)) {
|
||||
assertTrue(dataFileLength == badFile.length());
|
||||
dataFileFound = true;
|
||||
} else if (badFile.getName().contains(dataFileName + ".crc")) {
|
||||
assertTrue(checksumFileLength == badFile.length());
|
||||
checksumFileFound = true;
|
||||
}
|
||||
}
|
||||
assertTrue(dataFileFound);
|
||||
assertTrue(checksumFileFound);
|
||||
}
|
||||
|
||||
}
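For readers outside the test, a minimal sketch of the contract verified above; the directory and file names are placeholders, not part of the patch, and the usual org.apache.hadoop.fs imports are assumed.

  // Hypothetical caller: report a checksum mismatch on a local file.
  Configuration conf = new Configuration();
  LocalFileSystem fs = FileSystem.getLocal(conf);
  Path data = new Path("/tmp/example/corruptedData");   // placeholder path
  Path crc = fs.getChecksumFile(data);
  FSDataInputStream dataIn = fs.open(data);
  FSDataInputStream crcIn = fs.open(crc);
  // Both files are moved aside into a bad_files directory; the return value
  // says whether retrying the read is worthwhile.
  boolean retry = fs.reportChecksumFailure(data, dataIn, 0, crcIn, 0);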
@ -88,4 +88,61 @@ public class TestBoundedByteArrayOutputStream extends TestCase {
|
|||
assertTrue("Writing beyond limit did not throw an exception",
|
||||
caughtException);
|
||||
}
|
||||
|
||||
|
||||
static class ResettableBoundedByteArrayOutputStream
|
||||
extends BoundedByteArrayOutputStream {
|
||||
|
||||
public ResettableBoundedByteArrayOutputStream(int capacity) {
|
||||
super(capacity);
|
||||
}
|
||||
|
||||
public void resetBuffer(byte[] buf, int offset, int length) {
|
||||
super.resetBuffer(buf, offset, length);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public void testResetBuffer() throws IOException {
|
||||
|
||||
ResettableBoundedByteArrayOutputStream stream =
|
||||
new ResettableBoundedByteArrayOutputStream(SIZE);
|
||||
|
||||
// Write to the stream, get the data back and check for contents
|
||||
stream.write(INPUT, 0, SIZE);
|
||||
assertTrue("Array Contents Mismatch",
|
||||
Arrays.equals(INPUT, stream.getBuffer()));
|
||||
|
||||
// Try writing beyond end of buffer. Should throw an exception
|
||||
boolean caughtException = false;
|
||||
|
||||
try {
|
||||
stream.write(INPUT[0]);
|
||||
} catch (Exception e) {
|
||||
caughtException = true;
|
||||
}
|
||||
|
||||
assertTrue("Writing beyond limit did not throw an exception",
|
||||
caughtException);
|
||||
|
||||
//Reset the stream and try, should succeed
|
||||
byte[] newBuf = new byte[SIZE];
|
||||
stream.resetBuffer(newBuf, 0, newBuf.length);
|
||||
assertTrue("Limit did not get reset correctly",
|
||||
(stream.getLimit() == SIZE));
|
||||
stream.write(INPUT, 0, SIZE);
|
||||
assertTrue("Array Contents Mismatch",
|
||||
Arrays.equals(INPUT, stream.getBuffer()));
|
||||
|
||||
// Try writing one more byte, should fail
|
||||
caughtException = false;
|
||||
try {
|
||||
stream.write(INPUT[0]);
|
||||
} catch (Exception e) {
|
||||
caughtException = true;
|
||||
}
|
||||
assertTrue("Writing beyond limit did not throw an exception",
|
||||
caughtException);
|
||||
}
|
||||
|
||||
}
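A rough sketch of the reuse pattern this test exercises (HADOOP-9276): a subclass widens the protected resetBuffer(..) so a single stream object can be re-pointed at a fresh backing array instead of being reallocated. The buffers and sizes below are illustrative only.

  ResettableBoundedByteArrayOutputStream out =
      new ResettableBoundedByteArrayOutputStream(4096);
  byte[] first = new byte[4096];            // placeholder payloads
  byte[] second = new byte[4096];
  out.write(first, 0, first.length);        // fills the stream up to its limit
  byte[] fresh = new byte[4096];
  out.resetBuffer(fresh, 0, fresh.length);  // reuse the same stream object
  out.write(second, 0, second.length);      // limit is back to 4096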
@ -17,15 +17,20 @@
|
|||
*/
|
||||
package org.apache.hadoop.io;
|
||||
|
||||
import java.util.Map;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
import java.util.Map;
|
||||
import org.junit.Test;
|
||||
|
||||
/**
|
||||
* Tests SortedMapWritable
|
||||
*/
|
||||
public class TestSortedMapWritable extends TestCase {
|
||||
public class TestSortedMapWritable {
|
||||
/** the test */
|
||||
@Test
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testSortedMapWritable() {
|
||||
Text[] keys = {
|
||||
|
@ -90,6 +95,7 @@ public class TestSortedMapWritable extends TestCase {
|
|||
/**
|
||||
* Test that number of "unknown" classes is propagated across multiple copies.
|
||||
*/
|
||||
@Test
|
||||
@SuppressWarnings("deprecation")
|
||||
public void testForeignClass() {
|
||||
SortedMapWritable inMap = new SortedMapWritable();
|
||||
|
@ -99,4 +105,63 @@ public class TestSortedMapWritable extends TestCase {
|
|||
SortedMapWritable copyOfCopy = new SortedMapWritable(outMap);
|
||||
assertEquals(1, copyOfCopy.getNewClasses());
|
||||
}
|
||||
|
||||
/**
|
||||
* Tests that the equals() and hashCode() methods still hold the Map contract.
|
||||
*/
|
||||
@Test
|
||||
public void testEqualsAndHashCode() {
|
||||
String failureReason;
|
||||
SortedMapWritable mapA = new SortedMapWritable();
|
||||
SortedMapWritable mapB = new SortedMapWritable();
|
||||
|
||||
// Sanity checks
|
||||
failureReason = "SortedMapWritable couldn't be initialized. Got null reference";
|
||||
assertNotNull(failureReason, mapA);
|
||||
assertNotNull(failureReason, mapB);
|
||||
|
||||
// Basic null check
|
||||
assertFalse("equals method returns true when passed null", mapA.equals(null));
|
||||
|
||||
// When entry set is empty, they should be equal
|
||||
assertTrue("Two empty SortedMapWritables are no longer equal", mapA.equals(mapB));
|
||||
|
||||
// Setup
|
||||
Text[] keys = {
|
||||
new Text("key1"),
|
||||
new Text("key2")
|
||||
};
|
||||
|
||||
BytesWritable[] values = {
|
||||
new BytesWritable("value1".getBytes()),
|
||||
new BytesWritable("value2".getBytes())
|
||||
};
|
||||
|
||||
mapA.put(keys[0], values[0]);
|
||||
mapB.put(keys[1], values[1]);
|
||||
|
||||
// entrySets are different
|
||||
failureReason = "Two SortedMapWritables with different data are now equal";
|
||||
assertTrue(failureReason, mapA.hashCode() != mapB.hashCode());
|
||||
assertTrue(failureReason, !mapA.equals(mapB));
|
||||
assertTrue(failureReason, !mapB.equals(mapA));
|
||||
|
||||
mapA.put(keys[1], values[1]);
|
||||
mapB.put(keys[0], values[0]);
|
||||
|
||||
// entrySets are now same
|
||||
failureReason = "Two SortedMapWritables with same entry sets formed in different order are now different";
|
||||
assertEquals(failureReason, mapA.hashCode(), mapB.hashCode());
|
||||
assertTrue(failureReason, mapA.equals(mapB));
|
||||
assertTrue(failureReason, mapB.equals(mapA));
|
||||
|
||||
// Let's check if entry sets of same keys but different values
|
||||
mapA.put(keys[0], values[1]);
|
||||
mapA.put(keys[1], values[0]);
|
||||
|
||||
failureReason = "Two SortedMapWritables with different content are now equal";
|
||||
assertTrue(failureReason, mapA.hashCode() != mapB.hashCode());
|
||||
assertTrue(failureReason, !mapA.equals(mapB));
|
||||
assertTrue(failureReason, !mapB.equals(mapA));
|
||||
}
|
||||
}
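As a concrete consequence of the contract checked above (HADOOP-9124), two maps holding the same entries now behave as a single key in hash-based collections. A small illustrative sketch, assuming java.util.HashSet is available alongside the imports already in this test:

  SortedMapWritable mapA = new SortedMapWritable();
  SortedMapWritable mapB = new SortedMapWritable();
  mapA.put(new Text("key1"), new BytesWritable("value1".getBytes()));
  mapB.put(new Text("key1"), new BytesWritable("value1".getBytes()));
  java.util.Set<SortedMapWritable> set = new java.util.HashSet<SortedMapWritable>();
  set.add(mapA);
  set.add(mapB);               // equal to mapA, so the set still holds one element
  assertEquals(1, set.size());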
@ -26,6 +26,7 @@ import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.net.NetUtils;

import java.util.Random;

@ -586,7 +587,7 @@ public class TestIPC {
  private void assertRetriesOnSocketTimeouts(Configuration conf,
      int maxTimeoutRetries) throws IOException, InterruptedException {
    SocketFactory mockFactory = Mockito.mock(SocketFactory.class);
    doThrow(new SocketTimeoutException()).when(mockFactory).createSocket();
    doThrow(new ConnectTimeoutException("fake")).when(mockFactory).createSocket();
    Client client = new Client(IntWritable.class, conf, mockFactory);
    InetSocketAddress address = new InetSocketAddress("127.0.0.1", 9090);
    try {
@ -108,7 +108,7 @@ public abstract class GetGroupsTestBase {
    for (String group : user.getGroupNames()) {
      expectedOutput += " " + group;
    }
    return expectedOutput + "\n";
    return expectedOutput + System.getProperty("line.separator");
  }

  private String runTool(Configuration conf, String[] args, boolean success)
@ -18,6 +18,8 @@
|
|||
|
||||
package org.apache.hadoop.util;
|
||||
|
||||
import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.long2String;
|
||||
import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.string2long;
|
||||
import static org.junit.Assert.assertArrayEquals;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.fail;
|
||||
|
@ -26,6 +28,7 @@ import java.util.ArrayList;
|
|||
import java.util.List;
|
||||
|
||||
import org.apache.hadoop.test.UnitTestcaseTimeLimit;
|
||||
import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
|
||||
import org.junit.Test;
|
||||
|
||||
public class TestStringUtils extends UnitTestcaseTimeLimit {
|
||||
|
@ -134,45 +137,34 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
|
|||
|
||||
@Test
|
||||
public void testTraditionalBinaryPrefix() throws Exception {
|
||||
//test string2long(..)
|
||||
String[] symbol = {"k", "m", "g", "t", "p", "e"};
|
||||
long m = 1024;
|
||||
for(String s : symbol) {
|
||||
assertEquals(0, StringUtils.TraditionalBinaryPrefix.string2long(0 + s));
|
||||
assertEquals(m, StringUtils.TraditionalBinaryPrefix.string2long(1 + s));
|
||||
assertEquals(0, string2long(0 + s));
|
||||
assertEquals(m, string2long(1 + s));
|
||||
m *= 1024;
|
||||
}
|
||||
|
||||
assertEquals(0L, StringUtils.TraditionalBinaryPrefix.string2long("0"));
|
||||
assertEquals(1024L, StringUtils.TraditionalBinaryPrefix.string2long("1k"));
|
||||
assertEquals(-1024L, StringUtils.TraditionalBinaryPrefix.string2long("-1k"));
|
||||
assertEquals(1259520L,
|
||||
StringUtils.TraditionalBinaryPrefix.string2long("1230K"));
|
||||
assertEquals(-1259520L,
|
||||
StringUtils.TraditionalBinaryPrefix.string2long("-1230K"));
|
||||
assertEquals(104857600L,
|
||||
StringUtils.TraditionalBinaryPrefix.string2long("100m"));
|
||||
assertEquals(-104857600L,
|
||||
StringUtils.TraditionalBinaryPrefix.string2long("-100M"));
|
||||
assertEquals(956703965184L,
|
||||
StringUtils.TraditionalBinaryPrefix.string2long("891g"));
|
||||
assertEquals(-956703965184L,
|
||||
StringUtils.TraditionalBinaryPrefix.string2long("-891G"));
|
||||
assertEquals(501377302265856L,
|
||||
StringUtils.TraditionalBinaryPrefix.string2long("456t"));
|
||||
assertEquals(-501377302265856L,
|
||||
StringUtils.TraditionalBinaryPrefix.string2long("-456T"));
|
||||
assertEquals(11258999068426240L,
|
||||
StringUtils.TraditionalBinaryPrefix.string2long("10p"));
|
||||
assertEquals(-11258999068426240L,
|
||||
StringUtils.TraditionalBinaryPrefix.string2long("-10P"));
|
||||
assertEquals(1152921504606846976L,
|
||||
StringUtils.TraditionalBinaryPrefix.string2long("1e"));
|
||||
assertEquals(-1152921504606846976L,
|
||||
StringUtils.TraditionalBinaryPrefix.string2long("-1E"));
|
||||
assertEquals(0L, string2long("0"));
|
||||
assertEquals(1024L, string2long("1k"));
|
||||
assertEquals(-1024L, string2long("-1k"));
|
||||
assertEquals(1259520L, string2long("1230K"));
|
||||
assertEquals(-1259520L, string2long("-1230K"));
|
||||
assertEquals(104857600L, string2long("100m"));
|
||||
assertEquals(-104857600L, string2long("-100M"));
|
||||
assertEquals(956703965184L, string2long("891g"));
|
||||
assertEquals(-956703965184L, string2long("-891G"));
|
||||
assertEquals(501377302265856L, string2long("456t"));
|
||||
assertEquals(-501377302265856L, string2long("-456T"));
|
||||
assertEquals(11258999068426240L, string2long("10p"));
|
||||
assertEquals(-11258999068426240L, string2long("-10P"));
|
||||
assertEquals(1152921504606846976L, string2long("1e"));
|
||||
assertEquals(-1152921504606846976L, string2long("-1E"));
|
||||
|
||||
String tooLargeNumStr = "10e";
|
||||
try {
|
||||
StringUtils.TraditionalBinaryPrefix.string2long(tooLargeNumStr);
|
||||
string2long(tooLargeNumStr);
|
||||
fail("Test passed for a number " + tooLargeNumStr + " too large");
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertEquals(tooLargeNumStr + " does not fit in a Long", e.getMessage());
|
||||
|
@ -180,7 +172,7 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
|
|||
|
||||
String tooSmallNumStr = "-10e";
|
||||
try {
|
||||
StringUtils.TraditionalBinaryPrefix.string2long(tooSmallNumStr);
|
||||
string2long(tooSmallNumStr);
|
||||
fail("Test passed for a number " + tooSmallNumStr + " too small");
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertEquals(tooSmallNumStr + " does not fit in a Long", e.getMessage());
|
||||
|
@ -189,7 +181,7 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
|
|||
String invalidFormatNumStr = "10kb";
|
||||
char invalidPrefix = 'b';
|
||||
try {
|
||||
StringUtils.TraditionalBinaryPrefix.string2long(invalidFormatNumStr);
|
||||
string2long(invalidFormatNumStr);
|
||||
fail("Test passed for a number " + invalidFormatNumStr
|
||||
+ " has invalid format");
|
||||
} catch (IllegalArgumentException e) {
|
||||
|
@ -199,6 +191,74 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
|
|||
e.getMessage());
|
||||
}
|
||||
|
||||
//test long2string(..)
|
||||
assertEquals("0", long2String(0, null, 2));
|
||||
for(int decimalPlace = 0; decimalPlace < 2; decimalPlace++) {
|
||||
for(int n = 1; n < TraditionalBinaryPrefix.KILO.value; n++) {
|
||||
assertEquals(n + "", long2String(n, null, decimalPlace));
|
||||
assertEquals(-n + "", long2String(-n, null, decimalPlace));
|
||||
}
|
||||
assertEquals("1 K", long2String(1L << 10, null, decimalPlace));
|
||||
assertEquals("-1 K", long2String(-1L << 10, null, decimalPlace));
|
||||
}
|
||||
|
||||
assertEquals("8.00 E", long2String(Long.MAX_VALUE, null, 2));
|
||||
assertEquals("8.00 E", long2String(Long.MAX_VALUE - 1, null, 2));
|
||||
assertEquals("-8 E", long2String(Long.MIN_VALUE, null, 2));
|
||||
assertEquals("-8.00 E", long2String(Long.MIN_VALUE + 1, null, 2));
|
||||
|
||||
final String[] zeros = {" ", ".0 ", ".00 "};
|
||||
for(int decimalPlace = 0; decimalPlace < zeros.length; decimalPlace++) {
|
||||
final String trailingZeros = zeros[decimalPlace];
|
||||
|
||||
for(int e = 11; e < Long.SIZE - 1; e++) {
|
||||
final TraditionalBinaryPrefix p
|
||||
= TraditionalBinaryPrefix.values()[e/10 - 1];
|
||||
|
||||
{ // n = 2^e
|
||||
final long n = 1L << e;
|
||||
final String expected = (n/p.value) + " " + p.symbol;
|
||||
assertEquals("n=" + n, expected, long2String(n, null, 2));
|
||||
}
|
||||
|
||||
{ // n = 2^e + 1
|
||||
final long n = (1L << e) + 1;
|
||||
final String expected = (n/p.value) + trailingZeros + p.symbol;
|
||||
assertEquals("n=" + n, expected, long2String(n, null, decimalPlace));
|
||||
}
|
||||
|
||||
{ // n = 2^e - 1
|
||||
final long n = (1L << e) - 1;
|
||||
final String expected = ((n+1)/p.value) + trailingZeros + p.symbol;
|
||||
assertEquals("n=" + n, expected, long2String(n, null, decimalPlace));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
assertEquals("1.50 K", long2String(3L << 9, null, 2));
|
||||
assertEquals("1.5 K", long2String(3L << 9, null, 1));
|
||||
assertEquals("1.50 M", long2String(3L << 19, null, 2));
|
||||
assertEquals("2 M", long2String(3L << 19, null, 0));
|
||||
assertEquals("3 G", long2String(3L << 30, null, 2));
|
||||
|
||||
// test byteDesc(..)
|
||||
assertEquals("0 B", StringUtils.byteDesc(0));
|
||||
assertEquals("-100 B", StringUtils.byteDesc(-100));
|
||||
assertEquals("1 KB", StringUtils.byteDesc(1024));
|
||||
assertEquals("1.50 KB", StringUtils.byteDesc(3L << 9));
|
||||
assertEquals("1.50 MB", StringUtils.byteDesc(3L << 19));
|
||||
assertEquals("3 GB", StringUtils.byteDesc(3L << 30));
|
||||
|
||||
// test formatPercent(..)
|
||||
assertEquals("10%", StringUtils.formatPercent(0.1, 0));
|
||||
assertEquals("10.0%", StringUtils.formatPercent(0.1, 1));
|
||||
assertEquals("10.00%", StringUtils.formatPercent(0.1, 2));
|
||||
|
||||
assertEquals("1%", StringUtils.formatPercent(0.00543, 0));
|
||||
assertEquals("0.5%", StringUtils.formatPercent(0.00543, 1));
|
||||
assertEquals("0.54%", StringUtils.formatPercent(0.00543, 2));
|
||||
assertEquals("0.543%", StringUtils.formatPercent(0.00543, 3));
|
||||
assertEquals("0.5430%", StringUtils.formatPercent(0.00543, 4));
|
||||
}
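For quick reference, the behavior pinned down by the assertions above, using the same static imports; the values are taken directly from the test:

  long bytes = string2long("1230K");                     // 1259520
  String size = long2String(3L << 19, null, 2);          // "1.50 M"
  String desc = StringUtils.byteDesc(3L << 19);          // "1.50 MB"
  String pct  = StringUtils.formatPercent(0.00543, 2);   // "0.54%"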
|
||||
|
||||
@Test
|
||||
|
@ -314,10 +374,9 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
|
|||
}
|
||||
long et = System.nanoTime();
|
||||
if (outer > 3) {
|
||||
System.out.println(
|
||||
(useOurs ? "StringUtils impl" : "Java impl") +
|
||||
" #" + outer + ":" +
|
||||
(et - st)/1000000 + "ms");
|
||||
System.out.println( (useOurs ? "StringUtils impl" : "Java impl")
|
||||
+ " #" + outer + ":" + (et - st)/1000000 + "ms, components="
|
||||
+ components );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,6 +17,8 @@
|
|||
*/
|
||||
package org.apache.hadoop.fs.http.client;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.ContentSummary;
|
||||
|
@ -86,6 +88,7 @@ public class HttpFSFileSystem extends FileSystem
|
|||
public static final String PERMISSION_PARAM = "permission";
|
||||
public static final String DESTINATION_PARAM = "destination";
|
||||
public static final String RECURSIVE_PARAM = "recursive";
|
||||
public static final String SOURCES_PARAM = "sources";
|
||||
public static final String OWNER_PARAM = "owner";
|
||||
public static final String GROUP_PARAM = "group";
|
||||
public static final String MODIFICATION_TIME_PARAM = "modificationtime";
|
||||
|
@ -167,7 +170,7 @@ public class HttpFSFileSystem extends FileSystem
|
|||
GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
|
||||
GETFILECHECKSUM(HTTP_GET), GETFILEBLOCKLOCATIONS(HTTP_GET),
|
||||
INSTRUMENTATION(HTTP_GET),
|
||||
APPEND(HTTP_POST),
|
||||
APPEND(HTTP_POST), CONCAT(HTTP_POST),
|
||||
CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT),
|
||||
SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
|
||||
DELETE(HTTP_DELETE);
|
||||
|
@ -528,6 +531,29 @@ public class HttpFSFileSystem extends FileSystem
|
|||
HttpURLConnection.HTTP_OK);
|
||||
}
|
||||
|
||||
/**
|
||||
* Concat existing files together.
|
||||
* @param f the path to the target destination.
|
||||
* @param psrcs the paths to the sources to use for the concatenation.
|
||||
*
|
||||
* @throws IOException
|
||||
*/
|
||||
@Override
|
||||
public void concat(Path f, Path[] psrcs) throws IOException {
|
||||
List<String> strPaths = new ArrayList<String>(psrcs.length);
|
||||
for(Path psrc : psrcs) {
|
||||
strPaths.add(psrc.toUri().getPath());
|
||||
}
|
||||
String srcs = StringUtils.join(",", strPaths);
|
||||
|
||||
Map<String, String> params = new HashMap<String, String>();
|
||||
params.put(OP_PARAM, Operation.CONCAT.toString());
|
||||
params.put(SOURCES_PARAM, srcs);
|
||||
HttpURLConnection conn = getConnection(Operation.CONCAT.getMethod(),
|
||||
params, f, true);
|
||||
HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
|
||||
}
|
||||
|
||||
/**
|
||||
* Renames Path src to Path dst. Can take place on local fs
|
||||
* or remote DFS.
|
||||
|
|
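For context, a small sketch of how a client would drive the new concat() end to end; the HttpFS endpoint URI is a placeholder, and the target/source paths mirror the ones used in BaseTestHttpFSWith#testConcat later in this change:

  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(URI.create("webhdfs://httpfs-host:14000"), conf); // placeholder URI
  Path target = new Path("/test/foo.txt");
  Path[] sources = { new Path("/test/bar.txt"), new Path("/test/derp.txt") };
  fs.concat(target, sources);   // sources are appended to target and then removed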
|
@ -198,6 +198,47 @@ public class FSOperations {
|
|||
|
||||
}
|
||||
|
||||
/**
|
||||
* Executor that performs a concat FileSystemAccess file system operation.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public static class FSConcat implements FileSystemAccess.FileSystemExecutor<Void> {
|
||||
private Path path;
|
||||
private Path[] sources;
|
||||
|
||||
/**
|
||||
* Creates a Concat executor.
|
||||
*
|
||||
* @param path target path to concat to.
|
||||
* @param sources comma-separated absolute paths to use as sources.
|
||||
*/
|
||||
public FSConcat(String path, String[] sources) {
|
||||
this.sources = new Path[sources.length];
|
||||
|
||||
for(int i = 0; i < sources.length; i++) {
|
||||
this.sources[i] = new Path(sources[i]);
|
||||
}
|
||||
|
||||
this.path = new Path(path);
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes the filesystem operation.
|
||||
*
|
||||
* @param fs filesystem instance to use.
|
||||
*
|
||||
* @return void.
|
||||
*
|
||||
* @throws IOException thrown if an IO error occurred.
|
||||
*/
|
||||
@Override
|
||||
public Void execute(FileSystem fs) throws IOException {
|
||||
fs.concat(path, sources);
|
||||
return null;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Executor that performs a content-summary FileSystemAccess file system operation.
|
||||
*/
|
||||
|
|
|
@ -58,6 +58,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
|
|||
PARAMS_DEF.put(Operation.INSTRUMENTATION, new Class[]{DoAsParam.class});
|
||||
PARAMS_DEF.put(Operation.APPEND,
|
||||
new Class[]{DoAsParam.class, DataParam.class});
|
||||
PARAMS_DEF.put(Operation.CONCAT, new Class[]{SourcesParam.class});
|
||||
PARAMS_DEF.put(Operation.CREATE,
|
||||
new Class[]{DoAsParam.class, PermissionParam.class, OverwriteParam.class,
|
||||
ReplicationParam.class, BlockSizeParam.class, DataParam.class});
|
||||
|
@ -388,6 +389,25 @@ public class HttpFSParametersProvider extends ParametersProvider {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Class for concat sources parameter.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public static class SourcesParam extends StringParam {
|
||||
|
||||
/**
|
||||
* Parameter name.
|
||||
*/
|
||||
public static final String NAME = HttpFSFileSystem.SOURCES_PARAM;
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
*/
|
||||
public SourcesParam() {
|
||||
super(NAME, null);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Class for to-path parameter.
|
||||
*/
|
||||
|
|
|
@ -22,22 +22,23 @@ import org.apache.hadoop.classification.InterfaceAudience;
|
|||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OperationParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AccessTimeParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.BlockSizeParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.RecursiveParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DoAsParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.FilterParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.GroupParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ModifiedTimeParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OffsetParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OperationParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OverwriteParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OwnerParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.PermissionParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.RecursiveParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ReplicationParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam;
|
||||
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.SourcesParam;
|
||||
import org.apache.hadoop.lib.service.FileSystemAccess;
|
||||
import org.apache.hadoop.lib.service.FileSystemAccessException;
|
||||
import org.apache.hadoop.lib.service.Groups;
|
||||
|
@ -403,9 +404,9 @@ public class HttpFSServer {
|
|||
Response response;
|
||||
path = makeAbsolute(path);
|
||||
MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
|
||||
String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
|
||||
switch (op.value()) {
|
||||
case APPEND: {
|
||||
String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
|
||||
Boolean hasData = params.get(DataParam.NAME, DataParam.class);
|
||||
if (!hasData) {
|
||||
response = Response.temporaryRedirect(
|
||||
|
@ -420,6 +421,18 @@ public class HttpFSServer {
|
|||
}
|
||||
break;
|
||||
}
|
||||
case CONCAT: {
|
||||
System.out.println("HTTPFS SERVER CONCAT");
|
||||
String sources = params.get(SourcesParam.NAME, SourcesParam.class);
|
||||
|
||||
FSOperations.FSConcat command =
|
||||
new FSOperations.FSConcat(path, sources.split(","));
|
||||
fsExecute(user, null, command);
|
||||
AUDIT_LOG.info("[{}]", path);
|
||||
System.out.println("SENT RESPONSE");
|
||||
response = Response.ok().build();
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
throw new IOException(
|
||||
MessageFormat.format("Invalid HTTP POST operation [{0}]",
|
||||
|
|
|
@ -28,6 +28,8 @@ import org.apache.hadoop.fs.Path;
|
|||
import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
|
||||
import org.apache.hadoop.fs.permission.FsAction;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.test.HFSTestCase;
|
||||
import org.apache.hadoop.test.HadoopUsersConfTestHelper;
|
||||
|
@ -206,6 +208,30 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
private void testConcat() throws Exception {
|
||||
Configuration config = getProxiedFSConf();
|
||||
config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
|
||||
if (!isLocalFS()) {
|
||||
FileSystem fs = FileSystem.get(config);
|
||||
fs.mkdirs(getProxiedFSTestDir());
|
||||
Path path1 = new Path("/test/foo.txt");
|
||||
Path path2 = new Path("/test/bar.txt");
|
||||
Path path3 = new Path("/test/derp.txt");
|
||||
DFSTestUtil.createFile(fs, path1, 1024, (short) 3, 0);
|
||||
DFSTestUtil.createFile(fs, path2, 1024, (short) 3, 0);
|
||||
DFSTestUtil.createFile(fs, path3, 1024, (short) 3, 0);
|
||||
fs.close();
|
||||
fs = getHttpFSFileSystem();
|
||||
fs.concat(path1, new Path[]{path2, path3});
|
||||
fs.close();
|
||||
fs = FileSystem.get(config);
|
||||
Assert.assertTrue(fs.exists(path1));
|
||||
Assert.assertFalse(fs.exists(path2));
|
||||
Assert.assertFalse(fs.exists(path3));
|
||||
fs.close();
|
||||
}
|
||||
}
|
||||
|
||||
private void testRename() throws Exception {
|
||||
FileSystem fs = FileSystem.get(getProxiedFSConf());
|
||||
Path path = new Path(getProxiedFSTestDir(), "foo");
|
||||
|
@ -450,7 +476,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
}
|
||||
|
||||
protected enum Operation {
|
||||
GET, OPEN, CREATE, APPEND, RENAME, DELETE, LIST_STATUS, WORKING_DIRECTORY, MKDIRS,
|
||||
GET, OPEN, CREATE, APPEND, CONCAT, RENAME, DELETE, LIST_STATUS, WORKING_DIRECTORY, MKDIRS,
|
||||
SET_TIMES, SET_PERMISSION, SET_OWNER, SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY
|
||||
}
|
||||
|
||||
|
@ -468,6 +494,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
case APPEND:
|
||||
testAppend();
|
||||
break;
|
||||
case CONCAT:
|
||||
testConcat();
|
||||
case RENAME:
|
||||
testRename();
|
||||
break;
|
||||
|
|
|
@ -296,7 +296,28 @@ Trunk (Unreleased)

    HDFS-4382. Fix typo MAX_NOT_CHANGED_INTERATIONS. (Ted Yu via suresh)

Release 2.0.3-alpha - Unreleased
    HDFS-4340. Update addBlock() to include inode id as additional argument.
    (Brandon Li via suresh)

Release 2.0.4-beta - UNRELEASED

  INCOMPATIBLE CHANGES

  NEW FEATURES

  IMPROVEMENTS

  OPTIMIZATIONS

  BUG FIXES

    HDFS-4470. Several HDFS tests attempt file operations on invalid HDFS
    paths when running on Windows. (Chris Nauroth via suresh)

    HDFS-4471. Namenode WebUI file browsing does not work with wildcard
    addresses configured. (Andrew Wang via atm)

Release 2.0.3-alpha - 2013-02-06

  INCOMPATIBLE CHANGES

@ -317,6 +338,9 @@ Release 2.0.3-alpha - Unreleased

    HDFS-4451. hdfs balancer command returns exit code 1 on success instead
    of 0. (Joshua Blatt via suresh)

    HDFS-4350. Make enabling of stale marking on read and write paths
    independent. (Andrew Wang via suresh)

  NEW FEATURES

@ -505,6 +529,8 @@ Release 2.0.3-alpha - Unreleased

    HDFS-3598. WebHDFS support for file concat. (Plamen Jeliazkov via shv)

    HDFS-4456. Add concat to HttpFS and WebHDFS REST API docs. (plamenj2003 via tucu)

  OPTIMIZATIONS

    HDFS-3429. DataNode reads checksums even if client does not need them (todd)

@ -742,6 +768,28 @@ Release 2.0.3-alpha - Unreleased

    HDFS-4428. FsDatasetImpl should disclose what the error is when a rename
    fails. (Colin Patrick McCabe via atm)

    HDFS-4452. getAdditionalBlock() can create multiple blocks if the client
    times out and retries. (shv)

    HDFS-4445. All BKJM ledgers are not checked while tailing, so failover will fail.
    (Vinay via umamahesh)

    HDFS-4462. 2NN will fail to checkpoint after an HDFS upgrade from a
    pre-federation version of HDFS. (atm)

    HDFS-4404. Create file failure when the machine of first attempted NameNode
    is down. (Todd Lipcon via atm)

    HDFS-4344. dfshealth.jsp throws NumberFormatException when
    dfs.hosts/dfs.hosts.exclude includes port number. (Andy Isaacson via atm)

    HDFS-4468. Use the new StringUtils methods added by HADOOP-9252 and fix
    TestHDFSCLI and TestQuota. (szetszwo)

    HDFS-4458. In DFSUtil.getNameServiceUris(..), convert default fs URI using
    NetUtils.createSocketAddr(..) for being consistent with other addresses.
    (Binglin Chang via szetszwo)

  BREAKDOWN OF HDFS-3077 SUBTASKS

    HDFS-3077. Quorum-based protocol for reading and writing edit logs.

@ -503,7 +503,8 @@ public class BookKeeperJournalManager implements JournalManager {
|
|||
@Override
|
||||
public void selectInputStreams(Collection<EditLogInputStream> streams,
|
||||
long fromTxId, boolean inProgressOk) throws IOException {
|
||||
List<EditLogLedgerMetadata> currentLedgerList = getLedgerList(inProgressOk);
|
||||
List<EditLogLedgerMetadata> currentLedgerList = getLedgerList(fromTxId,
|
||||
inProgressOk);
|
||||
try {
|
||||
BookKeeperEditLogInputStream elis = null;
|
||||
for (EditLogLedgerMetadata l : currentLedgerList) {
|
||||
|
@ -511,6 +512,8 @@ public class BookKeeperJournalManager implements JournalManager {
|
|||
if (l.isInProgress()) {
|
||||
lastTxId = recoverLastTxId(l, false);
|
||||
}
|
||||
// Check once again, required for in-progress ledgers and in case of any
// gap.
|
||||
if (fromTxId >= l.getFirstTxId() && fromTxId <= lastTxId) {
|
||||
LedgerHandle h;
|
||||
if (l.isInProgress()) { // we don't want to fence the current journal
|
||||
|
@ -523,6 +526,8 @@ public class BookKeeperJournalManager implements JournalManager {
|
|||
elis = new BookKeeperEditLogInputStream(h, l);
|
||||
elis.skipTo(fromTxId);
|
||||
} else {
|
||||
// If mismatches then there might be some gap, so we should not check
|
||||
// further.
|
||||
return;
|
||||
}
|
||||
streams.add(elis);
|
||||
|
@ -732,6 +737,11 @@ public class BookKeeperJournalManager implements JournalManager {
|
|||
*/
|
||||
List<EditLogLedgerMetadata> getLedgerList(boolean inProgressOk)
|
||||
throws IOException {
|
||||
return getLedgerList(-1, inProgressOk);
|
||||
}
|
||||
|
||||
private List<EditLogLedgerMetadata> getLedgerList(long fromTxId,
|
||||
boolean inProgressOk) throws IOException {
|
||||
List<EditLogLedgerMetadata> ledgers
|
||||
= new ArrayList<EditLogLedgerMetadata>();
|
||||
try {
|
||||
|
@ -744,6 +754,12 @@ public class BookKeeperJournalManager implements JournalManager {
|
|||
try {
|
||||
EditLogLedgerMetadata editLogLedgerMetadata = EditLogLedgerMetadata
|
||||
.read(zkc, legderMetadataPath);
|
||||
if (editLogLedgerMetadata.getLastTxId() != HdfsConstants.INVALID_TXID
|
||||
&& editLogLedgerMetadata.getLastTxId() < fromTxId) {
|
||||
// exclude already read closed edits, but include inprogress edits
|
||||
// as this will be handled in caller
|
||||
continue;
|
||||
}
|
||||
ledgers.add(editLogLedgerMetadata);
|
||||
} catch (KeeperException.NoNodeException e) {
|
||||
LOG.warn("ZNode: " + legderMetadataPath
|
||||
|
|
|
@ -21,7 +21,6 @@ import static org.junit.Assert.*;
|
|||
|
||||
import org.junit.Test;
|
||||
import org.junit.Before;
|
||||
import org.junit.After;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.AfterClass;
|
||||
|
||||
|
@ -34,11 +33,9 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
|
|||
import org.apache.hadoop.hdfs.HAUtil;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
|
||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
|
||||
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogTestUtil;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
|
||||
|
||||
import org.apache.hadoop.ipc.RemoteException;
|
||||
|
@ -352,4 +349,42 @@ public class TestBookKeeperAsHASharedDir {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* NameNode should load the edits correctly if the applicable edits are
|
||||
* present in the BKJM.
|
||||
*/
|
||||
@Test
|
||||
public void testNameNodeMultipleSwitchesUsingBKJM() throws Exception {
|
||||
MiniDFSCluster cluster = null;
|
||||
try {
|
||||
Configuration conf = new Configuration();
|
||||
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
|
||||
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
|
||||
.createJournalURI("/correctEditLogSelection").toString());
|
||||
BKJMUtil.addJournalManagerDefinition(conf);
|
||||
|
||||
cluster = new MiniDFSCluster.Builder(conf)
|
||||
.nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)
|
||||
.manageNameDfsSharedDirs(false).build();
|
||||
NameNode nn1 = cluster.getNameNode(0);
|
||||
NameNode nn2 = cluster.getNameNode(1);
|
||||
cluster.waitActive();
|
||||
cluster.transitionToActive(0);
|
||||
nn1.getRpcServer().rollEditLog(); // Roll Edits from current Active.
|
||||
// Transition to standby current active gracefully.
|
||||
cluster.transitionToStandby(0);
|
||||
// Make the other NameNode active and roll the edits multiple times.
|
||||
cluster.transitionToActive(1);
|
||||
nn2.getRpcServer().rollEditLog();
|
||||
nn2.getRpcServer().rollEditLog();
|
||||
// Now one more failover; NN1 should be able to take over successfully.
|
||||
cluster.transitionToStandby(1);
|
||||
cluster.transitionToActive(0);
|
||||
} finally {
|
||||
if (cluster != null) {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -181,10 +181,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY = "dfs.datanode.socket.reuse.keepalive";
public static final int DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 1000;

// Whether to enable datanode's stale state detection and usage
public static final String DFS_NAMENODE_CHECK_STALE_DATANODE_KEY = "dfs.namenode.check.stale.datanode";
public static final boolean DFS_NAMENODE_CHECK_STALE_DATANODE_DEFAULT = false;
// Whether to enable datanode's stale state detection and usage
// Whether to enable datanode's stale state detection and usage for reads
public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY = "dfs.namenode.avoid.read.stale.datanode";
public static final boolean DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT = false;
// Whether to enable datanode's stale state detection and usage for writes
public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY = "dfs.namenode.avoid.write.stale.datanode";
public static final boolean DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT = false;
// The default value of the time interval for marking datanodes as stale

@ -195,8 +195,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_STALE_DATANODE_MINIMUM_INTERVAL_KEY = "dfs.namenode.stale.datanode.minimum.interval";
public static final int DFS_NAMENODE_STALE_DATANODE_MINIMUM_INTERVAL_DEFAULT = 3; // i.e. min_interval is 3 * heartbeat_interval = 9s

// When the number stale datanodes marked as stale reached this certian ratio,
// stop avoiding writing to stale nodes so as to prevent causing hotspots.
// When the percentage of stale datanodes reaches this ratio,
// allow writing to stale nodes to prevent hotspots.
public static final String DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY = "dfs.namenode.write.stale.datanode.ratio";
public static final float DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_DEFAULT = 0.5f;
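As a usage note, the read/write split introduced by HDFS-4350 is driven by the keys above; a minimal sketch of turning both paths on in configuration (the values are only examples, and 0.5f matches the shipped default):

  Configuration conf = new Configuration();
  conf.setBoolean("dfs.namenode.avoid.read.stale.datanode", true);
  conf.setBoolean("dfs.namenode.avoid.write.stale.datanode", true);
  // Once this fraction of datanodes is stale, writes fall back to using them
  // so the remaining live nodes do not become hotspots.
  conf.setFloat("dfs.namenode.write.stale.datanode.ratio", 0.5f);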
|
||||
|
||||
|
|
|
@ -116,6 +116,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
|
|||
private volatile boolean closed = false;
|
||||
|
||||
private String src;
|
||||
private final long fileId;
|
||||
private final long blockSize;
|
||||
private final DataChecksum checksum;
|
||||
// both dataQueue and ackQueue are protected by dataQueue lock
|
||||
|
@ -1149,7 +1150,8 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
|
|||
long localstart = Time.now();
|
||||
while (true) {
|
||||
try {
|
||||
return dfsClient.namenode.addBlock(src, dfsClient.clientName, block, excludedNodes);
|
||||
return dfsClient.namenode.addBlock(src, dfsClient.clientName,
|
||||
block, excludedNodes, fileId);
|
||||
} catch (RemoteException e) {
|
||||
IOException ue =
|
||||
e.unwrapRemoteException(FileNotFoundException.class,
|
||||
|
@ -1262,20 +1264,21 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
|
|||
return value;
|
||||
}
|
||||
|
||||
private DFSOutputStream(DFSClient dfsClient, String src, long blockSize, Progressable progress,
|
||||
DataChecksum checksum, short replication) throws IOException {
|
||||
private DFSOutputStream(DFSClient dfsClient, String src, Progressable progress,
|
||||
HdfsFileStatus stat, DataChecksum checksum) throws IOException {
|
||||
super(checksum, checksum.getBytesPerChecksum(), checksum.getChecksumSize());
|
||||
int bytesPerChecksum = checksum.getBytesPerChecksum();
|
||||
this.dfsClient = dfsClient;
|
||||
this.src = src;
|
||||
this.blockSize = blockSize;
|
||||
this.blockReplication = replication;
|
||||
this.fileId = stat.getFileId();
|
||||
this.blockSize = stat.getBlockSize();
|
||||
this.blockReplication = stat.getReplication();
|
||||
this.progress = progress;
|
||||
if ((progress != null) && DFSClient.LOG.isDebugEnabled()) {
|
||||
DFSClient.LOG.debug(
|
||||
"Set non-null progress callback on DFSOutputStream " + src);
|
||||
}
|
||||
|
||||
final int bytesPerChecksum = checksum.getBytesPerChecksum();
|
||||
if ( bytesPerChecksum < 1 || blockSize % bytesPerChecksum != 0) {
|
||||
throw new IOException("io.bytes.per.checksum(" + bytesPerChecksum +
|
||||
") and blockSize(" + blockSize +
|
||||
|
@ -1287,19 +1290,27 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
|
|||
}
|
||||
|
||||
/** Construct a new output stream for creating a file. */
|
||||
private DFSOutputStream(DFSClient dfsClient, String src, FsPermission masked,
|
||||
EnumSet<CreateFlag> flag, boolean createParent, short replication,
|
||||
long blockSize, Progressable progress, int buffersize,
|
||||
private DFSOutputStream(DFSClient dfsClient, String src, HdfsFileStatus stat,
|
||||
EnumSet<CreateFlag> flag, Progressable progress,
|
||||
DataChecksum checksum) throws IOException {
|
||||
this(dfsClient, src, blockSize, progress, checksum, replication);
|
||||
this(dfsClient, src, progress, stat, checksum);
|
||||
this.shouldSyncBlock = flag.contains(CreateFlag.SYNC_BLOCK);
|
||||
|
||||
computePacketChunkSize(dfsClient.getConf().writePacketSize,
|
||||
checksum.getBytesPerChecksum());
|
||||
|
||||
streamer = new DataStreamer();
|
||||
}
|
||||
|
||||
static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
|
||||
FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
|
||||
short replication, long blockSize, Progressable progress, int buffersize,
|
||||
DataChecksum checksum) throws IOException {
|
||||
final HdfsFileStatus stat;
|
||||
try {
|
||||
dfsClient.namenode.create(
|
||||
src, masked, dfsClient.clientName, new EnumSetWritable<CreateFlag>(flag), createParent, replication, blockSize);
|
||||
stat = dfsClient.namenode.create(src, masked, dfsClient.clientName,
|
||||
new EnumSetWritable<CreateFlag>(flag), createParent, replication,
|
||||
blockSize);
|
||||
} catch(RemoteException re) {
|
||||
throw re.unwrapRemoteException(AccessControlException.class,
|
||||
DSQuotaExceededException.class,
|
||||
|
@ -1311,30 +1322,20 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
|
|||
UnresolvedPathException.class,
|
||||
SnapshotAccessControlException.class);
|
||||
}
|
||||
streamer = new DataStreamer();
|
||||
}
|
||||
|
||||
static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
|
||||
FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
|
||||
short replication, long blockSize, Progressable progress, int buffersize,
|
||||
DataChecksum checksum) throws IOException {
|
||||
final DFSOutputStream out = new DFSOutputStream(dfsClient, src, masked,
|
||||
flag, createParent, replication, blockSize, progress, buffersize,
|
||||
checksum);
|
||||
out.streamer.start();
|
||||
final DFSOutputStream out = new DFSOutputStream(dfsClient, src, stat,
|
||||
flag, progress, checksum);
|
||||
out.start();
|
||||
return out;
|
||||
}
|
||||
|
||||
/** Construct a new output stream for append. */
|
||||
private DFSOutputStream(DFSClient dfsClient, String src, int buffersize, Progressable progress,
|
||||
LocatedBlock lastBlock, HdfsFileStatus stat,
|
||||
private DFSOutputStream(DFSClient dfsClient, String src,
|
||||
Progressable progress, LocatedBlock lastBlock, HdfsFileStatus stat,
|
||||
DataChecksum checksum) throws IOException {
|
||||
this(dfsClient, src, stat.getBlockSize(), progress, checksum, stat.getReplication());
|
||||
this(dfsClient, src, progress, stat, checksum);
|
||||
initialFileSize = stat.getLen(); // length of file when opened
|
||||
|
||||
//
|
||||
// The last partial block of the file has to be filled.
|
||||
//
|
||||
if (lastBlock != null) {
|
||||
// indicate that we are appending to an existing block
|
||||
bytesCurBlock = lastBlock.getBlockSize();
|
||||
|
@ -1349,9 +1350,9 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
|
|||
static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
|
||||
int buffersize, Progressable progress, LocatedBlock lastBlock,
|
||||
HdfsFileStatus stat, DataChecksum checksum) throws IOException {
|
||||
final DFSOutputStream out = new DFSOutputStream(dfsClient, src, buffersize,
|
||||
final DFSOutputStream out = new DFSOutputStream(dfsClient, src,
|
||||
progress, lastBlock, stat, checksum);
|
||||
out.streamer.start();
|
||||
out.start();
|
||||
return out;
|
||||
}
|
||||
|
||||
|
@ -1718,6 +1719,10 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
|
|||
isClosed();
|
||||
}
|
||||
|
||||
private synchronized void start() {
|
||||
streamer.start();
|
||||
}
|
||||
|
||||
/**
|
||||
* Aborts this output stream and releases any system
|
||||
* resources associated with this stream.
|
||||
|
|
|
@ -134,7 +134,7 @@ public class DFSUtil {
|
|||
/**
|
||||
* Comparator for sorting DataNodeInfo[] based on decommissioned/stale states.
|
||||
* Decommissioned/stale nodes are moved to the end of the array on sorting
|
||||
* with this compartor.
|
||||
* with this comparator.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public static class DecomStaleComparator implements Comparator<DatanodeInfo> {
|
||||
|
@ -144,7 +144,7 @@ public class DFSUtil {
|
|||
* Constructor of DecomStaleComparator
|
||||
*
|
||||
* @param interval
|
||||
* The time invertal for marking datanodes as stale is passed from
|
||||
* The time interval for marking datanodes as stale is passed from
|
||||
* outside, since the interval may be changed dynamically
|
||||
*/
|
||||
public DecomStaleComparator(long interval) {
|
||||
|
@ -780,6 +780,13 @@ public class DFSUtil {
|
|||
|
||||
// Add the default URI if it is an HDFS URI.
|
||||
URI defaultUri = FileSystem.getDefaultUri(conf);
|
||||
// Check if defaultUri is in ip:port format
// and convert it to hostname:port format.
|
||||
if (defaultUri != null && (defaultUri.getPort() != -1)) {
|
||||
defaultUri = createUri(defaultUri.getScheme(),
|
||||
NetUtils.createSocketAddr(defaultUri.getHost(),
|
||||
defaultUri.getPort()));
|
||||
}
|
||||
if (defaultUri != null &&
|
||||
HdfsConstants.HDFS_URI_SCHEME.equals(defaultUri.getScheme()) &&
|
||||
!nonPreferredUris.contains(defaultUri)) {
|
||||
|
@ -939,6 +946,11 @@ public class DFSUtil {
|
|||
return capacity <= 0 ? 0 : (remaining * 100.0f)/capacity;
|
||||
}
|
||||
|
||||
/** Convert percentage to a string. */
|
||||
public static String percent2String(double percentage) {
|
||||
return StringUtils.format("%.2f%%", percentage);
|
||||
}
|
||||
|
||||
/**
|
||||
* Round bytes to GiB (gibibyte)
|
||||
* @param bytes number of bytes
|
||||
|
|
|
@ -151,6 +151,8 @@ public interface ClientProtocol {
|
|||
* @param replication block replication factor.
|
||||
* @param blockSize maximum block size.
|
||||
*
|
||||
* @return the status of the created file; it may be null if the server
* does not support returning the file status
|
||||
* @throws AccessControlException If access is denied
|
||||
* @throws AlreadyBeingCreatedException if the path does not exist.
|
||||
* @throws DSQuotaExceededException If file creation violates disk space
|
||||
|
@ -170,13 +172,14 @@ public interface ClientProtocol {
|
|||
* RuntimeExceptions:
|
||||
* @throws InvalidPathException Path <code>src</code> is invalid
|
||||
*/
|
||||
public void create(String src, FsPermission masked, String clientName,
|
||||
EnumSetWritable<CreateFlag> flag, boolean createParent,
|
||||
short replication, long blockSize) throws AccessControlException,
|
||||
AlreadyBeingCreatedException, DSQuotaExceededException,
|
||||
FileAlreadyExistsException, FileNotFoundException,
|
||||
NSQuotaExceededException, ParentNotDirectoryException, SafeModeException,
|
||||
UnresolvedLinkException, SnapshotAccessControlException, IOException;
|
||||
public HdfsFileStatus create(String src, FsPermission masked,
|
||||
String clientName, EnumSetWritable<CreateFlag> flag,
|
||||
boolean createParent, short replication, long blockSize)
|
||||
throws AccessControlException, AlreadyBeingCreatedException,
|
||||
DSQuotaExceededException, FileAlreadyExistsException,
|
||||
FileNotFoundException, NSQuotaExceededException,
|
||||
ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
|
||||
SnapshotAccessControlException, IOException;
|
||||
|
||||
/**
|
||||
* Append to the end of the file.
|
||||
|
@ -302,6 +305,7 @@ public interface ClientProtocol {
|
|||
* @param previous previous block
|
||||
* @param excludeNodes a list of nodes that should not be
|
||||
* allocated for the current block
|
||||
* @param fileId the id uniquely identifying a file
|
||||
*
|
||||
* @return LocatedBlock allocated block information.
|
||||
*
|
||||
|
@ -316,7 +320,7 @@ public interface ClientProtocol {
|
|||
*/
|
||||
@Idempotent
|
||||
public LocatedBlock addBlock(String src, String clientName,
|
||||
ExtendedBlock previous, DatanodeInfo[] excludeNodes)
|
||||
ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId)
|
||||
throws AccessControlException, FileNotFoundException,
|
||||
NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
|
||||
IOException;
|
||||
|
|
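A simplified sketch of the client flow these signature changes set up: create() now returns the file's status, and its id is threaded into later addBlock() calls (DFSOutputStream does the real plumbing; 'namenode' here stands for any ClientProtocol proxy, and the remaining arguments are the ones documented above):

  HdfsFileStatus stat = namenode.create(src, masked, clientName,
      new EnumSetWritable<CreateFlag>(flag), createParent, replication, blockSize);
  long fileId = stat.getFileId();
  LocatedBlock blk = namenode.addBlock(src, clientName,
      null /* previous */, null /* excludeNodes */, fileId);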
|
@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.protocol;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.StringUtils;
import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.long2String;

@InterfaceAudience.Private
@InterfaceStability.Evolving

@ -41,9 +41,9 @@ public class DSQuotaExceededException extends QuotaExceededException {
  public String getMessage() {
    String msg = super.getMessage();
    if (msg == null) {
      return "The DiskSpace quota" + (pathName==null?"":(" of " + pathName)) +
          " is exceeded: quota=" + StringUtils.humanReadableInt(quota) +
          " diskspace consumed=" + StringUtils.humanReadableInt(count);
      return "The DiskSpace quota" + (pathName==null?"": " of " + pathName)
          + " is exceeded: quota = " + quota + " B = " + long2String(quota, "B", 2)
          + " but diskspace consumed = " + count + " B = " + long2String(count, "B", 2);
    } else {
      return msg;
    }
@ -17,10 +17,13 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.protocol;
|
||||
|
||||
import static org.apache.hadoop.hdfs.DFSUtil.percent2String;
|
||||
|
||||
import java.util.Date;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.net.NetworkTopology;
|
||||
|
@ -244,8 +247,8 @@ public class DatanodeInfo extends DatanodeID implements Node {
|
|||
buffer.append("DFS Used: "+u+" ("+StringUtils.byteDesc(u)+")"+"\n");
|
||||
buffer.append("Non DFS Used: "+nonDFSUsed+" ("+StringUtils.byteDesc(nonDFSUsed)+")"+"\n");
|
||||
buffer.append("DFS Remaining: " +r+ " ("+StringUtils.byteDesc(r)+")"+"\n");
|
||||
buffer.append("DFS Used%: "+StringUtils.limitDecimalTo2(usedPercent)+"%\n");
|
||||
buffer.append("DFS Remaining%: "+StringUtils.limitDecimalTo2(remainingPercent)+"%\n");
|
||||
buffer.append("DFS Used%: "+percent2String(usedPercent) + "\n");
|
||||
buffer.append("DFS Remaining%: "+percent2String(remainingPercent) + "\n");
|
||||
buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
|
||||
return buffer.toString();
|
||||
}
|
||||
|
@ -269,7 +272,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
|
|||
}
|
||||
buffer.append(" " + c + "(" + StringUtils.byteDesc(c)+")");
|
||||
buffer.append(" " + u + "(" + StringUtils.byteDesc(u)+")");
|
||||
buffer.append(" " + StringUtils.limitDecimalTo2(((1.0*u)/c)*100)+"%");
|
||||
buffer.append(" " + percent2String(u/(double)c));
|
||||
buffer.append(" " + r + "(" + StringUtils.byteDesc(r)+")");
|
||||
buffer.append(" " + new Date(lastUpdate));
|
||||
return buffer.toString();
|
||||
|
|
|
@ -40,6 +40,7 @@ public class HdfsFileStatus {
|
|||
private FsPermission permission;
|
||||
private String owner;
|
||||
private String group;
|
||||
private long fileId;
|
||||
|
||||
public static final byte[] EMPTY_NAME = new byte[0];
|
||||
|
||||
|
@ -55,11 +56,12 @@ public class HdfsFileStatus {
|
|||
* @param owner the owner of the path
|
||||
* @param group the group of the path
|
||||
* @param path the local name in java UTF8 encoding the same as that in-memory
|
||||
* @param fileId the file id
|
||||
*/
|
||||
public HdfsFileStatus(long length, boolean isdir, int block_replication,
|
||||
long blocksize, long modification_time, long access_time,
|
||||
FsPermission permission, String owner, String group,
|
||||
byte[] symlink, byte[] path) {
|
||||
byte[] symlink, byte[] path, long fileId) {
|
||||
this.length = length;
|
||||
this.isdir = isdir;
|
||||
this.block_replication = (short)block_replication;
|
||||
|
@ -75,6 +77,7 @@ public class HdfsFileStatus {
|
|||
this.group = (group == null) ? "" : group;
|
||||
this.symlink = symlink;
|
||||
this.path = path;
|
||||
this.fileId = fileId;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -223,4 +226,8 @@ public class HdfsFileStatus {
|
|||
final public byte[] getSymlinkInBytes() {
|
||||
return symlink;
|
||||
}
|
||||
|
||||
final public long getFileId() {
|
||||
return fileId;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -44,19 +44,19 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
|
|||
* @param group group
|
||||
* @param symlink symbolic link
|
||||
* @param path local path name in java UTF8 format
|
||||
* @param fileId the file id
|
||||
* @param locations block locations
|
||||
*/
|
||||
public HdfsLocatedFileStatus(long length, boolean isdir,
|
||||
int block_replication,
|
||||
long blocksize, long modification_time, long access_time,
|
||||
FsPermission permission, String owner, String group,
|
||||
byte[] symlink, byte[] path, LocatedBlocks locations) {
|
||||
super(length, isdir, block_replication, blocksize, modification_time,
|
||||
access_time, permission, owner, group, symlink, path);
|
||||
int block_replication, long blocksize, long modification_time,
|
||||
long access_time, FsPermission permission, String owner, String group,
|
||||
byte[] symlink, byte[] path, long fileId, LocatedBlocks locations) {
|
||||
super(length, isdir, block_replication, blocksize, modification_time,
|
||||
access_time, permission, owner, group, symlink, path, fileId);
|
||||
this.locations = locations;
|
||||
}
|
||||
}
|
||||
|
||||
public LocatedBlocks getBlockLocations() {
|
||||
return locations;
|
||||
}
|
||||
public LocatedBlocks getBlockLocations() {
|
||||
return locations;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -40,8 +40,9 @@ public class SnapshottableDirectoryStatus {
|
|||
public SnapshottableDirectoryStatus(long modification_time, long access_time,
|
||||
FsPermission permission, String owner, String group, byte[] localName,
|
||||
int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
|
||||
//TODO: fix fileId
|
||||
this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
|
||||
access_time, permission, owner, group, null, localName);
|
||||
access_time, permission, owner, group, null, localName, 0L);
|
||||
this.snapshotNumber = snapshotNumber;
|
||||
this.snapshotQuota = snapshotQuota;
|
||||
this.parentFullPath = parentFullPath;
|
||||
|
|
|
@ -297,14 +297,19 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
|
|||
public CreateResponseProto create(RpcController controller,
|
||||
CreateRequestProto req) throws ServiceException {
|
||||
try {
|
||||
server.create(req.getSrc(), PBHelper.convert(req.getMasked()),
|
||||
req.getClientName(), PBHelper.convert(req.getCreateFlag()),
|
||||
req.getCreateParent(), (short) req.getReplication(),
|
||||
req.getBlockSize());
|
||||
HdfsFileStatus result = server.create(req.getSrc(),
|
||||
PBHelper.convert(req.getMasked()), req.getClientName(),
|
||||
PBHelper.convert(req.getCreateFlag()), req.getCreateParent(),
|
||||
(short) req.getReplication(), req.getBlockSize());
|
||||
|
||||
if (result != null) {
|
||||
return CreateResponseProto.newBuilder().setFs(PBHelper.convert(result))
|
||||
.build();
|
||||
}
|
||||
return VOID_CREATE_RESPONSE;
|
||||
} catch (IOException e) {
|
||||
throw new ServiceException(e);
|
||||
}
|
||||
return VOID_CREATE_RESPONSE;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -377,13 +382,14 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
|
|||
|
||||
try {
|
||||
List<DatanodeInfoProto> excl = req.getExcludeNodesList();
|
||||
LocatedBlock result = server.addBlock(req.getSrc(), req.getClientName(),
|
||||
LocatedBlock result = server.addBlock(
|
||||
req.getSrc(),
|
||||
req.getClientName(),
|
||||
req.hasPrevious() ? PBHelper.convert(req.getPrevious()) : null,
|
||||
(excl == null ||
|
||||
excl.size() == 0) ? null :
|
||||
PBHelper.convert(excl.toArray(new DatanodeInfoProto[excl.size()])));
|
||||
return AddBlockResponseProto.newBuilder().setBlock(
|
||||
PBHelper.convert(result)).build();
|
||||
(excl == null || excl.size() == 0) ? null : PBHelper.convert(excl
|
||||
.toArray(new DatanodeInfoProto[excl.size()])), req.getFileId());
|
||||
return AddBlockResponseProto.newBuilder()
|
||||
.setBlock(PBHelper.convert(result)).build();
|
||||
} catch (IOException e) {
|
||||
throw new ServiceException(e);
|
||||
}
|
||||
|
|
|
@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Comple
|
|||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;
|
||||
|
@ -110,6 +111,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
|
|||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
|
||||
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeId;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
|
||||
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
|
||||
import org.apache.hadoop.io.EnumSetWritable;
|
||||
|
@ -204,13 +206,14 @@ public class ClientNamenodeProtocolTranslatorPB implements
|
|||
}
|
||||
|
||||
@Override
|
||||
public void create(String src, FsPermission masked, String clientName,
|
||||
EnumSetWritable<CreateFlag> flag, boolean createParent,
|
||||
short replication, long blockSize) throws AccessControlException,
|
||||
AlreadyBeingCreatedException, DSQuotaExceededException,
|
||||
FileAlreadyExistsException, FileNotFoundException,
|
||||
NSQuotaExceededException, ParentNotDirectoryException, SafeModeException,
|
||||
UnresolvedLinkException, IOException {
|
||||
public HdfsFileStatus create(String src, FsPermission masked,
|
||||
String clientName, EnumSetWritable<CreateFlag> flag,
|
||||
boolean createParent, short replication, long blockSize)
|
||||
throws AccessControlException, AlreadyBeingCreatedException,
|
||||
DSQuotaExceededException, FileAlreadyExistsException,
|
||||
FileNotFoundException, NSQuotaExceededException,
|
||||
ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
|
||||
IOException {
|
||||
CreateRequestProto req = CreateRequestProto.newBuilder()
|
||||
.setSrc(src)
|
||||
.setMasked(PBHelper.convert(masked))
|
||||
|
@ -221,7 +224,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
|
|||
.setBlockSize(blockSize)
|
||||
.build();
|
||||
try {
|
||||
rpcProxy.create(null, req);
|
||||
CreateResponseProto res = rpcProxy.create(null, req);
|
||||
return res.hasFs() ? PBHelper.convert(res.getFs()) : null;
|
||||
} catch (ServiceException e) {
|
||||
throw ProtobufHelper.getRemoteException(e);
|
||||
}
|
||||
|
@ -305,15 +309,15 @@ public class ClientNamenodeProtocolTranslatorPB implements
|
|||
throw ProtobufHelper.getRemoteException(e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public LocatedBlock addBlock(String src, String clientName,
|
||||
ExtendedBlock previous, DatanodeInfo[] excludeNodes)
|
||||
ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId)
|
||||
throws AccessControlException, FileNotFoundException,
|
||||
NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
|
||||
IOException {
|
||||
AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder().setSrc(src)
|
||||
.setClientName(clientName);
|
||||
AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder()
|
||||
.setSrc(src).setClientName(clientName).setFileId(fileId);
|
||||
if (previous != null)
|
||||
req.setPrevious(PBHelper.convert(previous));
|
||||
if (excludeNodes != null)
|
||||
|
|
|
@ -113,6 +113,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
|
|||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
|
||||
import org.apache.hadoop.hdfs.server.common.StorageInfo;
|
||||
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeId;
|
||||
import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
|
||||
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
|
||||
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
|
||||
|
@ -1052,6 +1053,7 @@ public class PBHelper {
|
|||
fs.getFileType().equals(FileType.IS_SYMLINK) ?
|
||||
fs.getSymlink().toByteArray() : null,
|
||||
fs.getPath().toByteArray(),
|
||||
fs.hasFileId()? fs.getFileId(): INodeId.GRANDFATHER_INODE_ID,
|
||||
fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null);
|
||||
}
|
||||
|
||||
|
@ -1091,6 +1093,7 @@ public class PBHelper {
|
|||
setPermission(PBHelper.convert(fs.getPermission())).
|
||||
setOwner(fs.getOwner()).
|
||||
setGroup(fs.getGroup()).
|
||||
setFileId(fs.getFileId()).
|
||||
setPath(ByteString.copyFrom(fs.getLocalNameInBytes()));
|
||||
if (fs.isSymlink()) {
|
||||
builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
|
||||
|
|
|
@ -126,15 +126,26 @@ public class DatanodeManager {
  private final long heartbeatExpireInterval;
  /** Ask Datanode only up to this many blocks to delete. */
  final int blockInvalidateLimit;

  /** Whether or not to check stale DataNodes for read/write */
  private final boolean checkForStaleDataNodes;

  /** The interval for judging stale DataNodes for read/write */
  private final long staleInterval;

  /** Whether or not to avoid using stale DataNodes for writing */
  private volatile boolean avoidStaleDataNodesForWrite;
  /** Whether or not to avoid using stale DataNodes for reading */
  private final boolean avoidStaleDataNodesForRead;

  /**
   * Whether or not to avoid using stale DataNodes for writing.
   * Note that, even if this is configured, the policy may be
   * temporarily disabled when a high percentage of the nodes
   * are marked as stale.
   */
  private final boolean avoidStaleDataNodesForWrite;

  /**
   * When the ratio of stale datanodes reaches this number, stop avoiding
   * writing to stale datanodes, i.e., continue using stale nodes for writing.
   */
  private final float ratioUseStaleDataNodesForWrite;

  /** The number of stale DataNodes */
  private volatile int numStaleNodes;
|
@ -183,14 +194,23 @@ public class DatanodeManager {
|
|||
DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit);
|
||||
LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
|
||||
+ "=" + this.blockInvalidateLimit);
|
||||
|
||||
checkForStaleDataNodes = conf.getBoolean(
|
||||
DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_DEFAULT);
|
||||
|
||||
staleInterval = getStaleIntervalFromConf(conf, heartbeatExpireInterval);
|
||||
avoidStaleDataNodesForWrite = getAvoidStaleForWriteFromConf(conf,
|
||||
checkForStaleDataNodes);
|
||||
this.avoidStaleDataNodesForRead = conf.getBoolean(
|
||||
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT);
|
||||
this.avoidStaleDataNodesForWrite = conf.getBoolean(
|
||||
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT);
|
||||
this.staleInterval = getStaleIntervalFromConf(conf, heartbeatExpireInterval);
|
||||
this.ratioUseStaleDataNodesForWrite = conf.getFloat(
|
||||
DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_DEFAULT);
|
||||
Preconditions.checkArgument(
|
||||
(ratioUseStaleDataNodesForWrite > 0 &&
|
||||
ratioUseStaleDataNodesForWrite <= 1.0f),
|
||||
DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY +
|
||||
" = '" + ratioUseStaleDataNodesForWrite + "' is invalid. " +
|
||||
"It should be a positive non-zero float value, not greater than 1.0f.");
|
||||
}
|
||||
|
||||
private static long getStaleIntervalFromConf(Configuration conf,
|
||||
|
@ -230,22 +250,6 @@ public class DatanodeManager {
|
|||
return staleInterval;
|
||||
}
|
||||
|
||||
static boolean getAvoidStaleForWriteFromConf(Configuration conf,
|
||||
boolean checkForStale) {
|
||||
boolean avoid = conf.getBoolean(
|
||||
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT);
|
||||
boolean avoidStaleDataNodesForWrite = checkForStale && avoid;
|
||||
if (!checkForStale && avoid) {
|
||||
LOG.warn("Cannot set "
|
||||
+ DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY
|
||||
+ " as false while setting "
|
||||
+ DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY
|
||||
+ " as true.");
|
||||
}
|
||||
return avoidStaleDataNodesForWrite;
|
||||
}
|
||||
|
||||
void activate(final Configuration conf) {
|
||||
final DecommissionManager dm = new DecommissionManager(namesystem, blockManager);
|
||||
this.decommissionthread = new Daemon(dm.new Monitor(
|
||||
|
@ -299,7 +303,7 @@ public class DatanodeManager {
|
|||
client = new NodeBase(rName + NodeBase.PATH_SEPARATOR_STR + targethost);
|
||||
}
|
||||
|
||||
Comparator<DatanodeInfo> comparator = checkForStaleDataNodes ?
|
||||
Comparator<DatanodeInfo> comparator = avoidStaleDataNodesForRead ?
|
||||
new DFSUtil.DecomStaleComparator(staleInterval) :
|
||||
DFSUtil.DECOM_COMPARATOR;
|
||||
|
||||
|
@ -825,32 +829,20 @@ public class DatanodeManager {
  }

  /* Getter and Setter for stale DataNodes related attributes */

  /**
   * @return whether or not to avoid writing to stale datanodes
   */
  public boolean isAvoidingStaleDataNodesForWrite() {
    return avoidStaleDataNodesForWrite;
  }

  /**
   * Set the value of {@link DatanodeManager#avoidStaleDataNodesForWrite}.
   * The HeartbeatManager disable avoidStaleDataNodesForWrite when more than
   * half of the DataNodes are marked as stale.
   * Whether stale datanodes should be avoided as targets on the write path.
   * The result of this function may change if the number of stale datanodes
   * eclipses a configurable threshold.
   *
   * @param avoidStaleDataNodesForWrite
   *          The value to set to
   *          {@link DatanodeManager#avoidStaleDataNodesForWrite}
   * @return whether stale datanodes should be avoided on the write path
   */
  void setAvoidStaleDataNodesForWrite(boolean avoidStaleDataNodesForWrite) {
    this.avoidStaleDataNodesForWrite = avoidStaleDataNodesForWrite;
  }

  /**
   * @return Whether or not to check stale DataNodes for R/W
   */
  boolean isCheckingForStaleDataNodes() {
    return checkForStaleDataNodes;
  public boolean shouldAvoidStaleDataNodesForWrite() {
    // If # stale exceeds maximum staleness ratio, disable stale
    // datanode avoidance on the write path
    return avoidStaleDataNodesForWrite &&
        (numStaleNodes <= heartbeatManager.getLiveDatanodeCount()
            * ratioUseStaleDataNodesForWrite);
  }

  /**
|
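To make the cutoff concrete, here is a standalone illustration of the threshold arithmetic used by shouldAvoidStaleDataNodesForWrite() above (not HDFS code; the 0.5f ratio is only an example value for dfs.namenode.write.stale.datanode.ratio):

    // Illustration only, not part of this patch.
    public class StaleWriteThresholdSketch {
      // Mirrors the shape of DatanodeManager#shouldAvoidStaleDataNodesForWrite().
      static boolean shouldAvoid(boolean avoidConfigured, int staleNodes,
          int liveNodes, float ratio) {
        return avoidConfigured && (staleNodes <= liveNodes * ratio);
      }

      public static void main(String[] args) {
        // With 100 live datanodes and ratio 0.5f, avoidance stays active up to
        // 50 stale nodes and is suspended beyond that.
        System.out.println(shouldAvoid(true, 40, 100, 0.5f)); // true
        System.out.println(shouldAvoid(true, 60, 100, 0.5f)); // false
      }
    }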
@ -967,7 +959,7 @@ public class DatanodeManager {
      port = DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT;
    } else {
      hostStr = hostLine.substring(0, idx);
      port = Integer.valueOf(hostLine.substring(idx));
      port = Integer.valueOf(hostLine.substring(idx+1));
    }

    if (InetAddresses.isInetAddress(hostStr)) {
|
|
|
@ -30,8 +30,6 @@ import org.apache.hadoop.hdfs.server.namenode.Namesystem;
|
|||
import org.apache.hadoop.util.Daemon;
|
||||
import org.apache.hadoop.util.Time;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
/**
|
||||
* Manage the heartbeats received from datanodes.
|
||||
* The datanode list and statistics are synchronized
|
||||
|
@ -56,16 +54,7 @@ class HeartbeatManager implements DatanodeStatistics {
|
|||
private final long heartbeatRecheckInterval;
|
||||
/** Heartbeat monitor thread */
|
||||
private final Daemon heartbeatThread = new Daemon(new Monitor());
|
||||
/**
|
||||
* The initial setting of end user which indicates whether or not to avoid
|
||||
* writing to stale datanodes.
|
||||
*/
|
||||
private final boolean initialAvoidWriteStaleNodes;
|
||||
/**
|
||||
* When the ratio of stale datanodes reaches this number, stop avoiding
|
||||
* writing to stale datanodes, i.e., continue using stale nodes for writing.
|
||||
*/
|
||||
private final float ratioUseStaleDataNodesForWrite;
|
||||
|
||||
|
||||
final Namesystem namesystem;
|
||||
final BlockManager blockManager;
|
||||
|
@ -74,30 +63,25 @@ class HeartbeatManager implements DatanodeStatistics {
|
|||
final BlockManager blockManager, final Configuration conf) {
|
||||
this.namesystem = namesystem;
|
||||
this.blockManager = blockManager;
|
||||
boolean checkStaleNodes = conf.getBoolean(
|
||||
DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_DEFAULT);
|
||||
boolean avoidStaleDataNodesForWrite = conf.getBoolean(
|
||||
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT);
|
||||
long recheckInterval = conf.getInt(
|
||||
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 min
|
||||
long staleInterval = conf.getLong(
|
||||
DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);// 30s
|
||||
this.initialAvoidWriteStaleNodes = DatanodeManager
|
||||
.getAvoidStaleForWriteFromConf(conf, checkStaleNodes);
|
||||
this.ratioUseStaleDataNodesForWrite = conf.getFloat(
|
||||
DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_DEFAULT);
|
||||
Preconditions.checkArgument(
|
||||
(ratioUseStaleDataNodesForWrite > 0 &&
|
||||
ratioUseStaleDataNodesForWrite <= 1.0f),
|
||||
DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY +
|
||||
" = '" + ratioUseStaleDataNodesForWrite + "' is invalid. " +
|
||||
"It should be a positive non-zero float value, not greater than 1.0f.");
|
||||
|
||||
this.heartbeatRecheckInterval = (checkStaleNodes
|
||||
&& initialAvoidWriteStaleNodes
|
||||
&& staleInterval < recheckInterval) ? staleInterval : recheckInterval;
|
||||
|
||||
if (avoidStaleDataNodesForWrite && staleInterval < recheckInterval) {
|
||||
this.heartbeatRecheckInterval = staleInterval;
|
||||
LOG.info("Setting heartbeat recheck interval to " + staleInterval
|
||||
+ " since " + DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY
|
||||
+ " is less than "
|
||||
+ DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY);
|
||||
} else {
|
||||
this.heartbeatRecheckInterval = recheckInterval;
|
||||
}
|
||||
}
|
||||
|
||||
void activate(Configuration conf) {
|
||||
|
@ -242,7 +226,6 @@ class HeartbeatManager implements DatanodeStatistics {
|
|||
if (namesystem.isInSafeMode()) {
|
||||
return;
|
||||
}
|
||||
boolean checkStaleNodes = dm.isCheckingForStaleDataNodes();
|
||||
boolean allAlive = false;
|
||||
while (!allAlive) {
|
||||
// locate the first dead node.
|
||||
|
@ -254,29 +237,14 @@ class HeartbeatManager implements DatanodeStatistics {
|
|||
if (dead == null && dm.isDatanodeDead(d)) {
|
||||
stats.incrExpiredHeartbeats();
|
||||
dead = d;
|
||||
if (!checkStaleNodes) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (checkStaleNodes &&
|
||||
d.isStale(dm.getStaleInterval())) {
|
||||
if (d.isStale(dm.getStaleInterval())) {
|
||||
numOfStaleNodes++;
|
||||
}
|
||||
}
|
||||
|
||||
// Change whether to avoid using stale datanodes for writing
|
||||
// based on proportion of stale datanodes
|
||||
if (checkStaleNodes) {
|
||||
dm.setNumStaleNodes(numOfStaleNodes);
|
||||
if (numOfStaleNodes >
|
||||
datanodes.size() * ratioUseStaleDataNodesForWrite) {
|
||||
dm.setAvoidStaleDataNodesForWrite(false);
|
||||
} else {
|
||||
if (this.initialAvoidWriteStaleNodes) {
|
||||
dm.setAvoidStaleDataNodesForWrite(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
// Set the number of stale nodes in the DatanodeManager
|
||||
dm.setNumStaleNodes(numOfStaleNodes);
|
||||
}
|
||||
|
||||
allAlive = dead == null;
|
||||
|
|
|
@ -905,7 +905,7 @@ public abstract class Storage extends StorageInfo {
|
|||
props.setProperty("storageType", storageType.toString());
|
||||
props.setProperty("namespaceID", String.valueOf(namespaceID));
|
||||
// Set clusterID in version with federation support
|
||||
if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
|
||||
if (versionSupportsFederation()) {
|
||||
props.setProperty("clusterID", clusterID);
|
||||
}
|
||||
props.setProperty("cTime", String.valueOf(cTime));
|
||||
|
|
|
@ -18,6 +18,8 @@
|
|||
package org.apache.hadoop.hdfs.server.common;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
|
||||
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
|
||||
|
||||
import com.google.common.base.Joiner;
|
||||
|
||||
|
@ -77,6 +79,10 @@ public class StorageInfo {
|
|||
namespaceID = from.namespaceID;
|
||||
cTime = from.cTime;
|
||||
}
|
||||
|
||||
public boolean versionSupportsFederation() {
|
||||
return LayoutVersion.supports(Feature.FEDERATION, layoutVersion);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
|
|
|
@ -123,6 +123,10 @@ public class CheckpointSignature extends StorageInfo
|
|||
blockpoolID.equals(si.getBlockPoolID());
|
||||
}
|
||||
|
||||
boolean namespaceIdMatches(FSImage si) {
|
||||
return namespaceID == si.getStorage().namespaceID;
|
||||
}
|
||||
|
||||
void validateStorageInfo(FSImage si) throws IOException {
|
||||
if (!isSameCluster(si)
|
||||
|| !storageVersionMatches(si.getStorage())) {
|
||||
|
|
|
@ -569,12 +569,10 @@ class ClusterJspHelper {
      toXmlItemBlock(doc, "DFS Remaining", StringUtils.byteDesc(free));

      // dfsUsedPercent
      toXmlItemBlock(doc, "DFS Used%",
          StringUtils.limitDecimalTo2(dfsUsedPercent)+ "%");
      toXmlItemBlock(doc, "DFS Used%", DFSUtil.percent2String(dfsUsedPercent));

      // dfsRemainingPercent
      toXmlItemBlock(doc, "DFS Remaining%",
          StringUtils.limitDecimalTo2(dfsRemainingPercent) + "%");
      toXmlItemBlock(doc, "DFS Remaining%", DFSUtil.percent2String(dfsRemainingPercent));

      doc.endTag(); // storage
|
|
|
@ -2157,7 +2157,8 @@ public class FSDirectory implements Closeable {
|
|||
node.getUserName(snapshot),
|
||||
node.getGroupName(snapshot),
|
||||
node.isSymlink() ? ((INodeSymlink)node).getSymlink() : null,
|
||||
path);
|
||||
path,
|
||||
node.getId());
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2194,6 +2195,7 @@ public class FSDirectory implements Closeable {
|
|||
node.getGroupName(snapshot),
|
||||
node.isSymlink() ? ((INodeSymlink)node).getSymlink() : null,
|
||||
path,
|
||||
node.getId(),
|
||||
loc);
|
||||
}
|
||||
|
||||
|
|
|
@ -1791,16 +1791,18 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
* Create a new file entry in the namespace.
|
||||
*
|
||||
* For description of parameters and exceptions thrown see
|
||||
* {@link ClientProtocol#create()}
|
||||
* {@link ClientProtocol#create()}, except it returns valid file status
|
||||
* upon success
|
||||
*/
|
||||
void startFile(String src, PermissionStatus permissions, String holder,
|
||||
String clientMachine, EnumSet<CreateFlag> flag, boolean createParent,
|
||||
short replication, long blockSize) throws AccessControlException,
|
||||
SafeModeException, FileAlreadyExistsException, UnresolvedLinkException,
|
||||
HdfsFileStatus startFile(String src, PermissionStatus permissions,
|
||||
String holder, String clientMachine, EnumSet<CreateFlag> flag,
|
||||
boolean createParent, short replication, long blockSize)
|
||||
throws AccessControlException, SafeModeException,
|
||||
FileAlreadyExistsException, UnresolvedLinkException,
|
||||
FileNotFoundException, ParentNotDirectoryException, IOException {
|
||||
try {
|
||||
startFileInt(src, permissions, holder, clientMachine, flag, createParent,
|
||||
replication, blockSize);
|
||||
return startFileInt(src, permissions, holder, clientMachine, flag,
|
||||
createParent, replication, blockSize);
|
||||
} catch (AccessControlException e) {
|
||||
if (isAuditEnabled() && isExternalInvocation()) {
|
||||
logAuditEvent(false, UserGroupInformation.getCurrentUser(),
|
||||
|
@ -1811,18 +1813,21 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
}
|
||||
}
|
||||
|
||||
private void startFileInt(String src, PermissionStatus permissions, String holder,
|
||||
String clientMachine, EnumSet<CreateFlag> flag, boolean createParent,
|
||||
short replication, long blockSize) throws AccessControlException,
|
||||
SafeModeException, FileAlreadyExistsException, UnresolvedLinkException,
|
||||
private HdfsFileStatus startFileInt(String src, PermissionStatus permissions,
|
||||
String holder, String clientMachine, EnumSet<CreateFlag> flag,
|
||||
boolean createParent, short replication, long blockSize)
|
||||
throws AccessControlException, SafeModeException,
|
||||
FileAlreadyExistsException, UnresolvedLinkException,
|
||||
FileNotFoundException, ParentNotDirectoryException, IOException {
|
||||
boolean skipSync = false;
|
||||
final HdfsFileStatus stat;
|
||||
writeLock();
|
||||
try {
|
||||
checkOperation(OperationCategory.WRITE);
|
||||
|
||||
startFileInternal(src, permissions, holder, clientMachine, flag,
|
||||
createParent, replication, blockSize);
|
||||
stat = dir.getFileInfo(src, false);
|
||||
} catch (StandbyException se) {
|
||||
skipSync = true;
|
||||
throw se;
|
||||
|
@ -1836,11 +1841,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
}
|
||||
|
||||
if (isAuditEnabled() && isExternalInvocation()) {
|
||||
final HdfsFileStatus stat = dir.getFileInfo(src, false);
|
||||
logAuditEvent(UserGroupInformation.getCurrentUser(),
|
||||
getRemoteIp(),
|
||||
"create", src, null, stat);
|
||||
}
|
||||
return stat;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2207,20 +2212,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
* are replicated. Will return an empty 2-elt array if we want the
|
||||
* client to "try again later".
|
||||
*/
|
||||
LocatedBlock getAdditionalBlock(String src,
|
||||
String clientName,
|
||||
ExtendedBlock previous,
|
||||
HashMap<Node, Node> excludedNodes
|
||||
)
|
||||
LocatedBlock getAdditionalBlock(String src, long fileId, String clientName,
|
||||
ExtendedBlock previous, HashMap<Node, Node> excludedNodes)
|
||||
throws LeaseExpiredException, NotReplicatedYetException,
|
||||
QuotaExceededException, SafeModeException, UnresolvedLinkException,
|
||||
IOException {
|
||||
checkBlock(previous);
|
||||
Block previousBlock = ExtendedBlock.getLocalBlock(previous);
|
||||
long fileLength, blockSize;
|
||||
long blockSize;
|
||||
int replication;
|
||||
DatanodeDescriptor clientNode = null;
|
||||
Block newBlock = null;
|
||||
|
||||
if(NameNode.stateChangeLog.isDebugEnabled()) {
|
||||
NameNode.stateChangeLog.debug(
|
||||
|
@ -2228,118 +2227,61 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
+src+" for "+clientName);
|
||||
}
|
||||
|
||||
writeLock();
|
||||
// Part I. Analyze the state of the file with respect to the input data.
|
||||
readLock();
|
||||
try {
|
||||
checkOperation(OperationCategory.WRITE);
|
||||
LocatedBlock[] onRetryBlock = new LocatedBlock[1];
|
||||
final INode[] inodes = analyzeFileState(
|
||||
src, fileId, clientName, previous, onRetryBlock).getINodes();
|
||||
final INodeFileUnderConstruction pendingFile =
|
||||
(INodeFileUnderConstruction) inodes[inodes.length - 1];
|
||||
|
||||
if (isInSafeMode()) {
|
||||
throw new SafeModeException("Cannot add block to " + src, safeMode);
|
||||
if(onRetryBlock[0] != null) {
|
||||
// This is a retry. Just return the last block.
|
||||
return onRetryBlock[0];
|
||||
}
|
||||
|
||||
// have we exceeded the configured limit of fs objects.
|
||||
checkFsObjectLimit();
|
||||
|
||||
INodeFileUnderConstruction pendingFile = checkLease(src, clientName);
|
||||
BlockInfo lastBlockInFile = pendingFile.getLastBlock();
|
||||
if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
|
||||
// The block that the client claims is the current last block
|
||||
// doesn't match up with what we think is the last block. There are
|
||||
// three possibilities:
|
||||
// 1) This is the first block allocation of an append() pipeline
|
||||
// which started appending exactly at a block boundary.
|
||||
// In this case, the client isn't passed the previous block,
|
||||
// so it makes the allocateBlock() call with previous=null.
|
||||
// We can distinguish this since the last block of the file
|
||||
// will be exactly a full block.
|
||||
// 2) This is a retry from a client that missed the response of a
|
||||
// prior getAdditionalBlock() call, perhaps because of a network
|
||||
// timeout, or because of an HA failover. In that case, we know
|
||||
// by the fact that the client is re-issuing the RPC that it
|
||||
// never began to write to the old block. Hence it is safe to
|
||||
// abandon it and allocate a new one.
|
||||
// 3) This is an entirely bogus request/bug -- we should error out
|
||||
// rather than potentially appending a new block with an empty
|
||||
// one in the middle, etc
|
||||
|
||||
BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();
|
||||
if (previous == null &&
|
||||
lastBlockInFile != null &&
|
||||
lastBlockInFile.getNumBytes() == pendingFile.getPreferredBlockSize() &&
|
||||
lastBlockInFile.isComplete()) {
|
||||
// Case 1
|
||||
if (NameNode.stateChangeLog.isDebugEnabled()) {
|
||||
NameNode.stateChangeLog.debug(
|
||||
"BLOCK* NameSystem.allocateBlock: handling block allocation" +
|
||||
" writing to a file with a complete previous block: src=" +
|
||||
src + " lastBlock=" + lastBlockInFile);
|
||||
}
|
||||
} else if (Block.matchingIdAndGenStamp(penultimateBlock, previousBlock)) {
|
||||
// Case 2
|
||||
if (lastBlockInFile.getNumBytes() != 0) {
|
||||
throw new IOException(
|
||||
"Request looked like a retry to allocate block " +
|
||||
lastBlockInFile + " but it already contains " +
|
||||
lastBlockInFile.getNumBytes() + " bytes");
|
||||
}
|
||||
|
||||
// The retry case ("b" above) -- abandon the old block.
|
||||
NameNode.stateChangeLog.info("BLOCK* allocateBlock: " +
|
||||
"caught retry for allocation of a new block in " +
|
||||
src + ". Abandoning old block " + lastBlockInFile);
|
||||
dir.removeBlock(src, pendingFile, lastBlockInFile);
|
||||
dir.persistBlocks(src, pendingFile);
|
||||
} else {
|
||||
|
||||
throw new IOException("Cannot allocate block in " + src + ": " +
|
||||
"passed 'previous' block " + previous + " does not match actual " +
|
||||
"last block in file " + lastBlockInFile);
|
||||
}
|
||||
}
|
||||
|
||||
// commit the last block and complete it if it has minimum replicas
|
||||
commitOrCompleteLastBlock(pendingFile, previousBlock);
|
||||
|
||||
//
|
||||
// If we fail this, bad things happen!
|
||||
//
|
||||
if (!checkFileProgress(pendingFile, false)) {
|
||||
throw new NotReplicatedYetException("Not replicated yet:" + src);
|
||||
}
|
||||
fileLength = pendingFile.computeContentSummary().getLength();
|
||||
blockSize = pendingFile.getPreferredBlockSize();
|
||||
clientNode = pendingFile.getClientNode();
|
||||
replication = pendingFile.getFileReplication();
|
||||
} finally {
|
||||
writeUnlock();
|
||||
readUnlock();
|
||||
}
|
||||
|
||||
// choose targets for the new block to be allocated.
|
||||
final DatanodeDescriptor targets[] = blockManager.chooseTarget(
|
||||
final DatanodeDescriptor targets[] = getBlockManager().chooseTarget(
|
||||
src, replication, clientNode, excludedNodes, blockSize);
|
||||
|
||||
// Allocate a new block and record it in the INode.
|
||||
// Part II.
|
||||
// Allocate a new block, add it to the INode and the BlocksMap.
|
||||
Block newBlock = null;
|
||||
long offset;
|
||||
writeLock();
|
||||
try {
|
||||
checkOperation(OperationCategory.WRITE);
|
||||
if (isInSafeMode()) {
|
||||
throw new SafeModeException("Cannot add block to " + src, safeMode);
|
||||
// Run the full analysis again, since things could have changed
|
||||
// while chooseTarget() was executing.
|
||||
LocatedBlock[] onRetryBlock = new LocatedBlock[1];
|
||||
INodesInPath inodesInPath =
|
||||
analyzeFileState(src, fileId, clientName, previous, onRetryBlock);
|
||||
INode[] inodes = inodesInPath.getINodes();
|
||||
final INodeFileUnderConstruction pendingFile =
|
||||
(INodeFileUnderConstruction) inodes[inodes.length - 1];
|
||||
|
||||
if(onRetryBlock[0] != null) {
|
||||
// This is a retry. Just return the last block.
|
||||
return onRetryBlock[0];
|
||||
}
|
||||
|
||||
final INodesInPath iip = dir.getINodesInPath4Write(src);
|
||||
final INodeFileUnderConstruction pendingFile
|
||||
= checkLease(src, clientName, iip.getLastINode());
|
||||
|
||||
if (!checkFileProgress(pendingFile, false)) {
|
||||
throw new NotReplicatedYetException("Not replicated yet:" + src);
|
||||
}
|
||||
// commit the last block and complete it if it has minimum replicas
|
||||
commitOrCompleteLastBlock(pendingFile,
|
||||
ExtendedBlock.getLocalBlock(previous));
|
||||
|
||||
// allocate new block, record block locations in INode.
|
||||
newBlock = createNewBlock();
|
||||
saveAllocatedBlock(src, inodesInPath, newBlock, targets);
|
||||
|
||||
// allocate new block record block locations in INode.
|
||||
newBlock = allocateBlock(src, iip, targets);
|
||||
|
||||
for (DatanodeDescriptor dn : targets) {
|
||||
dn.incBlocksScheduled();
|
||||
}
|
||||
dir.persistBlocks(src, pendingFile);
|
||||
offset = pendingFile.computeFileSize(true);
|
||||
} finally {
|
||||
writeUnlock();
|
||||
}
|
||||
|
@ -2347,10 +2289,114 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
getEditLog().logSync();
|
||||
}
|
||||
|
||||
// Create next block
|
||||
LocatedBlock b = new LocatedBlock(getExtendedBlock(newBlock), targets, fileLength);
|
||||
blockManager.setBlockToken(b, BlockTokenSecretManager.AccessMode.WRITE);
|
||||
return b;
|
||||
// Return located block
|
||||
return makeLocatedBlock(newBlock, targets, offset);
|
||||
}
|
||||
|
||||
INodesInPath analyzeFileState(String src,
|
||||
long fileId,
|
||||
String clientName,
|
||||
ExtendedBlock previous,
|
||||
LocatedBlock[] onRetryBlock)
|
||||
throws IOException {
|
||||
assert hasReadOrWriteLock();
|
||||
|
||||
checkBlock(previous);
|
||||
onRetryBlock[0] = null;
|
||||
checkOperation(OperationCategory.WRITE);
|
||||
if (isInSafeMode()) {
|
||||
throw new SafeModeException("Cannot add block to " + src, safeMode);
|
||||
}
|
||||
|
||||
// have we exceeded the configured limit of fs objects.
|
||||
checkFsObjectLimit();
|
||||
|
||||
Block previousBlock = ExtendedBlock.getLocalBlock(previous);
|
||||
final INodesInPath inodesInPath = dir.getINodesInPath4Write(src);
|
||||
final INode[] inodes = inodesInPath.getINodes();
|
||||
final INodeFileUnderConstruction pendingFile
|
||||
= checkLease(src, fileId, clientName, inodes[inodes.length - 1]);
|
||||
BlockInfo lastBlockInFile = pendingFile.getLastBlock();
|
||||
if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
|
||||
// The block that the client claims is the current last block
|
||||
// doesn't match up with what we think is the last block. There are
|
||||
// four possibilities:
|
||||
// 1) This is the first block allocation of an append() pipeline
|
||||
// which started appending exactly at a block boundary.
|
||||
// In this case, the client isn't passed the previous block,
|
||||
// so it makes the allocateBlock() call with previous=null.
|
||||
// We can distinguish this since the last block of the file
|
||||
// will be exactly a full block.
|
||||
// 2) This is a retry from a client that missed the response of a
|
||||
// prior getAdditionalBlock() call, perhaps because of a network
|
||||
// timeout, or because of an HA failover. In that case, we know
|
||||
// by the fact that the client is re-issuing the RPC that it
|
||||
// never began to write to the old block. Hence it is safe to
|
||||
// to return the existing block.
|
||||
// 3) This is an entirely bogus request/bug -- we should error out
|
||||
// rather than potentially appending a new block with an empty
|
||||
// one in the middle, etc
|
||||
// 4) This is a retry from a client that timed out while
|
||||
// the prior getAdditionalBlock() is still being processed,
|
||||
// currently working on chooseTarget().
|
||||
// There are no means to distinguish between the first and
|
||||
// the second attempts in Part I, because the first one hasn't
|
||||
// changed the namesystem state yet.
|
||||
// We run this analysis again in Part II where case 4 is impossible.
|
||||
|
||||
BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();
|
||||
if (previous == null &&
|
||||
lastBlockInFile != null &&
|
||||
lastBlockInFile.getNumBytes() == pendingFile.getPreferredBlockSize() &&
|
||||
lastBlockInFile.isComplete()) {
|
||||
// Case 1
|
||||
if (NameNode.stateChangeLog.isDebugEnabled()) {
|
||||
NameNode.stateChangeLog.debug(
|
||||
"BLOCK* NameSystem.allocateBlock: handling block allocation" +
|
||||
" writing to a file with a complete previous block: src=" +
|
||||
src + " lastBlock=" + lastBlockInFile);
|
||||
}
|
||||
} else if (Block.matchingIdAndGenStamp(penultimateBlock, previousBlock)) {
|
||||
if (lastBlockInFile.getNumBytes() != 0) {
|
||||
throw new IOException(
|
||||
"Request looked like a retry to allocate block " +
|
||||
lastBlockInFile + " but it already contains " +
|
||||
lastBlockInFile.getNumBytes() + " bytes");
|
||||
}
|
||||
|
||||
// Case 2
|
||||
// Return the last block.
|
||||
NameNode.stateChangeLog.info("BLOCK* allocateBlock: " +
|
||||
"caught retry for allocation of a new block in " +
|
||||
src + ". Returning previously allocated block " + lastBlockInFile);
|
||||
long offset = pendingFile.computeFileSize(true);
|
||||
onRetryBlock[0] = makeLocatedBlock(lastBlockInFile,
|
||||
((BlockInfoUnderConstruction)lastBlockInFile).getExpectedLocations(),
|
||||
offset);
|
||||
return inodesInPath;
|
||||
} else {
|
||||
// Case 3
|
||||
throw new IOException("Cannot allocate block in " + src + ": " +
|
||||
"passed 'previous' block " + previous + " does not match actual " +
|
||||
"last block in file " + lastBlockInFile);
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the penultimate block is minimally replicated
|
||||
if (!checkFileProgress(pendingFile, false)) {
|
||||
throw new NotReplicatedYetException("Not replicated yet: " + src);
|
||||
}
|
||||
return inodesInPath;
|
||||
}
|
||||
|
||||
LocatedBlock makeLocatedBlock(Block blk,
|
||||
DatanodeInfo[] locs,
|
||||
long offset) throws IOException {
|
||||
LocatedBlock lBlk = new LocatedBlock(
|
||||
getExtendedBlock(blk), locs, offset);
|
||||
getBlockManager().setBlockToken(
|
||||
lBlk, BlockTokenSecretManager.AccessMode.WRITE);
|
||||
return lBlk;
|
||||
}
|
||||
|
||||
/** @see NameNode#getAdditionalDatanode(String, ExtendedBlock, DatanodeInfo[], DatanodeInfo[], int, String) */
|
||||
|
@ -2438,13 +2484,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
}
|
||||
|
||||
/** make sure that we still have the lease on this file. */
|
||||
private INodeFileUnderConstruction checkLease(String src, String holder)
|
||||
throws LeaseExpiredException, UnresolvedLinkException {
|
||||
return checkLease(src, holder, dir.getINode(src));
|
||||
private INodeFileUnderConstruction checkLease(String src, String holder)
|
||||
throws LeaseExpiredException, UnresolvedLinkException,
|
||||
FileNotFoundException {
|
||||
return checkLease(src, INodeId.GRANDFATHER_INODE_ID, holder,
|
||||
dir.getINode(src));
|
||||
}
|
||||
|
||||
private INodeFileUnderConstruction checkLease(String src, String holder,
|
||||
INode file) throws LeaseExpiredException {
|
||||
|
||||
private INodeFileUnderConstruction checkLease(String src, long fileId,
|
||||
String holder, INode file) throws LeaseExpiredException,
|
||||
FileNotFoundException {
|
||||
assert hasReadOrWriteLock();
|
||||
if (file == null || !(file instanceof INodeFile)) {
|
||||
Lease lease = leaseManager.getLease(holder);
|
||||
|
@ -2465,6 +2514,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
throw new LeaseExpiredException("Lease mismatch on " + src + " owned by "
|
||||
+ pendingFile.getClientName() + " but is accessed by " + holder);
|
||||
}
|
||||
INodeId.checkId(fileId, pendingFile);
|
||||
return pendingFile;
|
||||
}
|
||||
|
||||
|
@ -2506,7 +2556,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
final INodesInPath iip = dir.getLastINodeInPath(src);
|
||||
final INodeFileUnderConstruction pendingFile;
|
||||
try {
|
||||
pendingFile = checkLease(src, holder, iip.getINode(0));
|
||||
pendingFile = checkLease(src, INodeId.GRANDFATHER_INODE_ID,
|
||||
holder, iip.getINode(0));
|
||||
} catch (LeaseExpiredException lee) {
|
||||
final INode inode = dir.getINode(src);
|
||||
if (inode != null && inode instanceof INodeFile && !inode.isUnderConstruction()) {
|
||||
|
@ -2543,22 +2594,33 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
}
|
||||
|
||||
/**
|
||||
* Allocate a block at the given pending filename
|
||||
* Save allocated block at the given pending filename
|
||||
*
|
||||
* @param src path to the file
|
||||
* @param inodesInPath representing each of the components of src.
|
||||
* The last INode is the INode for the file.
|
||||
* @throws QuotaExceededException If addition of block exceeds space quota
|
||||
*/
|
||||
private Block allocateBlock(String src, INodesInPath inodesInPath,
|
||||
DatanodeDescriptor targets[]) throws IOException {
|
||||
BlockInfo saveAllocatedBlock(String src, INodesInPath inodesInPath,
|
||||
Block newBlock, DatanodeDescriptor targets[]) throws IOException {
|
||||
assert hasWriteLock();
|
||||
BlockInfo b = dir.addBlock(src, inodesInPath, newBlock, targets);
|
||||
NameNode.stateChangeLog.info("BLOCK* allocateBlock: " + src + ". "
|
||||
+ getBlockPoolId() + " " + b);
|
||||
for (DatanodeDescriptor dn : targets) {
|
||||
dn.incBlocksScheduled();
|
||||
}
|
||||
return b;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create new block with a unique block id and a new generation stamp.
|
||||
*/
|
||||
Block createNewBlock() throws IOException {
|
||||
assert hasWriteLock();
|
||||
Block b = new Block(getFSImage().getUniqueBlockId(), 0, 0);
|
||||
// Increment the generation stamp for every new block.
|
||||
b.setGenerationStamp(nextGenerationStamp());
|
||||
b = dir.addBlock(src, inodesInPath, b, targets);
|
||||
NameNode.stateChangeLog.info("BLOCK* allocateBlock: " + src + ". "
|
||||
+ blockPoolId + " " + b);
|
||||
return b;
|
||||
}
|
||||
|
||||
|
@ -5623,7 +5685,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
@Override
|
||||
public boolean isAvoidingStaleDataNodesForWrite() {
|
||||
return this.blockManager.getDatanodeManager()
|
||||
.isAvoidingStaleDataNodesForWrite();
|
||||
.shouldAvoidStaleDataNodesForWrite();
|
||||
}
|
||||
|
||||
public SnapshotManager getSnapshotManager() {
|
||||
|
|
|
@ -17,18 +17,21 @@
 */
package org.apache.hadoop.hdfs.server.namenode;

import java.io.FileNotFoundException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.SequentialNumber;

/**
 * An id which uniquely identifies an inode
 * An id which uniquely identifies an inode. Id 1 to 1000 are reserved for
 * potential future usage. The id won't be recycled and is not expected to wrap
 * around in a very long time. Root inode id is always 1001. Id 0 is used for
 * backward compatibility support.
 */
@InterfaceAudience.Private
class INodeId extends SequentialNumber {
public class INodeId extends SequentialNumber {
  /**
   * The last reserved inode id. Reserve id 1 to 1000 for potential future
   * usage. The id won't be recycled and is not expected to wrap around in a
   * very long time. Root inode id will be 1001.
   * The last reserved inode id.
   */
  public static final long LAST_RESERVED_ID = 1000L;
|
@ -38,6 +41,19 @@ class INodeId extends SequentialNumber {
   */
  public static final long GRANDFATHER_INODE_ID = 0;

  /**
   * To check if the request id is the same as saved id. Don't check fileId
   * with GRANDFATHER_INODE_ID for backward compatibility.
   */
  public static void checkId(long requestId, INode inode)
      throws FileNotFoundException {
    if (requestId != GRANDFATHER_INODE_ID && requestId != inode.getId()) {
      throw new FileNotFoundException(
          "ID mismatch. Request id and saved id: " + requestId + " , "
              + inode.getId());
    }
  }

  INodeId() {
    super(LAST_RESERVED_ID);
  }
|
|
|
@ -587,7 +587,7 @@ public class NNStorage extends Storage implements Closeable,
|
|||
}
|
||||
|
||||
// Set Block pool ID in version with federation support
|
||||
if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
|
||||
if (versionSupportsFederation()) {
|
||||
String sbpid = props.getProperty("blockpoolID");
|
||||
setBlockPoolID(sd.getRoot(), sbpid);
|
||||
}
|
||||
|
@ -634,7 +634,7 @@ public class NNStorage extends Storage implements Closeable,
|
|||
) throws IOException {
|
||||
super.setPropertiesFromFields(props, sd);
|
||||
// Set blockpoolID in version with federation support
|
||||
if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
|
||||
if (versionSupportsFederation()) {
|
||||
props.setProperty("blockpoolID", blockpoolID);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -424,13 +424,10 @@ class NameNodeRpcServer implements NamenodeProtocols {
|
|||
}
|
||||
|
||||
@Override // ClientProtocol
|
||||
public void create(String src,
|
||||
FsPermission masked,
|
||||
String clientName,
|
||||
EnumSetWritable<CreateFlag> flag,
|
||||
boolean createParent,
|
||||
short replication,
|
||||
long blockSize) throws IOException {
|
||||
public HdfsFileStatus create(String src, FsPermission masked,
|
||||
String clientName, EnumSetWritable<CreateFlag> flag,
|
||||
boolean createParent, short replication, long blockSize)
|
||||
throws IOException {
|
||||
String clientMachine = getClientMachine();
|
||||
if (stateChangeLog.isDebugEnabled()) {
|
||||
stateChangeLog.debug("*DIR* NameNode.create: file "
|
||||
|
@ -440,12 +437,13 @@ class NameNodeRpcServer implements NamenodeProtocols {
|
|||
throw new IOException("create: Pathname too long. Limit "
|
||||
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
|
||||
}
|
||||
namesystem.startFile(src,
|
||||
new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(),
|
||||
null, masked),
|
||||
clientName, clientMachine, flag.get(), createParent, replication, blockSize);
|
||||
HdfsFileStatus fileStatus = namesystem.startFile(src, new PermissionStatus(
|
||||
UserGroupInformation.getCurrentUser().getShortUserName(), null, masked),
|
||||
clientName, clientMachine, flag.get(), createParent, replication,
|
||||
blockSize);
|
||||
metrics.incrFilesCreated();
|
||||
metrics.incrCreateFileOps();
|
||||
return fileStatus;
|
||||
}
|
||||
|
||||
@Override // ClientProtocol
|
||||
|
@ -484,26 +482,24 @@ class NameNodeRpcServer implements NamenodeProtocols {
|
|||
throws IOException {
|
||||
namesystem.setOwner(src, username, groupname);
|
||||
}
|
||||
|
||||
@Override // ClientProtocol
|
||||
public LocatedBlock addBlock(String src,
|
||||
String clientName,
|
||||
ExtendedBlock previous,
|
||||
DatanodeInfo[] excludedNodes)
|
||||
|
||||
@Override
|
||||
public LocatedBlock addBlock(String src, String clientName,
|
||||
ExtendedBlock previous, DatanodeInfo[] excludedNodes, long fileId)
|
||||
throws IOException {
|
||||
if(stateChangeLog.isDebugEnabled()) {
|
||||
stateChangeLog.debug("*BLOCK* NameNode.addBlock: file "
|
||||
+src+" for "+clientName);
|
||||
if (stateChangeLog.isDebugEnabled()) {
|
||||
stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " + src
|
||||
+ " fileId=" + fileId + " for " + clientName);
|
||||
}
|
||||
HashMap<Node, Node> excludedNodesSet = null;
|
||||
if (excludedNodes != null) {
|
||||
excludedNodesSet = new HashMap<Node, Node>(excludedNodes.length);
|
||||
for (Node node:excludedNodes) {
|
||||
for (Node node : excludedNodes) {
|
||||
excludedNodesSet.put(node, node);
|
||||
}
|
||||
}
|
||||
LocatedBlock locatedBlock =
|
||||
namesystem.getAdditionalBlock(src, clientName, previous, excludedNodesSet);
|
||||
LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, fileId,
|
||||
clientName, previous, excludedNodesSet);
|
||||
if (locatedBlock != null)
|
||||
metrics.incrAddBlockOps();
|
||||
return locatedBlock;
|
||||
|
|
|
@ -17,12 +17,15 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.server.namenode;
|
||||
|
||||
import static org.apache.hadoop.hdfs.DFSUtil.percent2String;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.lang.management.ManagementFactory;
|
||||
import java.lang.management.MemoryMXBean;
|
||||
import java.lang.management.MemoryUsage;
|
||||
import java.net.InetAddress;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.URI;
|
||||
import java.net.URLEncoder;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
import java.util.ArrayList;
|
||||
|
@ -64,6 +67,14 @@ import org.znerd.xmlenc.XMLOutputter;
|
|||
import com.google.common.base.Preconditions;
|
||||
|
||||
class NamenodeJspHelper {
|
||||
static String fraction2String(double value) {
|
||||
return StringUtils.format("%.2f", value);
|
||||
}
|
||||
|
||||
static String fraction2String(long numerator, long denominator) {
|
||||
return fraction2String(numerator/(double)denominator);
|
||||
}
|
||||
|
||||
static String getSafeModeText(FSNamesystem fsn) {
|
||||
if (!fsn.isInSafeMode())
|
||||
return "";
|
||||
|
@ -361,20 +372,20 @@ class NamenodeJspHelper {
|
|||
+ "DFS Remaining" + colTxt() + ":" + colTxt()
|
||||
+ StringUtils.byteDesc(remaining) + rowTxt() + colTxt() + "DFS Used%"
|
||||
+ colTxt() + ":" + colTxt()
|
||||
+ StringUtils.limitDecimalTo2(percentUsed) + " %" + rowTxt()
|
||||
+ percent2String(percentUsed) + rowTxt()
|
||||
+ colTxt() + "DFS Remaining%" + colTxt() + ":" + colTxt()
|
||||
+ StringUtils.limitDecimalTo2(percentRemaining) + " %"
|
||||
+ percent2String(percentRemaining)
|
||||
+ rowTxt() + colTxt() + "Block Pool Used" + colTxt() + ":" + colTxt()
|
||||
+ StringUtils.byteDesc(bpUsed) + rowTxt()
|
||||
+ colTxt() + "Block Pool Used%"+ colTxt() + ":" + colTxt()
|
||||
+ StringUtils.limitDecimalTo2(percentBpUsed) + " %"
|
||||
+ percent2String(percentBpUsed)
|
||||
+ rowTxt() + colTxt() + "DataNodes usages" + colTxt() + ":" + colTxt()
|
||||
+ "Min %" + colTxt() + "Median %" + colTxt() + "Max %" + colTxt()
|
||||
+ "stdev %" + rowTxt() + colTxt() + colTxt() + colTxt()
|
||||
+ StringUtils.limitDecimalTo2(min) + " %"
|
||||
+ colTxt() + StringUtils.limitDecimalTo2(median) + " %"
|
||||
+ colTxt() + StringUtils.limitDecimalTo2(max) + " %"
|
||||
+ colTxt() + StringUtils.limitDecimalTo2(dev) + " %"
|
||||
+ percent2String(min)
|
||||
+ colTxt() + percent2String(median)
|
||||
+ colTxt() + percent2String(max)
|
||||
+ colTxt() + percent2String(dev)
|
||||
+ rowTxt() + colTxt()
|
||||
+ "<a href=\"dfsnodelist.jsp?whatNodes=LIVE\">Live Nodes</a> "
|
||||
+ colTxt() + ":" + colTxt() + live.size()
|
||||
|
@ -443,7 +454,13 @@ class NamenodeJspHelper {
|
|||
nodeToRedirect = nn.getHttpAddress().getHostName();
|
||||
redirectPort = nn.getHttpAddress().getPort();
|
||||
}
|
||||
String addr = nn.getNameNodeAddressHostPortString();
|
||||
|
||||
InetSocketAddress rpcAddr = nn.getNameNodeAddress();
|
||||
String rpcHost = rpcAddr.getAddress().isAnyLocalAddress()
|
||||
? URI.create(request.getRequestURL().toString()).getHost()
|
||||
: rpcAddr.getAddress().getHostAddress();
|
||||
String addr = rpcHost + ":" + rpcAddr.getPort();
|
||||
|
||||
String fqdn = InetAddress.getByName(nodeToRedirect).getCanonicalHostName();
|
||||
redirectLocation = HttpConfig.getSchemePrefix() + fqdn + ":" + redirectPort
|
||||
+ "/browseDirectory.jsp?namenodeInfoPort="
|
||||
|
@ -562,9 +579,9 @@ class NamenodeJspHelper {
|
|||
long u = d.getDfsUsed();
|
||||
long nu = d.getNonDfsUsed();
|
||||
long r = d.getRemaining();
|
||||
String percentUsed = StringUtils.limitDecimalTo2(d.getDfsUsedPercent());
|
||||
String percentRemaining = StringUtils.limitDecimalTo2(d
|
||||
.getRemainingPercent());
|
||||
final double percentUsedValue = d.getDfsUsedPercent();
|
||||
String percentUsed = fraction2String(percentUsedValue);
|
||||
String percentRemaining = fraction2String(d.getRemainingPercent());
|
||||
|
||||
String adminState = d.getAdminState().toString();
|
||||
|
||||
|
@ -572,32 +589,30 @@ class NamenodeJspHelper {
|
|||
long currentTime = Time.now();
|
||||
|
||||
long bpUsed = d.getBlockPoolUsed();
|
||||
String percentBpUsed = StringUtils.limitDecimalTo2(d
|
||||
.getBlockPoolUsedPercent());
|
||||
String percentBpUsed = fraction2String(d.getBlockPoolUsedPercent());
|
||||
|
||||
out.print("<td class=\"lastcontact\"> "
|
||||
+ ((currentTime - timestamp) / 1000)
|
||||
+ "<td class=\"adminstate\">"
|
||||
+ adminState
|
||||
+ "<td align=\"right\" class=\"capacity\">"
|
||||
+ StringUtils.limitDecimalTo2(c * 1.0 / diskBytes)
|
||||
+ fraction2String(c, diskBytes)
|
||||
+ "<td align=\"right\" class=\"used\">"
|
||||
+ StringUtils.limitDecimalTo2(u * 1.0 / diskBytes)
|
||||
+ fraction2String(u, diskBytes)
|
||||
+ "<td align=\"right\" class=\"nondfsused\">"
|
||||
+ StringUtils.limitDecimalTo2(nu * 1.0 / diskBytes)
|
||||
+ fraction2String(nu, diskBytes)
|
||||
+ "<td align=\"right\" class=\"remaining\">"
|
||||
+ StringUtils.limitDecimalTo2(r * 1.0 / diskBytes)
|
||||
+ fraction2String(r, diskBytes)
|
||||
+ "<td align=\"right\" class=\"pcused\">"
|
||||
+ percentUsed
|
||||
+ "<td class=\"pcused\">"
|
||||
+ ServletUtil.percentageGraph((int) Double.parseDouble(percentUsed),
|
||||
100)
|
||||
+ ServletUtil.percentageGraph((int)percentUsedValue, 100)
|
||||
+ "<td align=\"right\" class=\"pcremaining\">"
|
||||
+ percentRemaining
|
||||
+ "<td title=" + "\"blocks scheduled : "
|
||||
+ d.getBlocksScheduled() + "\" class=\"blocks\">" + d.numBlocks()+"\n"
|
||||
+ "<td align=\"right\" class=\"bpused\">"
|
||||
+ StringUtils.limitDecimalTo2(bpUsed * 1.0 / diskBytes)
|
||||
+ fraction2String(bpUsed, diskBytes)
|
||||
+ "<td align=\"right\" class=\"pcbpused\">"
|
||||
+ percentBpUsed
|
||||
+ "<td align=\"right\" class=\"volfails\">"
|
||||
|
|
|
@ -475,14 +475,20 @@ public class SecondaryNameNode implements Runnable {
|
|||
// Returns a token that would be used to upload the merged image.
|
||||
CheckpointSignature sig = namenode.rollEditLog();
|
||||
|
||||
if ((checkpointImage.getNamespaceID() == 0) ||
|
||||
(sig.isSameCluster(checkpointImage) &&
|
||||
boolean loadImage = false;
|
||||
boolean isFreshCheckpointer = (checkpointImage.getNamespaceID() == 0);
|
||||
boolean isSameCluster =
|
||||
(dstStorage.versionSupportsFederation() && sig.isSameCluster(checkpointImage)) ||
|
||||
(!dstStorage.versionSupportsFederation() && sig.namespaceIdMatches(checkpointImage));
|
||||
if (isFreshCheckpointer ||
|
||||
(isSameCluster &&
|
||||
!sig.storageVersionMatches(checkpointImage.getStorage()))) {
|
||||
// if we're a fresh 2NN, or if we're on the same cluster and our storage
|
||||
// needs an upgrade, just take the storage info from the server.
|
||||
dstStorage.setStorageInfo(sig);
|
||||
dstStorage.setClusterID(sig.getClusterID());
|
||||
dstStorage.setBlockPoolID(sig.getBlockpoolID());
|
||||
loadImage = true;
|
||||
}
|
||||
sig.validateStorageInfo(checkpointImage);
|
||||
|
||||
|
@ -492,7 +498,7 @@ public class SecondaryNameNode implements Runnable {
|
|||
RemoteEditLogManifest manifest =
|
||||
namenode.getEditLogManifest(sig.mostRecentCheckpointTxId + 1);
|
||||
|
||||
boolean loadImage = downloadCheckpointFiles(
|
||||
loadImage |= downloadCheckpointFiles(
|
||||
fsName, checkpointImage, sig, manifest); // Fetch fsimage and edits
|
||||
doMerge(sig, manifest, loadImage, checkpointImage, namesystem);
|
||||
|
||||
|
|
|
@ -316,8 +316,7 @@ public class DFSAdmin extends FsShell {
    System.out.println("DFS Used: " + used
        + " (" + StringUtils.byteDesc(used) + ")");
    System.out.println("DFS Used%: "
        + StringUtils.limitDecimalTo2(((1.0 * used) / presentCapacity) * 100)
        + "%");
        + StringUtils.formatPercent(used/(double)presentCapacity, 2));

    /* These counts are not always up to date. They are updated after
     * iteration of an internal list. Should be updated in a few seconds to
|
|
|
@ -219,6 +219,7 @@ public class JsonUtil {
    m.put("modificationTime", status.getModificationTime());
    m.put("blockSize", status.getBlockSize());
    m.put("replication", status.getReplication());
    m.put("fileId", status.getFileId());
    return includeType ? toJsonString(FileStatus.class, m): JSON.toString(m);
  }
|
@ -243,9 +244,10 @@ public class JsonUtil {
    final long mTime = (Long) m.get("modificationTime");
    final long blockSize = (Long) m.get("blockSize");
    final short replication = (short) (long) (Long) m.get("replication");
    final long fileId = (Long) m.get("fileId");
    return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
        blockSize, mTime, aTime, permission, owner, group,
        symlink, DFSUtil.string2Bytes(localName));
        symlink, DFSUtil.string2Bytes(localName), fileId);
  }

  /** Convert an ExtendedBlock to a Json map. */
|
|
|
@ -21,7 +21,7 @@ package org.apache.hadoop.hdfs.web.resources;
/** The concat source paths parameter. */
public class ConcatSourcesParam extends StringParam {
  /** Parameter name. */
  public static final String NAME = "srcs";
  public static final String NAME = "sources";

  public static final String DEFAULT = NULL;
|
|
|
@ -67,7 +67,8 @@ message CreateRequestProto {
  required uint64 blockSize = 7;
}

message CreateResponseProto { // void response
message CreateResponseProto {
  optional HdfsFileStatusProto fs = 1;
}

message AppendRequestProto {
|
@ -119,6 +120,7 @@ message AddBlockRequestProto {
  required string clientName = 2;
  optional ExtendedBlockProto previous = 3;
  repeated DatanodeInfoProto excludeNodes = 4;
  optional uint64 fileId = 5 [default = 0]; // default as a bogus id
}

message AddBlockResponseProto {
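With the CreateResponseProto and AddBlockRequestProto changes above, a ClientProtocol caller can thread the inode id returned by create() into addBlock(). A condensed sketch based on the translator code earlier in this change (not a complete client; the write pipeline and error handling are omitted):

    import java.util.EnumSet;

    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
    import org.apache.hadoop.hdfs.server.namenode.INodeId;
    import org.apache.hadoop.io.EnumSetWritable;

    /** Sketch only: threading the fileId returned by create() into addBlock(). */
    class CreateThenAddBlockSketch {
      static LocatedBlock firstBlock(ClientProtocol namenode, String src,
          String clientName, short replication, long blockSize) throws Exception {
        // create() now returns the status of the new file (may be null for
        // older servers), which carries the inode id.
        HdfsFileStatus stat = namenode.create(src, FsPermission.getDefault(),
            clientName,
            new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
            true, replication, blockSize);
        long fileId = (stat != null) ? stat.getFileId()
            : INodeId.GRANDFATHER_INODE_ID;   // 0 = "unknown", for compatibility
        // addBlock() takes the fileId as its new last argument.
        return namenode.addBlock(src, clientName, null /* previous */,
            null /* excludeNodes */, fileId);
      }
    }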
|
|
|
@ -170,6 +170,9 @@ message HdfsFileStatusProto {
  optional uint32 block_replication = 10 [default = 0]; // only 16bits used
  optional uint64 blocksize = 11 [default = 0];
  optional LocatedBlocksProto locations = 12; // supplied only if asked by client

  // Optional field for fileId
  optional uint64 fileId = 13 [default = 0]; // default as an invalid id
}

/**
|
|
|
@ -999,17 +999,14 @@
</property>

<property>
  <name>dfs.namenode.check.stale.datanode</name>
  <name>dfs.namenode.avoid.read.stale.datanode</name>
  <value>false</value>
  <description>
    Indicate whether or not to check "stale" datanodes whose
    Indicate whether or not to avoid reading from "stale" datanodes whose
    heartbeat messages have not been received by the namenode
    for more than a specified time interval. If this configuration
    parameter is set as true, the system will keep track
    of the number of stale datanodes. The stale datanodes will be
    for more than a specified time interval. Stale datanodes will be
    moved to the end of the node list returned for reading. See
    dfs.namenode.avoid.write.stale.datanode for details on how this
    affects writes.
    dfs.namenode.avoid.write.stale.datanode for a similar setting for writes.
  </description>
</property>
|
@ -1017,13 +1014,13 @@
  <name>dfs.namenode.avoid.write.stale.datanode</name>
  <value>false</value>
  <description>
    Indicate whether or not to avoid writing to "stale" datanodes whose
    heartbeat messages have not been received by the namenode
    for more than a specified time interval. If this configuration
    parameter and dfs.namenode.check.stale.datanode are both set as true,
    the writing will avoid using stale datanodes unless a high number
    of datanodes are marked as stale. See
    dfs.namenode.write.stale.datanode.ratio for details.
    for more than a specified time interval. Writes will avoid using
    stale datanodes unless more than a configured ratio
    (dfs.namenode.write.stale.datanode.ratio) of datanodes are marked as
    stale. See dfs.namenode.avoid.read.stale.datanode for a similar setting
    for reads.
  </description>
</property>
|
|
|
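For reference, a small sketch (an assumed helper class, not part of this patch) showing how the two properties described above map onto the DFSConfigKeys constants that the tests in this change set use:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class StaleDatanodeSettings {
  /** Build a Configuration that avoids stale datanodes for both reads and writes. */
  public static Configuration staleAwareConf() {
    Configuration conf = new HdfsConfiguration();
    // dfs.namenode.avoid.read.stale.datanode
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
    // dfs.namenode.avoid.write.stale.datanode
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
    return conf;
  }
}
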
@@ -109,6 +109,9 @@ WebHDFS REST API
  * {{{Append to a File}<<<APPEND>>>}}
    (see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.append)

  * {{{Concat File(s)}<<<CONCAT>>>}}
    (see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.concat)

* HTTP DELETE

  * {{{Delete a File/Directory}<<<DELETE>>>}}

@@ -299,6 +302,32 @@ Content-Length: 0
  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.append


** {Concat File(s)}

  * Submit a HTTP POST request.

+---------------------------------
curl -i -X POST "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CONCAT&sources=<SOURCES>"
+---------------------------------

  The client receives a response with zero content length:

+---------------------------------
HTTP/1.1 200 OK
Content-Length: 0
+---------------------------------

  []

  This REST API call is available as of Hadoop version 2.0.3.
  Please note that <SOURCES> is a comma separated list of absolute paths.
  (Example: sources=/test/file1,/test/file2,/test/file3)

  See also:
  {{{Sources}<<<sources>>>}},
  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.concat

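As a companion to the curl call above, a hedged Java sketch of the equivalent client-side API (illustrative only; the paths are the same hypothetical ones as in the example, and concat is only supported on filesystems such as HDFS that implement it):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ConcatExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Equivalent of op=CONCAT&sources=/test/file2,/test/file3 on /test/file1:
    // the source files are appended to the target and then removed.
    fs.concat(new Path("/test/file1"),
        new Path[] { new Path("/test/file2"), new Path("/test/file3") });
  }
}
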
** {Open and Read a File}

  * Submit a HTTP GET request with automatically following redirects.

@@ -1727,6 +1756,29 @@ var tokenProperties =
  {{{Set Replication Factor}<<<SETREPLICATION>>>}}


** {Sources}

*----------------+-------------------------------------------------------------------+
|| Name | <<<sources>>> |
*----------------+-------------------------------------------------------------------+
|| Description | The comma separated absolute paths used for concatenation. |
*----------------+-------------------------------------------------------------------+
|| Type | String |
*----------------+-------------------------------------------------------------------+
|| Default Value | \<empty\> |
*----------------+-------------------------------------------------------------------+
|| Valid Values | A list of comma separated absolute FileSystem paths without scheme and authority. |
*----------------+-------------------------------------------------------------------+
|| Syntax | See the note in {{Delegation}}. |
*----------------+-------------------------------------------------------------------+

  <<Note>> that sources are absolute FileSystem paths.


  See also:
  {{{Concat File(s)}<<<CONCAT>>>}}


** {Token}

*----------------+-------------------------------------------------------------------+

@ -33,6 +33,7 @@ import java.io.FileInputStream;
|
|||
import java.io.FileReader;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.net.HttpURLConnection;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.Socket;
|
||||
import java.net.URL;
|
||||
|
@ -639,6 +640,9 @@ public class DFSTestUtil {
|
|||
*/
|
||||
public static byte[] urlGetBytes(URL url) throws IOException {
|
||||
URLConnection conn = url.openConnection();
|
||||
HttpURLConnection hc = (HttpURLConnection)conn;
|
||||
|
||||
assertEquals(HttpURLConnection.HTTP_OK, hc.getResponseCode());
|
||||
ByteArrayOutputStream out = new ByteArrayOutputStream();
|
||||
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
|
||||
return out.toByteArray();
|
||||
|
|
|
@ -23,22 +23,34 @@ import static org.junit.Assert.assertTrue;
|
|||
import static org.junit.Assert.fail;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.Socket;
|
||||
import java.net.SocketAddress;
|
||||
import java.net.URI;
|
||||
import java.net.URISyntaxException;
|
||||
|
||||
import javax.net.SocketFactory;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.net.ConnectTimeoutException;
|
||||
import org.apache.hadoop.net.StandardSocketFactory;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.hamcrest.BaseMatcher;
|
||||
import org.hamcrest.Description;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.mockito.Mockito;
|
||||
|
||||
public class TestDFSClientFailover {
|
||||
|
||||
|
@ -91,6 +103,63 @@ public class TestDFSClientFailover {
|
|||
fs.close();
|
||||
}
|
||||
|
||||
/**
|
||||
* Test that even a non-idempotent method will properly fail-over if the
|
||||
* first IPC attempt times out trying to connect. Regression test for
|
||||
* HDFS-4404.
|
||||
*/
|
||||
@Test
|
||||
public void testFailoverOnConnectTimeout() throws Exception {
|
||||
conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
|
||||
InjectingSocketFactory.class, SocketFactory.class);
|
||||
// Set up the InjectingSocketFactory to throw a ConnectTimeoutException
|
||||
// when connecting to the first NN.
|
||||
InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);
|
||||
|
||||
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
|
||||
|
||||
// Make the second NN the active one.
|
||||
cluster.shutdownNameNode(0);
|
||||
cluster.transitionToActive(1);
|
||||
|
||||
// Call a non-idempotent method, and ensure the failover of the call proceeds
|
||||
// successfully.
|
||||
IOUtils.closeStream(fs.create(TEST_FILE));
|
||||
}
|
||||
|
||||
private static class InjectingSocketFactory extends StandardSocketFactory {
|
||||
|
||||
static SocketFactory defaultFactory = SocketFactory.getDefault();
|
||||
|
||||
static int portToInjectOn;
|
||||
|
||||
@Override
|
||||
public Socket createSocket() throws IOException {
|
||||
Socket spy = Mockito.spy(defaultFactory.createSocket());
|
||||
// Simplify our spying job by not having to also spy on the channel
|
||||
Mockito.doReturn(null).when(spy).getChannel();
|
||||
// Throw a ConnectTimeoutException when connecting to our target "bad"
|
||||
// host.
|
||||
Mockito.doThrow(new ConnectTimeoutException("injected"))
|
||||
.when(spy).connect(
|
||||
Mockito.argThat(new MatchesPort()),
|
||||
Mockito.anyInt());
|
||||
return spy;
|
||||
}
|
||||
|
||||
private class MatchesPort extends BaseMatcher<SocketAddress> {
|
||||
@Override
|
||||
public boolean matches(Object arg0) {
|
||||
return ((InetSocketAddress)arg0).getPort() == portToInjectOn;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void describeTo(Description desc) {
|
||||
desc.appendText("matches port " + portToInjectOn);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Regression test for HDFS-2683.
|
||||
*/
|
||||
|
|
|
@ -23,7 +23,10 @@ import static org.junit.Assert.assertFalse;
|
|||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
import static org.mockito.Matchers.any;
|
||||
import static org.mockito.Matchers.anyBoolean;
|
||||
import static org.mockito.Matchers.anyLong;
|
||||
import static org.mockito.Matchers.anyObject;
|
||||
import static org.mockito.Matchers.anyShort;
|
||||
import static org.mockito.Matchers.anyString;
|
||||
import static org.mockito.Mockito.doAnswer;
|
||||
import static org.mockito.Mockito.mock;
|
||||
|
@ -49,13 +52,13 @@ import org.apache.commons.logging.LogFactory;
|
|||
import org.apache.commons.logging.impl.Log4JLogger;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
||||
import org.apache.hadoop.fs.CreateFlag;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.FileChecksum;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.UnresolvedLinkException;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.client.HdfsUtils;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
|
@ -64,12 +67,14 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
|||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
|
||||
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
|
||||
import org.apache.hadoop.io.EnumSetWritable;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.io.LongWritable;
|
||||
import org.apache.hadoop.io.Writable;
|
||||
|
@ -208,7 +213,7 @@ public class TestDFSClientRetries {
|
|||
* Verify that client will correctly give up after the specified number
|
||||
* of times trying to add a block
|
||||
*/
|
||||
@SuppressWarnings("serial")
|
||||
@SuppressWarnings({ "serial", "unchecked" })
|
||||
@Test
|
||||
public void testNotYetReplicatedErrors() throws IOException
|
||||
{
|
||||
|
@ -235,7 +240,22 @@ public class TestDFSClientRetries {
|
|||
when(mockNN.addBlock(anyString(),
|
||||
anyString(),
|
||||
any(ExtendedBlock.class),
|
||||
any(DatanodeInfo[].class))).thenAnswer(answer);
|
||||
any(DatanodeInfo[].class),
|
||||
anyLong())).thenAnswer(answer);
|
||||
|
||||
Mockito.doReturn(
|
||||
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
|
||||
(short) 777), "owner", "group", new byte[0], new byte[0],
|
||||
1010)).when(mockNN).getFileInfo(anyString());
|
||||
|
||||
Mockito.doReturn(
|
||||
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
|
||||
(short) 777), "owner", "group", new byte[0], new byte[0],
|
||||
1010))
|
||||
.when(mockNN)
|
||||
.create(anyString(), (FsPermission) anyObject(), anyString(),
|
||||
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
|
||||
anyShort(), anyLong());
|
||||
|
||||
final DFSClient client = new DFSClient(null, mockNN, conf, null);
|
||||
OutputStream os = client.create("testfile", true);
|
||||
|
@ -369,7 +389,8 @@ public class TestDFSClientRetries {
|
|||
return ret2;
|
||||
}
|
||||
}).when(spyNN).addBlock(Mockito.anyString(), Mockito.anyString(),
|
||||
Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any());
|
||||
Mockito.<ExtendedBlock> any(), Mockito.<DatanodeInfo[]> any(),
|
||||
Mockito.anyLong());
|
||||
|
||||
doAnswer(new Answer<Boolean>() {
|
||||
|
||||
|
@ -410,7 +431,8 @@ public class TestDFSClientRetries {
|
|||
// Make sure the mock was actually properly injected.
|
||||
Mockito.verify(spyNN, Mockito.atLeastOnce()).addBlock(
|
||||
Mockito.anyString(), Mockito.anyString(),
|
||||
Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any());
|
||||
Mockito.<ExtendedBlock> any(), Mockito.<DatanodeInfo[]> any(),
|
||||
Mockito.anyLong());
|
||||
Mockito.verify(spyNN, Mockito.atLeastOnce()).complete(
|
||||
Mockito.anyString(), Mockito.anyString(),
|
||||
Mockito.<ExtendedBlock>any());
|
||||
|
|
|
@@ -619,6 +619,16 @@ public class TestDFSUtil {

    assertEquals(1, uris.size());
    assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));

    // Make sure when config FS_DEFAULT_NAME_KEY using IP address,
    // it will automatically convert it to hostname
    conf = new HdfsConfiguration();
    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
    uris = DFSUtil.getNameServiceUris(conf);
    assertEquals(1, uris.size());
    for (URI uri : uris) {
      assertFalse(uri.getHost().equals("127.0.0.1"));
    }
  }

  @Test

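The behaviour those new assertions pin down, restated as a standalone sketch (illustrative only; it simply re-runs the same calls outside the test harness, assuming the one-argument getNameServiceUris overload used by the test):

import java.net.URI;
import java.util.Collection;

import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class NameServiceUriCheck {
  public static void main(String[] args) throws Exception {
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
        "hdfs://127.0.0.1:8020");
    // The returned URI should carry the resolved hostname, not 127.0.0.1.
    Collection<URI> uris = DFSUtil.getNameServiceUris(conf);
    for (URI uri : uris) {
      System.out.println(uri.getHost());
    }
  }
}
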
@ -71,6 +71,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
|
|||
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
|
||||
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeId;
|
||||
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
|
||||
import org.apache.hadoop.io.EnumSetWritable;
|
||||
|
@ -517,8 +518,8 @@ public class TestFileCreation {
|
|||
+ "The file has " + locations.locatedBlockCount() + " blocks.");
|
||||
|
||||
// add one block to the file
|
||||
LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
|
||||
client.clientName, null, null);
|
||||
LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
|
||||
client.clientName, null, null, INodeId.GRANDFATHER_INODE_ID);
|
||||
System.out.println("testFileCreationError2: "
|
||||
+ "Added block " + location.getBlock());
|
||||
|
||||
|
@ -568,8 +569,8 @@ public class TestFileCreation {
|
|||
final Path f = new Path("/foo.txt");
|
||||
createFile(dfs, f, 3);
|
||||
try {
|
||||
cluster.getNameNodeRpc().addBlock(f.toString(),
|
||||
client.clientName, null, null);
|
||||
cluster.getNameNodeRpc().addBlock(f.toString(), client.clientName,
|
||||
null, null, INodeId.GRANDFATHER_INODE_ID);
|
||||
fail();
|
||||
} catch(IOException ioe) {
|
||||
FileSystem.LOG.info("GOOD!", ioe);
|
||||
|
|
|
@ -43,7 +43,7 @@ public class TestFileLengthOnClusterRestart {
|
|||
.numDataNodes(2).build();
|
||||
HdfsDataInputStream in = null;
|
||||
try {
|
||||
Path path = new Path(MiniDFSCluster.getBaseDirectory(), "test");
|
||||
Path path = new Path("/tmp/TestFileLengthOnClusterRestart", "test");
|
||||
DistributedFileSystem dfs = (DistributedFileSystem) cluster
|
||||
.getFileSystem();
|
||||
FSDataOutputStream out = dfs.create(path);
|
||||
|
|
|
@ -88,7 +88,7 @@ public class TestGetBlocks {
|
|||
@Test
|
||||
public void testReadSelectNonStaleDatanode() throws Exception {
|
||||
HdfsConfiguration conf = new HdfsConfiguration();
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
|
||||
long staleInterval = 30 * 1000 * 60;
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
|
||||
staleInterval);
|
||||
|
|
|
@ -183,8 +183,7 @@ public class TestLargeBlock {
|
|||
try {
|
||||
|
||||
// create a new file in test data directory
|
||||
Path file1 = new Path(System.getProperty("test.build.data") + "/" +
|
||||
Long.toString(blockSize) + ".dat");
|
||||
Path file1 = new Path("/tmp/TestLargeBlock", blockSize + ".dat");
|
||||
FSDataOutputStream stm = createFile(fs, file1, 1, blockSize);
|
||||
LOG.info("File " + file1 + " created with file size " +
|
||||
fileSize +
|
||||
|
|
|
@ -18,6 +18,10 @@
|
|||
package org.apache.hadoop.hdfs;
|
||||
|
||||
import static org.mockito.Matchers.anyString;
|
||||
import static org.mockito.Matchers.anyShort;
|
||||
import static org.mockito.Matchers.anyLong;
|
||||
import static org.mockito.Matchers.anyBoolean;
|
||||
import static org.mockito.Matchers.anyObject;
|
||||
import static org.mockito.Mockito.doNothing;
|
||||
import static org.mockito.Mockito.doThrow;
|
||||
import static org.mockito.Mockito.spy;
|
||||
|
@ -29,14 +33,18 @@ import java.security.PrivilegedExceptionAction;
|
|||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.CreateFlag;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Options;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
|
||||
import org.apache.hadoop.io.EnumSetWritable;
|
||||
import org.apache.hadoop.ipc.RemoteException;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
|
||||
|
@ -256,6 +264,7 @@ public class TestLease {
|
|||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Test
|
||||
public void testFactory() throws Exception {
|
||||
final String[] groups = new String[]{"supergroup"};
|
||||
|
@ -264,6 +273,20 @@ public class TestLease {
|
|||
ugi[i] = UserGroupInformation.createUserForTesting("user" + i, groups);
|
||||
}
|
||||
|
||||
Mockito.doReturn(
|
||||
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
|
||||
(short) 777), "owner", "group", new byte[0], new byte[0],
|
||||
1010)).when(mcp).getFileInfo(anyString());
|
||||
Mockito
|
||||
.doReturn(
|
||||
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
|
||||
(short) 777), "owner", "group", new byte[0], new byte[0],
|
||||
1010))
|
||||
.when(mcp)
|
||||
.create(anyString(), (FsPermission) anyObject(), anyString(),
|
||||
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
|
||||
anyShort(), anyLong());
|
||||
|
||||
final Configuration conf = new Configuration();
|
||||
final DFSClient c1 = createDFSClientAs(ugi[0], conf);
|
||||
FSDataOutputStream out1 = createFsOut(c1, "/out1");
|
||||
|
|
|
@ -38,6 +38,7 @@ public class TestListFilesInDFS extends TestListFiles {
|
|||
|
||||
@BeforeClass
|
||||
public static void testSetUp() throws Exception {
|
||||
setTestPaths(new Path("/tmp/TestListFilesInDFS"));
|
||||
cluster = new MiniDFSCluster.Builder(conf).build();
|
||||
fs = cluster.getFileSystem();
|
||||
fs.delete(TEST_DIR, true);
|
||||
|
|
|
@@ -70,8 +70,8 @@ public class TestQuota {
      throw new DSQuotaExceededException(bytes, bytes);
    } catch(DSQuotaExceededException e) {

      assertEquals("The DiskSpace quota is exceeded: quota=1.0k " +
          "diskspace consumed=1.0k", e.getMessage());
      assertEquals("The DiskSpace quota is exceeded: quota = 1024 B = 1 KB"
          + " but diskspace consumed = 1024 B = 1 KB", e.getMessage());
    }
  }

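For clarity, a tiny sketch of the message format the updated assertion expects; the constructor arguments are the quota and the consumed diskspace in bytes, exactly as in the test above.

import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;

public class QuotaMessageDemo {
  public static void main(String[] args) {
    DSQuotaExceededException e = new DSQuotaExceededException(1024, 1024);
    // Per the updated assertion, this prints:
    //   The DiskSpace quota is exceeded: quota = 1024 B = 1 KB
    //   but diskspace consumed = 1024 B = 1 KB
    System.out.println(e.getMessage());
  }
}
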
@ -67,7 +67,7 @@ public class TestRBWBlockInvalidation {
|
|||
try {
|
||||
final FSNamesystem namesystem = cluster.getNamesystem();
|
||||
FileSystem fs = cluster.getFileSystem();
|
||||
Path testPath = new Path(MiniDFSCluster.getBaseDirectory(), "foo1");
|
||||
Path testPath = new Path("/tmp/TestRBWBlockInvalidation", "foo1");
|
||||
out = fs.create(testPath, (short) 2);
|
||||
out.writeBytes("HDFS-3157: " + testPath);
|
||||
out.hsync();
|
||||
|
|
|
@ -88,9 +88,11 @@ public class TestReplicationPolicy {
|
|||
"test.build.data", "build/test/data"), "dfs/");
|
||||
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
|
||||
new File(baseDir, "name").getPath());
|
||||
// Enable the checking for stale datanodes in the beginning
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
|
||||
|
||||
conf.setBoolean(
|
||||
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
|
||||
conf.setBoolean(
|
||||
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
|
||||
DFSTestUtil.formatNameNode(conf);
|
||||
namenode = new NameNode(conf);
|
||||
|
||||
|
@ -100,6 +102,8 @@ public class TestReplicationPolicy {
|
|||
// construct network topology
|
||||
for (int i=0; i < NUM_OF_DATANODES; i++) {
|
||||
cluster.add(dataNodes[i]);
|
||||
bm.getDatanodeManager().getHeartbeatManager().addDatanode(
|
||||
dataNodes[i]);
|
||||
}
|
||||
for (int i=0; i < NUM_OF_DATANODES; i++) {
|
||||
dataNodes[i].updateHeartbeat(
|
||||
|
@ -393,11 +397,11 @@ public class TestReplicationPolicy {
|
|||
throws Exception {
|
||||
try {
|
||||
namenode.getNamesystem().getBlockManager().getDatanodeManager()
|
||||
.setAvoidStaleDataNodesForWrite(true);
|
||||
.setNumStaleNodes(NUM_OF_DATANODES);
|
||||
testChooseTargetWithMoreThanAvailableNodes();
|
||||
} finally {
|
||||
namenode.getNamesystem().getBlockManager().getDatanodeManager()
|
||||
.setAvoidStaleDataNodesForWrite(false);
|
||||
.setNumStaleNodes(0);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -479,12 +483,12 @@ public class TestReplicationPolicy {
|
|||
|
||||
@Test
|
||||
public void testChooseTargetWithStaleNodes() throws Exception {
|
||||
// Enable avoidng writing to stale datanodes
|
||||
namenode.getNamesystem().getBlockManager().getDatanodeManager()
|
||||
.setAvoidStaleDataNodesForWrite(true);
|
||||
// Set dataNodes[0] as stale
|
||||
dataNodes[0].setLastUpdate(Time.now() - staleInterval - 1);
|
||||
|
||||
namenode.getNamesystem().getBlockManager()
|
||||
.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
|
||||
assertTrue(namenode.getNamesystem().getBlockManager()
|
||||
.getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
|
||||
DatanodeDescriptor[] targets;
|
||||
// We set the datanode[0] as stale, thus should choose datanode[1] since
|
||||
// datanode[1] is on the same rack with datanode[0] (writer)
|
||||
|
@ -503,9 +507,9 @@ public class TestReplicationPolicy {
|
|||
assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
|
||||
|
||||
// reset
|
||||
namenode.getNamesystem().getBlockManager().getDatanodeManager()
|
||||
.setAvoidStaleDataNodesForWrite(false);
|
||||
dataNodes[0].setLastUpdate(Time.now());
|
||||
namenode.getNamesystem().getBlockManager()
|
||||
.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -518,20 +522,20 @@ public class TestReplicationPolicy {
|
|||
*/
|
||||
@Test
|
||||
public void testChooseTargetWithHalfStaleNodes() throws Exception {
|
||||
// Enable stale datanodes checking
|
||||
namenode.getNamesystem().getBlockManager().getDatanodeManager()
|
||||
.setAvoidStaleDataNodesForWrite(true);
|
||||
// Set dataNodes[0], dataNodes[1], and dataNodes[2] as stale
|
||||
for (int i = 0; i < 3; i++) {
|
||||
dataNodes[i].setLastUpdate(Time.now() - staleInterval - 1);
|
||||
}
|
||||
namenode.getNamesystem().getBlockManager()
|
||||
.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
|
||||
|
||||
DatanodeDescriptor[] targets;
|
||||
targets = replicator.chooseTarget(filename, 0, dataNodes[0],
|
||||
new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
|
||||
assertEquals(targets.length, 0);
|
||||
|
||||
// We set the datanode[0] as stale, thus should choose datanode[1]
|
||||
// Since we have 6 datanodes total, stale nodes should
|
||||
// not be returned until we ask for more than 3 targets
|
||||
targets = replicator.chooseTarget(filename, 1, dataNodes[0],
|
||||
new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
|
||||
assertEquals(targets.length, 1);
|
||||
|
@ -557,18 +561,16 @@ public class TestReplicationPolicy {
|
|||
assertTrue(containsWithinRange(dataNodes[4], targets, 0, 3));
|
||||
assertTrue(containsWithinRange(dataNodes[5], targets, 0, 3));
|
||||
|
||||
// reset
|
||||
namenode.getNamesystem().getBlockManager().getDatanodeManager()
|
||||
.setAvoidStaleDataNodesForWrite(false);
|
||||
for (int i = 0; i < dataNodes.length; i++) {
|
||||
dataNodes[i].setLastUpdate(Time.now());
|
||||
}
|
||||
namenode.getNamesystem().getBlockManager()
|
||||
.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testChooseTargetWithMoreThanHalfStaleNodes() throws Exception {
|
||||
HdfsConfiguration conf = new HdfsConfiguration();
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
|
||||
conf.setBoolean(
|
||||
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
|
||||
String[] hosts = new String[]{"host1", "host2", "host3",
|
||||
|
@ -598,7 +600,7 @@ public class TestReplicationPolicy {
|
|||
.getBlockManager().getDatanodeManager().getNumStaleNodes();
|
||||
assertEquals(numStaleNodes, 2);
|
||||
assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager()
|
||||
.getDatanodeManager().isAvoidingStaleDataNodesForWrite());
|
||||
.getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
|
||||
// Call chooseTarget
|
||||
DatanodeDescriptor staleNodeInfo = miniCluster.getNameNode()
|
||||
.getNamesystem().getBlockManager().getDatanodeManager()
|
||||
|
@ -627,7 +629,7 @@ public class TestReplicationPolicy {
|
|||
// According to our strategy, stale datanodes will be included for writing
|
||||
// to avoid hotspots
|
||||
assertFalse(miniCluster.getNameNode().getNamesystem().getBlockManager()
|
||||
.getDatanodeManager().isAvoidingStaleDataNodesForWrite());
|
||||
.getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
|
||||
// Call chooseTarget
|
||||
targets = replicator.chooseTarget(filename, 3,
|
||||
staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
|
||||
|
@ -650,7 +652,7 @@ public class TestReplicationPolicy {
|
|||
.getBlockManager().getDatanodeManager().getNumStaleNodes();
|
||||
assertEquals(numStaleNodes, 2);
|
||||
assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager()
|
||||
.getDatanodeManager().isAvoidingStaleDataNodesForWrite());
|
||||
.getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
|
||||
// Call chooseTarget
|
||||
targets = replicator.chooseTarget(filename, 3,
|
||||
staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
|
||||
|
|
|
@@ -506,7 +506,11 @@ public abstract class FSImageTestUtil {
    props.load(fis);
    IOUtils.closeStream(fis);

    props.setProperty(key, value);
    if (value == null || value.isEmpty()) {
      props.remove(key);
    } else {
      props.setProperty(key, value);
    }

    out = new FileOutputStream(versionFile);
    props.store(out, null);

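The intent of the change above, restated as a plain java.util.Properties sketch (illustrative only): an empty or null value now removes the key from the VERSION properties instead of storing an empty entry.

import java.util.Properties;

public class VersionPropsDemo {
  static void setOrRemove(Properties props, String key, String value) {
    if (value == null || value.isEmpty()) {
      props.remove(key);        // drop the field entirely
    } else {
      props.setProperty(key, value);
    }
  }

  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("blockpoolID", "BP-1");
    setOrRemove(props, "blockpoolID", "");
    System.out.println(props.containsKey("blockpoolID")); // false
  }
}
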
@@ -1058,7 +1058,8 @@ public class NNThroughputBenchmark {
        throws IOException {
      ExtendedBlock prevBlock = null;
      for(int jdx = 0; jdx < blocksPerFile; jdx++) {
        LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName, prevBlock, null);
        LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName,
            prevBlock, null, INodeId.GRANDFATHER_INODE_ID);
        prevBlock = loc.getBlock();
        for(DatanodeInfo dnInfo : loc.getLocations()) {
          int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());

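A short sketch of the updated RPC signature as the callers in this patch use it (illustrative wrapper only): excludeNodes is passed as null here, and callers that do not track a real inode id use INodeId.GRANDFATHER_INODE_ID for the new trailing argument.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

public class AddBlockCall {
  static LocatedBlock addNextBlock(NamenodeProtocols namenode, String src,
      String clientName, ExtendedBlock previous) throws IOException {
    // The new trailing argument carries the file's inode id.
    return namenode.addBlock(src, clientName, previous, null,
        INodeId.GRANDFATHER_INODE_ID);
  }
}
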
@ -0,0 +1,141 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hdfs.server.namenode;
|
||||
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.mockito.Mockito.doAnswer;
|
||||
import static org.mockito.Mockito.spy;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
import java.util.EnumSet;
|
||||
import java.util.HashMap;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.CreateFlag;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
|
||||
import org.apache.hadoop.io.EnumSetWritable;
|
||||
import org.apache.hadoop.net.Node;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.invocation.InvocationOnMock;
|
||||
import org.mockito.stubbing.Answer;
|
||||
|
||||
/**
|
||||
* Race between two threads simultaneously calling
|
||||
* FSNamesystem.getAdditionalBlock().
|
||||
*/
|
||||
public class TestAddBlockRetry {
|
||||
public static final Log LOG = LogFactory.getLog(TestAddBlockRetry.class);
|
||||
|
||||
private static final short REPLICATION = 3;
|
||||
|
||||
private Configuration conf;
|
||||
private MiniDFSCluster cluster;
|
||||
|
||||
private int count = 0;
|
||||
private LocatedBlock lb1;
|
||||
private LocatedBlock lb2;
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
conf = new Configuration();
|
||||
cluster = new MiniDFSCluster.Builder(conf)
|
||||
.numDataNodes(REPLICATION)
|
||||
.build();
|
||||
cluster.waitActive();
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
if (cluster != null) {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Retry addBlock() while another thread is in chooseTarget().
|
||||
* See HDFS-4452.
|
||||
*/
|
||||
@Test
|
||||
public void testRetryAddBlockWhileInChooseTarget() throws Exception {
|
||||
final String src = "/testRetryAddBlockWhileInChooseTarget";
|
||||
|
||||
FSNamesystem ns = cluster.getNamesystem();
|
||||
BlockManager spyBM = spy(ns.getBlockManager());
|
||||
final NamenodeProtocols nn = cluster.getNameNodeRpc();
|
||||
|
||||
// substitute mocked BlockManager into FSNamesystem
|
||||
Class<? extends FSNamesystem> nsClass = ns.getClass();
|
||||
Field bmField = nsClass.getDeclaredField("blockManager");
|
||||
bmField.setAccessible(true);
|
||||
bmField.set(ns, spyBM);
|
||||
|
||||
doAnswer(new Answer<DatanodeDescriptor[]>() {
|
||||
@Override
|
||||
public DatanodeDescriptor[] answer(InvocationOnMock invocation)
|
||||
throws Throwable {
|
||||
LOG.info("chooseTarget for " + src);
|
||||
DatanodeDescriptor[] ret =
|
||||
(DatanodeDescriptor[]) invocation.callRealMethod();
|
||||
count++;
|
||||
if(count == 1) { // run second addBlock()
|
||||
LOG.info("Starting second addBlock for " + src);
|
||||
nn.addBlock(src, "clientName", null, null,
|
||||
INodeId.GRANDFATHER_INODE_ID);
|
||||
LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
|
||||
assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
|
||||
lb2 = lbs.get(0);
|
||||
assertEquals("Wrong replication",
|
||||
REPLICATION, lb2.getLocations().length);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
}).when(spyBM).chooseTarget(Mockito.anyString(), Mockito.anyInt(),
|
||||
Mockito.<DatanodeDescriptor>any(), Mockito.<HashMap<Node, Node>>any(),
|
||||
Mockito.anyLong());
|
||||
|
||||
// create file
|
||||
nn.create(src, FsPermission.getFileDefault(),
|
||||
"clientName",
|
||||
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
|
||||
true, (short)3, 1024);
|
||||
|
||||
// start first addBlock()
|
||||
LOG.info("Starting first addBlock for " + src);
|
||||
nn.addBlock(src, "clientName", null, null, INodeId.GRANDFATHER_INODE_ID);
|
||||
|
||||
// check locations
|
||||
LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
|
||||
assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
|
||||
lb1 = lbs.get(0);
|
||||
assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
|
||||
assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
|
||||
}
|
||||
}
|
|
@ -0,0 +1,134 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.server.namenode;
|
||||
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.URL;
|
||||
|
||||
import org.apache.commons.lang.StringEscapeUtils;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.BlockLocation;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.junit.Test;
|
||||
|
||||
/**
|
||||
* DFS_HOSTS and DFS_HOSTS_EXCLUDE tests
|
||||
*
|
||||
*/
|
||||
public class TestHostsFiles {
|
||||
private static final Log LOG =
|
||||
LogFactory.getLog(TestHostsFiles.class.getName());
|
||||
|
||||
/*
|
||||
* Return a configuration object with low timeouts for testing and
|
||||
* a topology script set (which enables rack awareness).
|
||||
*/
|
||||
private Configuration getConf() {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
|
||||
// Lower the heart beat interval so the NN quickly learns of dead
|
||||
// or decommissioned DNs and the NN issues replication and invalidation
|
||||
// commands quickly (as replies to heartbeats)
|
||||
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
|
||||
|
||||
// Have the NN ReplicationMonitor compute the replication and
|
||||
// invalidation commands to send DNs every second.
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
|
||||
// Have the NN check for pending replications every second so it
|
||||
// quickly schedules additional replicas as they are identified.
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
|
||||
|
||||
// The DNs report blocks every second.
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
|
||||
|
||||
// Indicates we have multiple racks
|
||||
conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "xyz");
|
||||
return conf;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testHostsExcludeDfshealthJsp() throws Exception {
|
||||
Configuration conf = getConf();
|
||||
short REPLICATION_FACTOR = 2;
|
||||
final Path filePath = new Path("/testFile");
|
||||
|
||||
// Configure an excludes file
|
||||
FileSystem localFileSys = FileSystem.getLocal(conf);
|
||||
Path workingDir = localFileSys.getWorkingDirectory();
|
||||
Path dir = new Path(workingDir, "build/test/data/temp/decommission");
|
||||
Path excludeFile = new Path(dir, "exclude");
|
||||
Path includeFile = new Path(dir, "include");
|
||||
assertTrue(localFileSys.mkdirs(dir));
|
||||
DFSTestUtil.writeFile(localFileSys, excludeFile, "");
|
||||
DFSTestUtil.writeFile(localFileSys, includeFile, "");
|
||||
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
|
||||
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
|
||||
|
||||
// Two blocks and four racks
|
||||
String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
|
||||
.numDataNodes(racks.length).racks(racks).build();
|
||||
final FSNamesystem ns = cluster.getNameNode().getNamesystem();
|
||||
|
||||
try {
|
||||
// Create a file with one block
|
||||
final FileSystem fs = cluster.getFileSystem();
|
||||
DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
|
||||
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
|
||||
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
|
||||
|
||||
// Decommission one of the hosts with the block, this should cause
|
||||
// the block to get replicated to another host on the same rack,
|
||||
// otherwise the rack policy is violated.
|
||||
BlockLocation locs[] = fs.getFileBlockLocations(
|
||||
fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
|
||||
String name = locs[0].getNames()[0];
|
||||
String names = name + "\n" + "localhost:42\n";
|
||||
LOG.info("adding '" + names + "' to exclude file " + excludeFile.toUri().getPath());
|
||||
DFSTestUtil.writeFile(localFileSys, excludeFile, name);
|
||||
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
|
||||
DFSTestUtil.waitForDecommission(fs, name);
|
||||
|
||||
// Check the block still has sufficient # replicas across racks
|
||||
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
|
||||
|
||||
InetSocketAddress nnHttpAddress = cluster.getNameNode().getHttpAddress();
|
||||
LOG.info("nnaddr = '" + nnHttpAddress + "'");
|
||||
URL nnjsp = new URL("http://" + nnHttpAddress.getHostName() + ":" + nnHttpAddress.getPort() + "/dfshealth.jsp");
|
||||
LOG.info("fetching " + nnjsp);
|
||||
String dfshealthPage = StringEscapeUtils.unescapeHtml(DFSTestUtil.urlGet(nnjsp));
|
||||
LOG.info("got " + dfshealthPage);
|
||||
assertTrue("dfshealth should contain localhost, got:" + dfshealthPage,
|
||||
dfshealthPage.contains("localhost"));
|
||||
|
||||
} finally {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
}
|
|
@ -24,8 +24,10 @@ import static org.junit.Assert.fail;
|
|||
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.util.EnumSet;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.CreateFlag;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Options;
|
||||
|
@ -39,6 +41,8 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
|
|||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
|
||||
import org.apache.hadoop.io.EnumSetWritable;
|
||||
import org.junit.Test;
|
||||
|
||||
public class TestINodeFile {
|
||||
|
@ -376,7 +380,7 @@ public class TestINodeFile {
|
|||
* @throws IOException
|
||||
*/
|
||||
@Test
|
||||
public void TestInodeId() throws IOException {
|
||||
public void testInodeId() throws IOException {
|
||||
|
||||
Configuration conf = new Configuration();
|
||||
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
|
||||
|
@ -396,9 +400,14 @@ public class TestINodeFile {
|
|||
assertTrue(fs.mkdirs(path));
|
||||
assertTrue(fsn.getLastInodeId() == 1002);
|
||||
|
||||
Path filePath = new Path("/test1/file");
|
||||
fs.create(filePath);
|
||||
// Use namenode rpc to create a file
|
||||
NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
|
||||
HdfsFileStatus fileStatus = nnrpc.create("/test1/file", new FsPermission(
|
||||
(short) 0755), "client",
|
||||
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
|
||||
(short) 1, 128 * 1024 * 1024L);
|
||||
assertTrue(fsn.getLastInodeId() == 1003);
|
||||
assertTrue(fileStatus.getFileId() == 1003);
|
||||
|
||||
// Rename doesn't increase inode id
|
||||
Path renamedPath = new Path("/test2");
|
||||
|
@ -412,4 +421,44 @@ public class TestINodeFile {
|
|||
cluster.waitActive();
|
||||
assertTrue(fsn.getLastInodeId() == 1003);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testWriteToRenamedFile() throws IOException {
|
||||
|
||||
Configuration conf = new Configuration();
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
|
||||
.build();
|
||||
cluster.waitActive();
|
||||
FileSystem fs = cluster.getFileSystem();
|
||||
|
||||
Path path = new Path("/test1");
|
||||
assertTrue(fs.mkdirs(path));
|
||||
|
||||
int size = conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
|
||||
byte[] data = new byte[size];
|
||||
|
||||
// Create one file
|
||||
Path filePath = new Path("/test1/file");
|
||||
FSDataOutputStream fos = fs.create(filePath);
|
||||
|
||||
// Rename /test1 to test2, and recreate /test1/file
|
||||
Path renamedPath = new Path("/test2");
|
||||
fs.rename(path, renamedPath);
|
||||
fs.create(filePath, (short) 1);
|
||||
|
||||
// Add new block should fail since /test1/file has a different fileId
|
||||
try {
|
||||
fos.write(data, 0, data.length);
|
||||
// make sure addBlock() request gets to NN immediately
|
||||
fos.hflush();
|
||||
|
||||
fail("Write should fail after rename");
|
||||
} catch (Exception e) {
|
||||
/* Ignore */
|
||||
} finally {
|
||||
if (cluster != null) {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,9 +17,12 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.server.namenode;
|
||||
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.junit.Test;
|
||||
import org.junit.Before;
|
||||
|
@ -51,7 +54,7 @@ public class TestSecondaryNameNodeUpgrade {
|
|||
}
|
||||
}
|
||||
|
||||
private void doIt(String param, String val) throws IOException {
|
||||
private void doIt(Map<String, String> paramsToCorrupt) throws IOException {
|
||||
MiniDFSCluster cluster = null;
|
||||
FileSystem fs = null;
|
||||
SecondaryNameNode snn = null;
|
||||
|
@ -76,8 +79,12 @@ public class TestSecondaryNameNodeUpgrade {
|
|||
snn.shutdown();
|
||||
|
||||
for (File versionFile : versionFiles) {
|
||||
System.out.println("Changing '" + param + "' to '" + val + "' in " + versionFile);
|
||||
FSImageTestUtil.corruptVersionFile(versionFile, param, val);
|
||||
for (Map.Entry<String, String> paramToCorrupt : paramsToCorrupt.entrySet()) {
|
||||
String param = paramToCorrupt.getKey();
|
||||
String val = paramToCorrupt.getValue();
|
||||
System.out.println("Changing '" + param + "' to '" + val + "' in " + versionFile);
|
||||
FSImageTestUtil.corruptVersionFile(versionFile, param, val);
|
||||
}
|
||||
}
|
||||
|
||||
snn = new SecondaryNameNode(conf);
|
||||
|
@ -94,13 +101,19 @@ public class TestSecondaryNameNodeUpgrade {
|
|||
|
||||
@Test
|
||||
public void testUpgradeLayoutVersionSucceeds() throws IOException {
|
||||
doIt("layoutVersion", "-39");
|
||||
doIt(ImmutableMap.of("layoutVersion", "-39"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUpgradePreFedSucceeds() throws IOException {
|
||||
doIt(ImmutableMap.of("layoutVersion", "-19", "clusterID", "",
|
||||
"blockpoolID", ""));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testChangeNsIDFails() throws IOException {
|
||||
try {
|
||||
doIt("namespaceID", "2");
|
||||
doIt(ImmutableMap.of("namespaceID", "2"));
|
||||
Assert.fail("Should throw InconsistentFSStateException");
|
||||
} catch(IOException e) {
|
||||
GenericTestUtils.assertExceptionContains("Inconsistent checkpoint fields", e);
|
||||
|
|
|
@ -82,7 +82,7 @@ public class TestNameNodeMetrics {
|
|||
CONF.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY,
|
||||
"" + PERCENTILES_INTERVAL);
|
||||
// Enable stale DataNodes checking
|
||||
CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
|
||||
CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
|
||||
((Log4JLogger)LogFactory.getLog(MetricsAsserts.class))
|
||||
.getLogger().setLevel(Level.DEBUG);
|
||||
}
|
||||
|
|
|
@ -24,6 +24,7 @@ import org.apache.hadoop.fs.Path;
|
|||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeId;
|
||||
import org.apache.hadoop.util.Time;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
|
@ -42,9 +43,10 @@ public class TestJsonUtil {
|
|||
public void testHdfsFileStatus() {
|
||||
final long now = Time.now();
|
||||
final String parent = "/dir";
|
||||
final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L<<26,
|
||||
now, now + 10, new FsPermission((short)0644), "user", "group",
|
||||
DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"));
|
||||
final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
|
||||
now, now + 10, new FsPermission((short) 0644), "user", "group",
|
||||
DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
|
||||
INodeId.GRANDFATHER_INODE_ID);
|
||||
final FileStatus fstatus = toFileStatus(status, parent);
|
||||
System.out.println("status = " + status);
|
||||
System.out.println("fstatus = " + fstatus);
|
||||
|
|
|
@ -1182,7 +1182,7 @@
|
|||
</comparator>
|
||||
<comparator>
|
||||
<type>RegexpComparator</type>
|
||||
<expected-output>^1\.0k\s+hdfs:///dir0/data1k</expected-output>
|
||||
<expected-output>^1\.0 K\s+hdfs:///dir0/data1k</expected-output>
|
||||
</comparator>
|
||||
</comparators>
|
||||
</test>
|
||||
|
@ -15590,7 +15590,7 @@
|
|||
<comparators>
|
||||
<comparator>
|
||||
<type>RegexpComparator</type>
|
||||
<expected-output>put: The DiskSpace quota of /dir1 is exceeded: quota=1.0k diskspace consumed=[0-9.]+[kmg]*</expected-output>
|
||||
<expected-output>put: The DiskSpace quota of /dir1 is exceeded: quota = 1024 B = 1 KB but diskspace consumed = [0-9]+ B = [0-9.]+ [KMG]B*</expected-output>
|
||||
</comparator>
|
||||
</comparators>
|
||||
</test>
|
||||
|
|
|
@@ -152,7 +152,22 @@ Trunk (Unreleased)
    MAPREDUCE-4884. Streaming tests fail to start MiniMRCluster due to missing
    queue configuration. (Chris Nauroth via suresh)

Release 2.0.3-alpha - Unreleased
Release 2.0.4-beta - UNRELEASED

  INCOMPATIBLE CHANGES

  NEW FEATURES

  IMPROVEMENTS

  OPTIMIZATIONS

  BUG FIXES

    MAPREDUCE-4671. AM does not tell the RM about container requests which are
    no longer needed. (Bikas Saha via sseth)

Release 2.0.3-alpha - 2013-02-06

  INCOMPATIBLE CHANGES

@ -215,6 +230,12 @@ Release 2.0.3-alpha - Unreleased
|
|||
MAPREDUCE-4838. Add additional fields like Locality, Avataar to the
|
||||
JobHistory logs. (Zhijie Shen via sseth)
|
||||
|
||||
MAPREDUCE-4971. Minor extensibility enhancements to Counters &
|
||||
FileOutputFormat. (Arun C Murthy via sseth)
|
||||
|
||||
MAPREDUCE-4977. Documentation for pluggable shuffle and pluggable sort.
|
||||
(tucu)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
MAPREDUCE-4893. Fixed MR ApplicationMaster to do optimal assignment of
|
||||
|
@ -284,6 +305,8 @@ Release 2.0.3-alpha - Unreleased
|
|||
MAPREDUCE-4969. TestKeyValueTextInputFormat test fails with Open JDK 7.
|
||||
(Arpit Agarwal via suresh)
|
||||
|
||||
MAPREDUCE-4953. HadoopPipes misuses fprintf. (Andy Isaacson via atm)
|
||||
|
||||
Release 2.0.2-alpha - 2012-09-07
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
@ -668,11 +691,17 @@ Release 0.23.7 - UNRELEASED
|
|||
|
||||
IMPROVEMENTS
|
||||
|
||||
MAPREDUCE-4905. test org.apache.hadoop.mapred.pipes
|
||||
(Aleksey Gorshkov via bobby)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
MAPREDUCE-4946. Fix a performance problem for large jobs by reducing the
|
||||
number of map completion event type conversions. (Jason Lowe via sseth)
|
||||
|
||||
MAPREDUCE-4822. Unnecessary conversions in History Events. (Chu Tong via
|
||||
jlowe)
|
||||
|
||||
BUG FIXES
|
||||
|
||||
MAPREDUCE-4458. Warn if java.library.path is used for AM or Task
|
||||
|
|
|
@ -72,7 +72,10 @@ public abstract class RMContainerRequestor extends RMCommunicator {
|
|||
remoteRequestsTable =
|
||||
new TreeMap<Priority, Map<String, Map<Resource, ResourceRequest>>>();
|
||||
|
||||
private final Set<ResourceRequest> ask = new TreeSet<ResourceRequest>();
|
||||
// use custom comparator to make sure ResourceRequest objects differing only in
|
||||
// numContainers dont end up as duplicates
|
||||
private final Set<ResourceRequest> ask = new TreeSet<ResourceRequest>(
|
||||
new org.apache.hadoop.yarn.util.BuilderUtils.ResourceRequestComparator());
|
||||
private final Set<ContainerId> release = new TreeSet<ContainerId>();
|
||||
|
||||
private boolean nodeBlacklistingEnabled;
|
||||
|
@ -235,7 +238,7 @@ public abstract class RMContainerRequestor extends RMCommunicator {
|
|||
ResourceRequest zeroedRequest = BuilderUtils.newResourceRequest(req);
|
||||
zeroedRequest.setNumContainers(0);
|
||||
// to be sent to RM on next heartbeat
|
||||
ask.add(zeroedRequest);
|
||||
addResourceRequestToAsk(zeroedRequest);
|
||||
}
|
||||
}
|
||||
// if all requests were still in ask queue
|
||||
|
@ -320,7 +323,7 @@ public abstract class RMContainerRequestor extends RMCommunicator {
|
|||
remoteRequest.setNumContainers(remoteRequest.getNumContainers() + 1);
|
||||
|
||||
// Note this down for next interaction with ResourceManager
|
||||
ask.add(remoteRequest);
|
||||
addResourceRequestToAsk(remoteRequest);
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("addResourceRequest:" + " applicationId="
|
||||
+ applicationId.getId() + " priority=" + priority.getPriority()
|
||||
|
@ -353,7 +356,12 @@ public abstract class RMContainerRequestor extends RMCommunicator {
|
|||
+ remoteRequest.getNumContainers() + " #asks=" + ask.size());
|
||||
}
|
||||
|
||||
remoteRequest.setNumContainers(remoteRequest.getNumContainers() -1);
|
||||
if(remoteRequest.getNumContainers() > 0) {
|
||||
// based on blacklisting comments above we can end up decrementing more
|
||||
// than requested. so guard for that.
|
||||
remoteRequest.setNumContainers(remoteRequest.getNumContainers() -1);
|
||||
}
|
||||
|
||||
if (remoteRequest.getNumContainers() == 0) {
|
||||
reqMap.remove(capability);
|
||||
if (reqMap.size() == 0) {
|
||||
|
@ -362,13 +370,12 @@ public abstract class RMContainerRequestor extends RMCommunicator {
|
|||
if (remoteRequests.size() == 0) {
|
||||
remoteRequestsTable.remove(priority);
|
||||
}
|
||||
//remove from ask if it may have
|
||||
ask.remove(remoteRequest);
|
||||
} else {
|
||||
ask.add(remoteRequest);//this will override the request if ask doesn't
|
||||
//already have it.
|
||||
}
|
||||
|
||||
// send the updated resource request to RM
|
||||
// send 0 container count requests also to cancel previous requests
|
||||
addResourceRequestToAsk(remoteRequest);
|
||||
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.info("AFTER decResourceRequest:" + " applicationId="
|
||||
+ applicationId.getId() + " priority=" + priority.getPriority()
|
||||
|
@@ -376,6 +383,16 @@
          + remoteRequest.getNumContainers() + " #asks=" + ask.size());
    }
  }

  private void addResourceRequestToAsk(ResourceRequest remoteRequest) {
    // because objects inside the resource map can be deleted ask can end up
    // containing an object that matches new resource object but with different
    // numContainers. So existing values must be replaced explicitly
    if(ask.contains(remoteRequest)) {
      ask.remove(remoteRequest);
    }
    ask.add(remoteRequest);
  }

  protected void release(ContainerId containerId) {
    release.add(containerId);

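Why the remove-before-add dance is needed, shown with plain java.util types (standalone sketch, no Hadoop classes involved): a TreeSet silently ignores add() when its comparator already considers an equal element present, so the entry carrying the stale numContainers would otherwise survive in the ask set.

import java.util.Comparator;
import java.util.TreeSet;

public class TreeSetReplaceExample {
  static class Req {
    final String host; final int numContainers;
    Req(String host, int numContainers) {
      this.host = host; this.numContainers = numContainers;
    }
    @Override public String toString() {
      return host + ":" + numContainers;
    }
  }

  public static void main(String[] args) {
    // The comparator deliberately ignores numContainers, like the
    // ResourceRequestComparator referenced in the change above.
    TreeSet<Req> ask = new TreeSet<Req>(new Comparator<Req>() {
      @Override public int compare(Req a, Req b) {
        return a.host.compareTo(b.host);
      }
    });
    ask.add(new Req("node1", 3));

    ask.add(new Req("node1", 0));      // no-op: an "equal" entry already exists
    System.out.println(ask.first());   // node1:3

    ask.remove(new Req("node1", 0));   // remove-then-add actually replaces it
    ask.add(new Req("node1", 0));
    System.out.println(ask.first());   // node1:0
  }
}
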
@ -167,6 +167,7 @@ public class TestRMContainerAllocator {
|
|||
List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
|
||||
dispatcher.await();
|
||||
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
|
||||
Assert.assertEquals(4, rm.getMyFifoScheduler().lastAsk.size());
|
||||
|
||||
// send another request with different resource and priority
|
||||
ContainerRequestEvent event3 = createReq(jobId, 3, 1024,
|
||||
|
@ -178,7 +179,8 @@ public class TestRMContainerAllocator {
|
|||
assigned = allocator.schedule();
|
||||
dispatcher.await();
|
||||
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
|
||||
|
||||
Assert.assertEquals(3, rm.getMyFifoScheduler().lastAsk.size());
|
||||
|
||||
// update resources in scheduler
|
||||
nodeManager1.nodeHeartbeat(true); // Node heartbeat
|
||||
nodeManager2.nodeHeartbeat(true); // Node heartbeat
|
||||
|
@ -187,8 +189,14 @@ public class TestRMContainerAllocator {
|
|||
|
||||
assigned = allocator.schedule();
|
||||
dispatcher.await();
|
||||
Assert.assertEquals(0, rm.getMyFifoScheduler().lastAsk.size());
|
||||
checkAssignments(new ContainerRequestEvent[] { event1, event2, event3 },
|
||||
assigned, false);
|
||||
|
||||
// check that the assigned container requests are cancelled
|
||||
assigned = allocator.schedule();
|
||||
dispatcher.await();
|
||||
Assert.assertEquals(5, rm.getMyFifoScheduler().lastAsk.size());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -422,7 +430,7 @@ public class TestRMContainerAllocator {
|
|||
}
|
||||
|
||||
private static class MyResourceManager extends MockRM {
|
||||
|
||||
|
||||
public MyResourceManager(Configuration conf) {
|
||||
super(conf);
|
||||
}
|
||||
|
@ -446,6 +454,10 @@ public class TestRMContainerAllocator {
|
|||
protected ResourceScheduler createScheduler() {
|
||||
return new MyFifoScheduler(this.getRMContext());
|
||||
}
|
||||
|
||||
MyFifoScheduler getMyFifoScheduler() {
|
||||
return (MyFifoScheduler) scheduler;
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -1194,7 +1206,9 @@ public class TestRMContainerAllocator {
|
|||
assert (false);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
List<ResourceRequest> lastAsk = null;
|
||||
|
||||
// override this to copy the objects otherwise FifoScheduler updates the
|
||||
// numContainers in same objects as kept by RMContainerAllocator
|
||||
@Override
|
||||
|
@ -1208,6 +1222,7 @@ public class TestRMContainerAllocator {
|
|||
.getNumContainers());
|
||||
askCopy.add(reqCopy);
|
||||
}
|
||||
lastAsk = ask;
|
||||
return super.allocate(applicationAttemptId, askCopy, release);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -60,7 +60,7 @@ import org.apache.hadoop.yarn.Clock;
|
|||
import org.apache.hadoop.yarn.SystemClock;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationId;
|
||||
import org.apache.hadoop.yarn.event.EventHandler;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.InlineDispatcher;
|
||||
import org.apache.hadoop.yarn.event.InlineDispatcher;
|
||||
import org.apache.hadoop.yarn.util.Records;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
|
|
|
@ -230,6 +230,10 @@ public class Counters
|
|||
public static class Group implements CounterGroupBase<Counter> {
|
||||
private CounterGroupBase<Counter> realGroup;
|
||||
|
||||
protected Group() {
|
||||
realGroup = null;
|
||||
}
|
||||
|
||||
Group(GenericGroup group) {
|
||||
this.realGroup = group;
|
||||
}
|
||||
|
|
|
@@ -92,7 +92,7 @@ public class FileOutputCommitter extends OutputCommitter {
}

@Private
Path getTaskAttemptPath(TaskAttemptContext context) throws IOException {
public Path getTaskAttemptPath(TaskAttemptContext context) throws IOException {
Path out = getOutputPath(context);
return out == null ? null : getTaskAttemptPath(context, out);
}

@@ -22,6 +22,7 @@ import java.io.IOException;
import java.text.NumberFormat;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

@@ -152,8 +153,8 @@ public abstract class FileOutputFormat<K, V> implements OutputFormat<K, V> {
* @param outputDir the {@link Path} of the output directory
* for the map-reduce job.
*/
static void setWorkOutputPath(JobConf conf, Path outputDir) {
@Private
public static void setWorkOutputPath(JobConf conf, Path outputDir) {
outputDir = new Path(conf.getWorkingDirectory(), outputDir);
conf.set(JobContext.TASK_OUTPUT_DIR, outputDir.toString());
}

@@ -28,6 +28,7 @@ import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;

@@ -419,7 +420,8 @@ public class JobConf extends Configuration {
return credentials;
}

void setCredentials(Credentials credentials) {
@Private
public void setCredentials(Credentials credentials) {
this.credentials = credentials;
}
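Several of the hunks above apply the same change: a package-private framework helper (getTaskAttemptPath, setWorkOutputPath, setCredentials) is widened to public so that code in other framework packages can call it, and annotated with InterfaceAudience.Private so it still does not become part of the user-facing API. A minimal sketch of that annotation pattern on a hypothetical class:

import org.apache.hadoop.classification.InterfaceAudience.Private;

// Hypothetical class, shown only to illustrate the visibility-plus-annotation
// pattern used in the hunks above.
public class FrameworkHelper {

  // Was effectively package-private before; now public so other framework
  // packages can reach it, while @Private marks it as not for end users.
  @Private
  public void internalHook() {
    // framework-internal work would go here
  }
}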
@@ -58,6 +58,7 @@ import org.apache.hadoop.mapred.lib.LazyOutputFormat;
import org.apache.hadoop.mapred.lib.NullOutputFormat;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;

@@ -515,7 +516,7 @@ public class Submitter extends Configured implements Tool {
*/
public static void main(String[] args) throws Exception {
int exitCode = new Submitter().run(args);
System.exit(exitCode);
ExitUtil.terminate(exitCode);
}

}
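The Submitter hunk replaces the direct System.exit call with ExitUtil.terminate. In normal runs the process still exits with the same code, but tests that call ExitUtil.disableSystemExit() can intercept the termination instead of losing the JVM. A sketch of that usage, assuming ExitUtil's disableSystemExit/ExitException behaviour and with a hypothetical invocation:

import org.apache.hadoop.mapred.pipes.Submitter;
import org.apache.hadoop.util.ExitUtil;

// Illustrative only: shows how a test could observe Submitter's exit code
// once main() routes through ExitUtil.
public class SubmitterExitSketch {
  public static void main(String[] args) throws Exception {
    ExitUtil.disableSystemExit();        // terminate() now throws instead of exiting
    try {
      Submitter.main(new String[0]);     // hypothetical invocation with no arguments
    } catch (ExitUtil.ExitException e) {
      // The would-be process exit code is carried on the exception.
      System.out.println("Submitter attempted to exit with status " + e.status);
    }
  }
}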
@@ -35,6 +35,7 @@ import com.google.common.collect.Iterators;
import com.google.common.collect.Maps;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.FileSystemCounter;

@@ -72,6 +73,16 @@ public abstract class FileSystemCounterGroup<C extends Counter>
this.scheme = scheme;
key = ref;
}

@Private
public String getScheme() {
return scheme;
}

@Private
public FileSystemCounter getFileSystemCounter() {
return key;
}

@Override
public String getName() {

@@ -29,6 +29,7 @@ import java.util.Iterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.util.ResourceBundles;

@@ -66,7 +67,17 @@ public abstract class FrameworkCounterGroup<T extends Enum<T>,
key = ref;
this.groupName = groupName;
}

@Private
public T getKey() {
return key;
}

@Private
public String getGroupName() {
return groupName;
}

@Override
public String getName() {
return key.name();