Merge trunk into HDFS-347 branch.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-347@1446832 13f79535-47bb-0310-9956-ffa450edef68
Aaron Myers 2013-02-16 01:12:07 +00:00
commit f61581501a
215 changed files with 6810 additions and 3684 deletions

View File

@ -91,13 +91,19 @@
<includes>
<include>${project.artifactId}-${project.version}.jar</include>
<include>${project.artifactId}-${project.version}-tests.jar</include>
<include>${project.artifactId}-${project.version}-sources.jar</include>
<include>${project.artifactId}-${project.version}-test-sources.jar</include>
</includes>
<excludes>
<exclude>hadoop-tools-dist-*.jar</exclude>
</excludes>
</fileSet>
<fileSet>
<directory>${project.build.directory}</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>
<includes>
<include>${project.artifactId}-${project.version}-sources.jar</include>
<include>${project.artifactId}-${project.version}-test-sources.jar</include>
</includes>
</fileSet>
<fileSet>
<directory>${basedir}/dev-support/jdiff</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/jdiff</outputDirectory>

View File

@ -146,8 +146,9 @@ Trunk (Unreleased)
HADOOP-9162. Add utility to check native library availability.
(Binglin Chang via suresh)
HADOOP-8924. Add maven plugin alternative to shell script to save
package-info.java. (Chris Nauroth via suresh)
HADOOP-9277. Improve javadoc for FileContext. (Andrew Wang via suresh)
HADOOP-9218 Document the Rpc-wrappers used internally (sanjay Radia)
BUG FIXES
@ -319,24 +320,60 @@ Trunk (Unreleased)
HADOOP-9202. test-patch.sh fails during mvn eclipse:eclipse if patch adds
a new module to the build (Chris Nauroth via bobby)
HADOOP-9245. mvn clean without running mvn install before fails.
(Karthik Kambatla via suresh)
HADOOP-9249. hadoop-maven-plugins version-info goal causes build failure
when running with Clover. (Chris Nauroth via suresh)
HADOOP-9264. Port change to use Java untar API on Windows from
branch-1-win to trunk. (Chris Nauroth via suresh)
OPTIMIZATIONS
HADOOP-7761. Improve the performance of raw comparisons. (todd)
HADOOP-8589 ViewFs tests fail when tests and home dirs are nested (sanjay Radia)
HADOOP-9190. packaging docs is broken. (Andy Isaacson via atm)
Release 2.0.4-beta - UNRELEASED
INCOMPATIBLE CHANGES
NEW FEATURES
HADOOP-9283. Add support for running the Hadoop client on AIX. (atm)
IMPROVEMENTS
HADOOP-9253. Capture ulimit info in the logs at service start time.
(Arpit Gupta via suresh)
HADOOP-8924. Add maven plugin alternative to shell script to save
package-info.java. (Chris Nauroth via suresh)
HADOOP-9117. replace protoc ant plugin exec with a maven plugin. (tucu)
OPTIMIZATIONS
BUG FIXES
HADOOP-9294. GetGroupsTestBase fails on Windows. (Chris Nauroth via suresh)
HADOOP-9305. Add support for running the Hadoop client on 64-bit AIX. (atm)
HADOOP-9245. mvn clean without running mvn install before fails.
(Karthik Kambatla via suresh)
HADOOP-9246 Execution phase for hadoop-maven-plugin should be
process-resources (Karthik Kambatla and Chris Nauroth via jlowe)
HADOOP-9190. packaging docs is broken. (Andy Isaacson via atm)
HADOOP-9297. remove old record IO generation and tests. (tucu)
Release 2.0.3-alpha - Unreleased
HADOOP-9154. SortedMapWritable#putAll() doesn't add key/value classes to
the map. (Karthik Kambatla via tomwhite)
HADOOP-9304. remove addition of avro generated-sources dirs to build. (tucu)
Release 2.0.3-alpha - 2013-02-06
INCOMPATIBLE CHANGES
@ -464,6 +501,9 @@ Release 2.0.3-alpha - Unreleased
HADOOP-9231. Parametrize staging URL for the uniformity of
distributionManagement. (Konstantin Boudnik via suresh)
HADOOP-9276. Allow BoundedByteArrayOutputStream to be resettable.
(Arun Murthy via hitesh)
OPTIMIZATIONS
HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang
@ -588,6 +628,24 @@ Release 2.0.3-alpha - Unreleased
HADOOP-9221. Convert remaining xdocs to APT. (Andy Isaacson via atm)
HADOOP-8981. TestMetricsSystemImpl fails on Windows. (Xuan Gong via suresh)
HADOOP-9124. SortedMapWritable violates contract of Map interface for
equals() and hashCode(). (Surenkumar Nihalani via tomwhite)
HADOOP-9252. In StringUtils, humanReadableInt(..) has a race condition and
the synchronization of limitDecimalTo2(double) can be avoided. (szetszwo)
HADOOP-9260. Hadoop version may be not correct when starting name node or
data node. (Chris Nauroth via jlowe)
HADOOP-9278. Fix the file handle leak in HarMetaData.parseMetaData() in
HarFileSystem. (Chris Nauroth via szetszwo)
HADOOP-9289. FsShell rm -f fails for non-matching globs. (Daryn Sharp via
suresh)
Release 2.0.2-alpha - 2012-09-07
INCOMPATIBLE CHANGES
@ -1289,10 +1347,19 @@ Release 0.23.7 - UNRELEASED
HADOOP-8849. FileUtil#fullyDelete should grant the target directories +rwx
permissions (Ivan A. Veselovsky via bobby)
HADOOP-9067. provide test for LocalFileSystem.reportChecksumFailure
(Ivan A. Veselovsky via bobby)
OPTIMIZATIONS
BUG FIXES
HADOOP-9302. HDFS docs not linked from top level (Andy Isaacson via
tgraves)
HADOOP-9303. command manual dfsadmin missing entry for restoreFailedStorage
option (Andy Isaacson via tgraves)
Release 0.23.6 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -286,6 +286,10 @@
<!-- protobuf generated code -->
<Class name="~org\.apache\.hadoop\.security\.proto\.SecurityProtos.*"/>
</Match>
<Match>
<!-- protobuf generated code -->
<Class name="~org\.apache\.hadoop\.ipc\.protobuf\.TestProtos.*"/>
</Match>
<!--
Manually checked, misses child thread manually syncing on parent's intrinsic lock.

View File

@ -241,6 +241,11 @@
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-compress</artifactId>
<version>1.4</version>
</dependency>
</dependencies>
<build>
@ -288,6 +293,51 @@
</source>
</configuration>
</execution>
<execution>
<id>compile-protoc</id>
<phase>generate-sources</phase>
<goals>
<goal>protoc</goal>
</goals>
<configuration>
<imports>
<param>${basedir}/src/main/proto</param>
</imports>
<source>
<directory>${basedir}/src/main/proto</directory>
<includes>
<include>HAServiceProtocol.proto</include>
<include>IpcConnectionContext.proto</include>
<include>ProtocolInfo.proto</include>
<include>RpcHeader.proto</include>
<include>ZKFCProtocol.proto</include>
<include>ProtobufRpcEngine.proto</include>
<include>Security.proto</include>
</includes>
</source>
<output>${project.build.directory}/generated-sources/java</output>
</configuration>
</execution>
<execution>
<id>compile-test-protoc</id>
<phase>generate-test-sources</phase>
<goals>
<goal>protoc</goal>
</goals>
<configuration>
<imports>
<param>${basedir}/src/test/proto</param>
</imports>
<source>
<directory>${basedir}/src/test/proto</directory>
<includes>
<include>test.proto</include>
<include>test_rpc_service.proto</include>
</includes>
</source>
<output>${project.build.directory}/generated-test-sources/java</output>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
@ -326,39 +376,6 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<executions>
<execution>
<id>create-protobuf-generated-sources-directory</id>
<phase>initialize</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<mkdir dir="target/generated-sources/java" />
<mkdir dir="target/generated-test-sources/java" />
</target>
</configuration>
</execution>
<execution>
<id>generate-test-sources</id>
<phase>generate-test-sources</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<mkdir dir="${project.build.directory}/generated-test-sources/java"/>
<taskdef name="recordcc" classname="org.apache.hadoop.record.compiler.ant.RccTask">
<classpath refid="maven.compile.classpath"/>
</taskdef>
<recordcc destdir="${project.build.directory}/generated-test-sources/java">
<fileset dir="${basedir}/src/test/ddl" includes="**/*.jr"/>
</recordcc>
</target>
</configuration>
</execution>
<execution>
<id>create-log-dir</id>
<phase>process-test-resources</phase>
@ -381,6 +398,23 @@
</target>
</configuration>
</execution>
<execution>
<id>copy-test-tarballs</id>
<phase>process-test-resources</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<copy toDir="${test.cache.data}">
<fileset dir="${basedir}/src/test/java/org/apache/hadoop/fs">
<include name="test-untar.tar"/>
<include name="test-untar.tgz"/>
</fileset>
</copy>
</target>
</configuration>
</execution>
<execution>
<phase>pre-site</phase>
<goals>
@ -395,79 +429,6 @@
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<executions>
<execution>
<id>compile-proto</id>
<phase>generate-sources</phase>
<goals>
<goal>exec</goal>
</goals>
<configuration>
<executable>protoc</executable>
<arguments>
<argument>-Isrc/main/proto/</argument>
<argument>--java_out=target/generated-sources/java</argument>
<argument>src/main/proto/HAServiceProtocol.proto</argument>
<argument>src/main/proto/IpcConnectionContext.proto</argument>
<argument>src/main/proto/ProtocolInfo.proto</argument>
<argument>src/main/proto/RpcHeader.proto</argument>
<argument>src/main/proto/ZKFCProtocol.proto</argument>
<argument>src/main/proto/ProtobufRpcEngine.proto</argument>
<argument>src/main/proto/Security.proto</argument>
</arguments>
</configuration>
</execution>
<execution>
<id>compile-test-proto</id>
<phase>generate-test-sources</phase>
<goals>
<goal>exec</goal>
</goals>
<configuration>
<executable>protoc</executable>
<arguments>
<argument>-Isrc/test/proto/</argument>
<argument>--java_out=target/generated-test-sources/java</argument>
<argument>src/test/proto/test.proto</argument>
<argument>src/test/proto/test_rpc_service.proto</argument>
</arguments>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>build-helper-maven-plugin</artifactId>
<executions>
<execution>
<id>add-source</id>
<phase>generate-sources</phase>
<goals>
<goal>add-source</goal>
</goals>
<configuration>
<sources>
<source>${project.build.directory}/generated-sources/java</source>
</sources>
</configuration>
</execution>
<execution>
<id>add-test-source</id>
<phase>generate-sources</phase>
<goals>
<goal>add-test-source</goal>
</goals>
<configuration>
<sources>
<source>${project.build.directory}/generated-test-sources/java</source>
</sources>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.rat</groupId>
<artifactId>apache-rat-plugin</artifactId>
@ -485,6 +446,7 @@
<exclude>src/test/all-tests</exclude>
<exclude>src/test/resources/kdc/ldif/users.ldif</exclude>
<exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c</exclude>
<exclude>src/test/java/org/apache/hadoop/fs/test-untar.tgz</exclude>
</excludes>
</configuration>
</plugin>

View File

@ -84,6 +84,7 @@ if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_
export HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
export HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
export HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
starting_secure_dn="true"
fi
if [ "$HADOOP_IDENT_STRING" = "" ]; then
@ -154,7 +155,17 @@ case $startStop in
;;
esac
echo $! > $pid
sleep 1; head "$log"
sleep 1
# capture the ulimit output
if [ "true" = "$starting_secure_dn" ]; then
echo "ulimit -a for secure datanode user $HADOOP_SECURE_DN_USER" >> $log
# capture the ulimit info for the appropriate user
su --shell=/bin/bash $HADOOP_SECURE_DN_USER -c 'ulimit -a' >> $log 2>&1
else
echo "ulimit -a for user $USER" >> $log
ulimit -a >> $log 2>&1
fi
head -30 "$log"
sleep 3;
if ! ps -p $! > /dev/null ; then
exit 1

View File

@ -57,70 +57,60 @@ import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ShutdownHookManager;
/**
* The FileContext class provides an interface to the application writer for
* using the Hadoop file system.
* It provides a set of methods for the usual operation: create, open,
* list, etc
* The FileContext class provides an interface for users of the Hadoop
* file system. It exposes a number of file system operations, e.g. create,
* open, list.
*
* <p>
* <b> *** Path Names *** </b>
* <p>
* <h2>Path Names</h2>
*
* The Hadoop file system supports a URI name space and URI names.
* It offers a forest of file systems that can be referenced using fully
* qualified URIs.
* Two common Hadoop file systems implementations are
* The Hadoop file system supports a URI namespace and URI names. This enables
* multiple types of file systems to be referenced using fully-qualified URIs.
* Two common Hadoop file system implementations are
* <ul>
* <li>the local file system: file:///path
* <li> the hdfs file system hdfs://nnAddress:nnPort/path
* <li>the HDFS file system: hdfs://nnAddress:nnPort/path
* </ul>
*
* While URI names are very flexible, it requires knowing the name or address
* of the server. For convenience one often wants to access the default system
* in one's environment without knowing its name/address. This has an
* additional benefit that it allows one to change one's default fs
* (e.g. admin moves application from cluster1 to cluster2).
* The Hadoop file system also supports additional naming schemes besides URIs.
* Hadoop has the concept of a <i>default file system</i>, which implies a
* default URI scheme and authority. This enables <i>slash-relative names</i>
* relative to the default FS, which are more convenient for users and
* application writers. The default FS is typically set by the user's
* environment, though it can also be manually specified.
* <p>
*
* To facilitate this, Hadoop supports a notion of a default file system.
* The user can set his default file system, although this is
* typically set up for you in your environment via your default config.
* A default file system implies a default scheme and authority; slash-relative
* names (such as /for/bar) are resolved relative to that default FS.
* Similarly a user can also have working-directory-relative names (i.e. names
* not starting with a slash). While the working directory is generally in the
* same default FS, the wd can be in a different FS.
* Hadoop also supports <i>working-directory-relative</i> names, which are paths
* relative to the current working directory (similar to Unix). The working
* directory can be in a different file system than the default FS.
* <p>
* Hence Hadoop path names can be one of:
* Thus, Hadoop path names can be specified as one of the following:
* <ul>
* <li> fully qualified URI: scheme://authority/path
* <li> slash relative names: /path relative to the default file system
* <li> wd-relative names: path relative to the working dir
* <li>a fully-qualified URI: scheme://authority/path (e.g.
* hdfs://nnAddress:nnPort/foo/bar)
* <li>a slash-relative name: path relative to the default file system (e.g.
* /foo/bar)
* <li>a working-directory-relative name: path relative to the working dir (e.g.
* foo/bar)
* </ul>
* Relative paths with scheme (scheme:foo/bar) are illegal.
*
* <p>
* <b>****The Role of the FileContext and configuration defaults****</b>
* <p>
* The FileContext provides file namespace context for resolving file names;
* it also contains the umask for permissions, In that sense it is like the
* per-process file-related state in Unix system.
* These two properties
* <ul>
* <li> default file system i.e your slash)
* <li> umask
* </ul>
* in general, are obtained from the default configuration file
* in your environment, (@see {@link Configuration}).
* <h2>Role of FileContext and Configuration Defaults</h2>
*
* No other configuration parameters are obtained from the default config as
* far as the file context layer is concerned. All file system instances
* (i.e. deployments of file systems) have default properties; we call these
* server side (SS) defaults. Operation like create allow one to select many
* properties: either pass them in as explicit parameters or use
* the SS properties.
* The FileContext is the analogue of per-process file-related state in Unix. It
* contains two properties:
*
* <ul>
* <li>the default file system (for resolving slash-relative names)
* <li>the umask (for file permissions)
* </ul>
* In general, these properties are obtained from the default configuration file
* in the user's environment (see {@link Configuration}).
*
* Further file system properties are specified on the server-side. File system
* operations default to using these server-side defaults unless otherwise
* specified.
* <p>
* The file system related SS defaults are
* The file system related server-side defaults are:
* <ul>
* <li> the home directory (default is "/user/userName")
* <li> the initial wd (only for local fs)
@ -131,34 +121,34 @@ import org.apache.hadoop.util.ShutdownHookManager;
* <li> checksum option. (checksumType and bytesPerChecksum)
* </ul>
*
* <p>
* <b> *** Usage Model for the FileContext class *** </b>
* <p>
* <h2>Example Usage</h2>
*
* Example 1: use the default config read from the $HADOOP_CONFIG/core.xml.
* Unspecified values come from core-defaults.xml in the release jar.
* <ul>
* <li> myFContext = FileContext.getFileContext(); // uses the default config
* // which has your default FS
* <li> myFContext.create(path, ...);
* <li> myFContext.setWorkingDir(path)
* <li> myFContext.setWorkingDir(path);
* <li> myFContext.open (path, ...);
* <li>...
* </ul>
* Example 2: Get a FileContext with a specific URI as the default FS
* <ul>
* <li> myFContext = FileContext.getFileContext(URI)
* <li> myFContext = FileContext.getFileContext(URI);
* <li> myFContext.create(path, ...);
* ...
* <li>...
* </ul>
* Example 3: FileContext with local file system as the default
* <ul>
* <li> myFContext = FileContext.getLocalFSFileContext()
* <li> myFContext = FileContext.getLocalFSFileContext();
* <li> myFContext.create(path, ...);
* <li> ...
* </ul>
* Example 4: Use a specific config, ignoring $HADOOP_CONFIG
* Generally you should not need to use a config unless you are doing
* <ul>
* <li> configX = someConfigSomeOnePassedToYou.
* <li> configX = someConfigSomeOnePassedToYou;
* <li> myFContext = getFileContext(configX); // configX is not changed,
* // is passed down
* <li> myFContext.create(path, ...);
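
A minimal, self-contained sketch of Example 1 above (the working directory and file name are hypothetical; the calls are the FileContext operations this javadoc describes):

import java.util.EnumSet;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class FileContextSketch {
  public static void main(String[] args) throws Exception {
    // Uses the default config, which supplies the default FS (the "slash").
    FileContext fc = FileContext.getFileContext();
    fc.setWorkingDirectory(new Path("/user/alice"));   // hypothetical wd
    // "report.txt" is a working-directory-relative name.
    FSDataOutputStream out = fc.create(new Path("report.txt"),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));
    out.writeUTF("hello");
    out.close();
  }
}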

View File

@ -21,9 +21,12 @@ package org.apache.hadoop.fs;
import java.io.*;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.zip.GZIPInputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -630,8 +633,22 @@ public class FileUtil {
}
}
StringBuilder untarCommand = new StringBuilder();
boolean gzipped = inFile.toString().endsWith("gz");
if(Shell.WINDOWS) {
// Tar is not native to Windows. Use simple Java based implementation for
// tests and simple tar archives
unTarUsingJava(inFile, untarDir, gzipped);
}
else {
// spawn tar utility to untar archive for full fledged unix behavior such
// as resolving symlinks in tar archives
unTarUsingTar(inFile, untarDir, gzipped);
}
}
private static void unTarUsingTar(File inFile, File untarDir,
boolean gzipped) throws IOException {
StringBuffer untarCommand = new StringBuffer();
if (gzipped) {
untarCommand.append(" gzip -dc '");
untarCommand.append(FileUtil.makeShellPath(inFile));
@ -657,6 +674,61 @@ public class FileUtil {
}
}
private static void unTarUsingJava(File inFile, File untarDir,
boolean gzipped) throws IOException {
InputStream inputStream = null;
if (gzipped) {
inputStream = new BufferedInputStream(new GZIPInputStream(
new FileInputStream(inFile)));
} else {
inputStream = new BufferedInputStream(new FileInputStream(inFile));
}
TarArchiveInputStream tis = new TarArchiveInputStream(inputStream);
for (TarArchiveEntry entry = tis.getNextTarEntry(); entry != null;) {
unpackEntries(tis, entry, untarDir);
entry = tis.getNextTarEntry();
}
}
private static void unpackEntries(TarArchiveInputStream tis,
TarArchiveEntry entry, File outputDir) throws IOException {
if (entry.isDirectory()) {
File subDir = new File(outputDir, entry.getName());
if (!subDir.mkdir() && !subDir.isDirectory()) {
throw new IOException("Mkdirs failed to create tar internal dir "
+ outputDir);
}
for (TarArchiveEntry e : entry.getDirectoryEntries()) {
unpackEntries(tis, e, subDir);
}
return;
}
File outputFile = new File(outputDir, entry.getName());
if (!outputDir.exists()) {
if (!outputDir.mkdirs()) {
throw new IOException("Mkdirs failed to create tar internal dir "
+ outputDir);
}
}
int count;
byte data[] = new byte[2048];
BufferedOutputStream outputStream = new BufferedOutputStream(
new FileOutputStream(outputFile));
while ((count = tis.read(data)) != -1) {
outputStream.write(data, 0, count);
}
outputStream.flush();
outputStream.close();
}
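
For reference, a self-contained sketch (not the committed code) of the Java untar path, using the same commons-compress API this patch adds as a dependency:

import java.io.*;
import java.util.zip.GZIPInputStream;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;

public class UntarSketch {
  public static void untar(File archive, File destDir, boolean gzipped)
      throws IOException {
    InputStream in = new BufferedInputStream(new FileInputStream(archive));
    if (gzipped) {
      in = new GZIPInputStream(in);   // transparently inflate .tgz/.tar.gz
    }
    TarArchiveInputStream tis = new TarArchiveInputStream(in);
    try {
      for (TarArchiveEntry e = tis.getNextTarEntry(); e != null;
           e = tis.getNextTarEntry()) {
        File out = new File(destDir, e.getName());
        if (e.isDirectory()) {
          out.mkdirs();
          continue;
        }
        out.getParentFile().mkdirs();
        OutputStream os = new BufferedOutputStream(new FileOutputStream(out));
        try {
          byte[] buf = new byte[2048];
          for (int n; (n = tis.read(buf)) != -1; ) {
            os.write(buf, 0, n);
          }
        } finally {
          os.close();
        }
      }
    } finally {
      tis.close();
    }
  }
}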
/**
* Class for creating hardlinks.
* Supports Unix, Cygwin, WindXP.

View File

@ -30,8 +30,11 @@ import java.util.TreeMap;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.Progressable;
@ -50,6 +53,9 @@ import org.apache.hadoop.util.Progressable;
*/
public class HarFileSystem extends FilterFileSystem {
private static final Log LOG = LogFactory.getLog(HarFileSystem.class);
public static final int VERSION = 3;
private static final Map<URI, HarMetaData> harMetaCache =
@ -1025,12 +1031,18 @@ public class HarFileSystem extends FilterFileSystem {
}
private void parseMetaData() throws IOException {
FSDataInputStream in = fs.open(masterIndexPath);
Text line;
long read;
FSDataInputStream in = null;
LineReader lin = null;
try {
in = fs.open(masterIndexPath);
FileStatus masterStat = fs.getFileStatus(masterIndexPath);
masterIndexTimestamp = masterStat.getModificationTime();
LineReader lin = new LineReader(in, getConf());
Text line = new Text();
long read = lin.readLine(line);
lin = new LineReader(in, getConf());
line = new Text();
read = lin.readLine(line);
// the first line contains the version of the index file
String versionLine = line.toString();
@ -1055,14 +1067,12 @@ public class HarFileSystem extends FilterFileSystem {
endHash));
line.clear();
}
try {
// close the master index
lin.close();
} catch(IOException io){
// do nothing just a read.
} finally {
IOUtils.cleanup(LOG, lin, in);
}
FSDataInputStream aIn = fs.open(archiveIndexPath);
try {
FileStatus archiveStat = fs.getFileStatus(archiveIndexPath);
archiveIndexTimestamp = archiveStat.getModificationTime();
LineReader aLin;
@ -1082,11 +1092,8 @@ public class HarFileSystem extends FilterFileSystem {
line.clear();
}
}
try {
// close the archive index
aIn.close();
} catch(IOException io) {
// do nothing just a read.
} finally {
IOUtils.cleanup(LOG, aIn);
}
}
}
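
The fix above replaces the ad-hoc close() calls with IOUtils.cleanup in finally blocks. A minimal sketch of the pattern, assuming fs, path, conf and LOG are in scope as in the surrounding class:

FSDataInputStream in = null;
LineReader lin = null;
try {
  in = fs.open(path);
  lin = new LineReader(in, conf);
  // ... parse the index ...
} finally {
  // Null-safe; close() failures are logged instead of masking the
  // original exception or leaking the file handle.
  IOUtils.cleanup(LOG, lin, in);
}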

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.fs.shell;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -28,6 +29,7 @@ import org.apache.hadoop.fs.PathIOException;
import org.apache.hadoop.fs.PathIsDirectoryException;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.PathNotFoundException;
import org.apache.hadoop.fs.Trash;
/**
@ -71,6 +73,19 @@ class Delete {
skipTrash = cf.getOpt("skipTrash");
}
@Override
protected List<PathData> expandArgument(String arg) throws IOException {
try {
return super.expandArgument(arg);
} catch (PathNotFoundException e) {
if (!ignoreFNF) {
throw e;
}
// prevent -f on a non-existent glob from failing
return new LinkedList<PathData>();
}
}
@Override
protected void processNonexistentPath(PathData item) throws IOException {
if (!ignoreFNF) super.processNonexistentPath(item);
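
The effect, sketched with FsShell (the non-matching glob is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;

public class RmForceSketch {
  public static void main(String[] args) throws Exception {
    FsShell shell = new FsShell(new Configuration());
    System.out.println(shell.run(new String[] {"-rm", "nomatch*"}));       // 1, prints an error
    System.out.println(shell.run(new String[] {"-rm", "-f", "nomatch*"})); // 0, stays silent
  }
}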

View File

@ -48,7 +48,7 @@ class FsUsage extends FsCommand {
protected String formatSize(long size) {
return humanReadable
? StringUtils.humanReadableInt(size)
? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
: String.valueOf(size);
}

View File

@ -67,7 +67,7 @@ class Ls extends FsCommand {
protected boolean humanReadable = false;
protected String formatSize(long size) {
return humanReadable
? StringUtils.humanReadableInt(size)
? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
: String.valueOf(size);
}

View File

@ -29,6 +29,8 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import com.google.common.annotations.VisibleForTesting;
/**
* Abstract base class for MapWritable and SortedMapWritable
*
@ -45,10 +47,12 @@ public abstract class AbstractMapWritable implements Writable, Configurable {
private AtomicReference<Configuration> conf;
/* Class to id mappings */
private Map<Class, Byte> classToIdMap = new ConcurrentHashMap<Class, Byte>();
@VisibleForTesting
Map<Class, Byte> classToIdMap = new ConcurrentHashMap<Class, Byte>();
/* Id to Class mappings */
private Map<Byte, Class> idToClassMap = new ConcurrentHashMap<Byte, Class>();
@VisibleForTesting
Map<Byte, Class> idToClassMap = new ConcurrentHashMap<Byte, Class>();
/* The number of new classes (those not established by the constructor) */
private volatile byte newClasses = 0;

View File

@ -32,9 +32,10 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class BoundedByteArrayOutputStream extends OutputStream {
private final byte[] buffer;
private byte[] buffer;
private int startOffset;
private int limit;
private int count;
private int currentPointer;
/**
* Create a BoundedByteArrayOutputStream with the specified
@ -52,20 +53,30 @@ public class BoundedByteArrayOutputStream extends OutputStream {
* @param limit The maximum limit up to which data can be written
*/
public BoundedByteArrayOutputStream(int capacity, int limit) {
this(new byte[capacity], 0, limit);
}
protected BoundedByteArrayOutputStream(byte[] buf, int offset, int limit) {
resetBuffer(buf, offset, limit);
}
protected void resetBuffer(byte[] buf, int offset, int limit) {
int capacity = buf.length - offset;
if ((capacity < limit) || (capacity | limit) < 0) {
throw new IllegalArgumentException("Invalid capacity/limit");
}
this.buffer = new byte[capacity];
this.limit = limit;
this.count = 0;
this.buffer = buf;
this.startOffset = offset;
this.currentPointer = offset;
this.limit = offset + limit;
}
@Override
public void write(int b) throws IOException {
if (count >= limit) {
if (currentPointer >= limit) {
throw new EOFException("Reaching the limit of the buffer.");
}
buffer[count++] = (byte) b;
buffer[currentPointer++] = (byte) b;
}
@Override
@ -77,12 +88,12 @@ public class BoundedByteArrayOutputStream extends OutputStream {
return;
}
if (count + len > limit) {
if (currentPointer + len > limit) {
throw new EOFException("Reach the limit of the buffer");
}
System.arraycopy(b, off, buffer, count, len);
count += len;
System.arraycopy(b, off, buffer, currentPointer, len);
currentPointer += len;
}
/**
@ -90,17 +101,17 @@ public class BoundedByteArrayOutputStream extends OutputStream {
* @param newlim New Limit
*/
public void reset(int newlim) {
if (newlim > buffer.length) {
if (newlim > (buffer.length - startOffset)) {
throw new IndexOutOfBoundsException("Limit exceeds buffer size");
}
this.limit = newlim;
this.count = 0;
this.currentPointer = startOffset;
}
/** Reset the buffer */
public void reset() {
this.limit = buffer.length;
this.count = 0;
this.limit = buffer.length - startOffset;
this.currentPointer = startOffset;
}
/** Return the current limit */
@ -119,6 +130,10 @@ public class BoundedByteArrayOutputStream extends OutputStream {
* currently in the buffer.
*/
public int size() {
return count;
return currentPointer - startOffset;
}
public int available() {
return limit - currentPointer;
}
}
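
A small sketch of the reworked bookkeeping: size() and available() are now tracked relative to the buffer's start offset, and reset() restores the full capacity:

import org.apache.hadoop.io.BoundedByteArrayOutputStream;

public class BoundedStreamSketch {
  public static void main(String[] args) throws Exception {
    BoundedByteArrayOutputStream os = new BoundedByteArrayOutputStream(8, 4);
    os.write(1);
    os.write(2);
    System.out.println(os.size());      // 2: bytes written so far
    System.out.println(os.available()); // 2: room left before the limit of 4
    os.reset();                         // limit back to capacity 8, size to 0
    System.out.println(os.size() + " " + os.available()); // 0 8
  }
}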

View File

@ -141,7 +141,7 @@ public class SortedMapWritable extends AbstractMapWritable
for (Map.Entry<? extends WritableComparable, ? extends Writable> e:
t.entrySet()) {
instance.put(e.getKey(), e.getValue());
put(e.getKey(), e.getValue());
}
}
@ -203,4 +203,27 @@ public class SortedMapWritable extends AbstractMapWritable
e.getValue().write(out);
}
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof SortedMapWritable) {
Map map = (Map) obj;
if (size() != map.size()) {
return false;
}
return entrySet().equals(map.entrySet());
}
return false;
}
@Override
public int hashCode() {
return instance.hashCode();
}
}
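
Why the putAll fix matters, in a sketch: put() registers each key/value class in AbstractMapWritable's class/id maps, which the Writable encoding needs; writing into the backing map directly (instance.put) skipped that registration, so a copy built via putAll could not be serialized correctly.

import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.SortedMapWritable;
import org.apache.hadoop.io.Text;

public class SortedMapWritableSketch {
  public static void main(String[] args) throws Exception {
    SortedMapWritable original = new SortedMapWritable();
    original.put(new Text("k"), new Text("v"));
    SortedMapWritable copy = new SortedMapWritable();
    copy.putAll(original);              // now registers Text via put()
    DataOutputBuffer out = new DataOutputBuffer();
    copy.write(out);                    // serializes; class ids are known
    System.out.println(copy.equals(original)); // true, via the new equals()
  }
}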

View File

@ -55,6 +55,9 @@ public enum Errno {
EPIPE,
EDOM,
ERANGE,
ELOOP,
ENAMETOOLONG,
ENOTEMPTY,
UNKNOWN;
}

View File

@ -17,6 +17,7 @@
*/
package org.apache.hadoop.io.nativeio;
import java.io.File;
import java.io.FileDescriptor;
import java.io.IOException;
import java.util.Map;
@ -293,4 +294,35 @@ public class NativeIO {
stat.group = getName(IdCache.GROUP, stat.groupId);
return stat;
}
/**
* A version of renameTo that throws a descriptive exception when it fails.
*
* @param src The source path
* @param dst The destination path
*
* @throws NativeIOException On failure.
*/
public static void renameTo(File src, File dst)
throws IOException {
if (!nativeLoaded) {
if (!src.renameTo(dst)) {
throw new IOException("renameTo(src=" + src + ", dst=" +
dst + ") failed.");
}
} else {
renameTo0(src.getAbsolutePath(), dst.getAbsolutePath());
}
}
/**
* A version of renameTo that throws a descriptive exception when it fails.
*
* @param src The source path
* @param dst The destination path
*
* @throws NativeIOException On failure.
*/
private static native void renameTo0(String src, String dst)
throws NativeIOException;
}
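
Usage sketch for the new call (paths are hypothetical): it falls back to File.renameTo when the native library is absent, and otherwise surfaces the errno in the exception instead of a bare boolean.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.io.nativeio.NativeIO;

public class RenameSketch {
  public static void main(String[] args) {
    File src = new File("/tmp/a");   // hypothetical paths
    File dst = new File("/tmp/b");
    try {
      NativeIO.renameTo(src, dst);   // native rename(2) when available
    } catch (IOException e) {
      // With the native library loaded this is a NativeIOException that
      // describes the failure (e.g. ENOTEMPTY from the new mappings).
      System.err.println("rename failed: " + e);
    }
  }
}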

View File

@ -35,6 +35,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.net.ConnectTimeoutException;
/**
* <p>
@ -543,6 +544,7 @@ public class RetryPolicies {
e instanceof NoRouteToHostException ||
e instanceof UnknownHostException ||
e instanceof StandbyException ||
e instanceof ConnectTimeoutException ||
isWrappedStandbyException(e)) {
return new RetryAction(
RetryAction.RetryDecision.FAILOVER_AND_RETRY,

View File

@ -67,6 +67,7 @@ import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto.OperationProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;
import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SaslRpcClient;
@ -511,14 +512,14 @@ public class Client {
}
this.socket.setSoTimeout(pingInterval);
return;
} catch (SocketTimeoutException toe) {
} catch (ConnectTimeoutException toe) {
/* Check for an address change and update the local reference.
* Reset the failure counter if the address was changed
*/
if (updateAddress()) {
timeoutFailures = ioFailures = 0;
}
handleConnectionFailure(timeoutFailures++,
handleConnectionTimeout(timeoutFailures++,
maxRetriesOnSocketTimeouts, toe);
} catch (IOException ie) {
if (updateAddress()) {
@ -680,7 +681,7 @@ public class Client {
socket = null;
}
/* Handle connection failures
/* Handle connection failures due to timeout on connect
*
* If the current number of retries is equal to the max number of retries,
* stop retrying and throw the exception; Otherwise backoff 1 second and
@ -694,7 +695,7 @@ public class Client {
* @param ioe failure reason
* @throws IOException if max number of retries is reached
*/
private void handleConnectionFailure(
private void handleConnectionTimeout(
int curRetries, int maxRetries, IOException ioe) throws IOException {
closeConnection();

View File

@ -62,7 +62,7 @@ public class ProtobufRpcEngine implements RpcEngine {
static { // Register the rpcRequest deserializer for WritableRpcEngine
org.apache.hadoop.ipc.Server.registerProtocolEngine(
RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcRequestWritable.class,
RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcRequestWrapper.class,
new Server.ProtoBufRpcInvoker());
}
@ -122,7 +122,7 @@ public class ProtobufRpcEngine implements RpcEngine {
public Invoker(Class<?> protocol, Client.ConnectionId connId,
Configuration conf, SocketFactory factory) {
this.remoteId = connId;
this.client = CLIENTS.getClient(conf, factory, RpcResponseWritable.class);
this.client = CLIENTS.getClient(conf, factory, RpcResponseWrapper.class);
this.protocolName = RPC.getProtocolName(protocol);
this.clientProtocolVersion = RPC
.getProtocolVersion(protocol);
@ -191,7 +191,7 @@ public class ProtobufRpcEngine implements RpcEngine {
}
RequestProto rpcRequest = constructRpcRequest(method, args);
RpcResponseWritable val = null;
RpcResponseWrapper val = null;
if (LOG.isTraceEnabled()) {
LOG.trace(Thread.currentThread().getId() + ": Call -> " +
@ -199,8 +199,8 @@ public class ProtobufRpcEngine implements RpcEngine {
" {" + TextFormat.shortDebugString((Message) args[1]) + "}");
}
try {
val = (RpcResponseWritable) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER,
new RpcRequestWritable(rpcRequest), remoteId);
val = (RpcResponseWrapper) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER,
new RpcRequestWrapper(rpcRequest), remoteId);
} catch (Throwable e) {
if (LOG.isTraceEnabled()) {
@ -268,16 +268,20 @@ public class ProtobufRpcEngine implements RpcEngine {
}
/**
* Writable Wrapper for Protocol Buffer Requests
* Wrapper for Protocol Buffer Requests
*
* Note while this wrapper is writable, the request on the wire is in
* Protobuf. Several methods on {@link org.apache.hadoop.ipc.Server} and RPC
* use type Writable as a wrapper to work across multiple RpcEngine kinds.
*/
private static class RpcRequestWritable implements Writable {
private static class RpcRequestWrapper implements Writable {
RequestProto message;
@SuppressWarnings("unused")
public RpcRequestWritable() {
public RpcRequestWrapper() {
}
RpcRequestWritable(RequestProto message) {
RpcRequestWrapper(RequestProto message) {
this.message = message;
}
@ -303,16 +307,20 @@ public class ProtobufRpcEngine implements RpcEngine {
}
/**
* Writable Wrapper for Protocol Buffer Responses
* Wrapper for Protocol Buffer Responses
*
* Note while this wrapper is writable, the response on the wire is in
* Protobuf. Several methods on {@link org.apache.hadoop.ipc.Server} and RPC
* use type Writable as a wrapper to work across multiple RpcEngine kinds.
*/
private static class RpcResponseWritable implements Writable {
private static class RpcResponseWrapper implements Writable {
byte[] responseMessage;
@SuppressWarnings("unused")
public RpcResponseWritable() {
public RpcResponseWrapper() {
}
public RpcResponseWritable(Message message) {
public RpcResponseWrapper(Message message) {
this.responseMessage = message.toByteArray();
}
@ -336,7 +344,7 @@ public class ProtobufRpcEngine implements RpcEngine {
@InterfaceStability.Unstable
static Client getClient(Configuration conf) {
return CLIENTS.getClient(conf, SocketFactory.getDefault(),
RpcResponseWritable.class);
RpcResponseWrapper.class);
}
@ -425,7 +433,7 @@ public class ProtobufRpcEngine implements RpcEngine {
*/
public Writable call(RPC.Server server, String connectionProtocolName,
Writable writableRequest, long receiveTime) throws Exception {
RpcRequestWritable request = (RpcRequestWritable) writableRequest;
RpcRequestWrapper request = (RpcRequestWrapper) writableRequest;
RequestProto rpcRequest = request.message;
String methodName = rpcRequest.getMethodName();
@ -487,7 +495,7 @@ public class ProtobufRpcEngine implements RpcEngine {
} catch (Exception e) {
throw e;
}
return new RpcResponseWritable(result);
return new RpcResponseWrapper(result);
}
}
}

View File

@ -15,9 +15,23 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
module org.apache.hadoop.record {
class RecBuffer {
buffer data;
}
}
package org.apache.hadoop.net;
import java.net.SocketTimeoutException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown by {@link NetUtils#connect(java.net.Socket, java.net.SocketAddress, int)}
* if it times out while connecting to the remote host.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ConnectTimeoutException extends SocketTimeoutException {
private static final long serialVersionUID = 1L;
public ConnectTimeoutException(String msg) {
super(msg);
}
}
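
How a caller might use the new type (a sketch; socket and endpoint are assumed in scope): connect-phase timeouts become distinguishable from read timeouts, while existing catch (SocketTimeoutException) blocks still match because of the subclass relationship.

import java.io.IOException;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.SocketTimeoutException;
import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.net.NetUtils;

public class ConnectSketch {
  static void connect(Socket socket, SocketAddress endpoint) {
    try {
      NetUtils.connect(socket, endpoint, 20000);  // 20s connect timeout
    } catch (ConnectTimeoutException cte) {
      // connection was never established: a failover-and-retry candidate
    } catch (SocketTimeoutException ste) {
      // some other socket timeout (e.g. mid-read), handled differently
    } catch (IOException ioe) {
      // other connect failures
    }
  }
}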

View File

@ -20,6 +20,7 @@ package org.apache.hadoop.net;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.reflect.Constructor;
import java.net.BindException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
@ -517,12 +518,16 @@ public class NetUtils {
socket.bind(localAddr);
}
try {
if (ch == null) {
// let the default implementation handle it.
socket.connect(endpoint, timeout);
} else {
SocketIOWithTimeout.connect(ch, endpoint, timeout);
}
} catch (SocketTimeoutException ste) {
throw new ConnectTimeoutException(ste.getMessage());
}
// There is a very rare case allowed by the TCP specification, such that
// if we are trying to connect to an endpoint on the local machine,
@ -719,7 +724,7 @@ public class NetUtils {
+ see("BindException"));
} else if (exception instanceof ConnectException) {
// connection refused; include the host:port in the error
return (ConnectException) new ConnectException(
return wrapWithMessage(exception,
"Call From "
+ localHost
+ " to "
@ -729,32 +734,28 @@ public class NetUtils {
+ " failed on connection exception: "
+ exception
+ ";"
+ see("ConnectionRefused"))
.initCause(exception);
+ see("ConnectionRefused"));
} else if (exception instanceof UnknownHostException) {
return (UnknownHostException) new UnknownHostException(
return wrapWithMessage(exception,
"Invalid host name: "
+ getHostDetailsAsString(destHost, destPort, localHost)
+ exception
+ ";"
+ see("UnknownHost"))
.initCause(exception);
+ see("UnknownHost"));
} else if (exception instanceof SocketTimeoutException) {
return (SocketTimeoutException) new SocketTimeoutException(
return wrapWithMessage(exception,
"Call From "
+ localHost + " to " + destHost + ":" + destPort
+ " failed on socket timeout exception: " + exception
+ ";"
+ see("SocketTimeout"))
.initCause(exception);
+ see("SocketTimeout"));
} else if (exception instanceof NoRouteToHostException) {
return (NoRouteToHostException) new NoRouteToHostException(
return wrapWithMessage(exception,
"No Route to Host from "
+ localHost + " to " + destHost + ":" + destPort
+ " failed on socket timeout exception: " + exception
+ ";"
+ see("NoRouteToHost"))
.initCause(exception);
+ see("NoRouteToHost"));
}
else {
return (IOException) new IOException("Failed on local exception: "
@ -770,6 +771,21 @@ public class NetUtils {
return FOR_MORE_DETAILS_SEE + HADOOP_WIKI + entry;
}
@SuppressWarnings("unchecked")
private static <T extends IOException> T wrapWithMessage(
T exception, String msg) {
Class<? extends Throwable> clazz = exception.getClass();
try {
Constructor<? extends Throwable> ctor = clazz.getConstructor(String.class);
Throwable t = ctor.newInstance(msg);
return (T)(t.initCause(exception));
} catch (Throwable e) {
LOG.warn("Unable to wrap exception of type " +
clazz + ": it has no (String) constructor", e);
return exception;
}
}
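
What the reflection trick buys, sketched with the public wrapException entry point (host names are hypothetical): the caller still sees the original concrete exception type, so instanceof checks and catch clauses keep working, but the message carries the host details and the original exception becomes the cause.

import java.io.IOException;
import java.net.ConnectException;
import org.apache.hadoop.net.NetUtils;

public class WrapSketch {
  public static void main(String[] args) {
    IOException wrapped = NetUtils.wrapException(
        "nn.example.com", 8020,       // hypothetical destination
        "client.example.com", 0,      // hypothetical local side
        new ConnectException("Connection refused"));
    System.out.println(wrapped instanceof ConnectException); // true
    System.out.println(wrapped.getCause());                  // the original
  }
}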
/**
* Get the host details as a string
* @param destHost destination host (nullable)

View File

@ -301,17 +301,26 @@ public class UserGroupInformation {
private static String OS_LOGIN_MODULE_NAME;
private static Class<? extends Principal> OS_PRINCIPAL_CLASS;
private static final boolean windows =
System.getProperty("os.name").startsWith("Windows");
private static final boolean is64Bit =
System.getProperty("os.arch").contains("64");
private static final boolean ibmJava = System.getProperty("java.vendor").contains("IBM");
private static final boolean aix = System.getProperty("os.name").equals("AIX");
/* Return the OS login module class name */
private static String getOSLoginModuleName() {
if (System.getProperty("java.vendor").contains("IBM")) {
return windows ? (is64Bit
? "com.ibm.security.auth.module.Win64LoginModule"
: "com.ibm.security.auth.module.NTLoginModule")
: "com.ibm.security.auth.module.LinuxLoginModule";
if (ibmJava) {
if (windows) {
return is64Bit ? "com.ibm.security.auth.module.Win64LoginModule"
: "com.ibm.security.auth.module.NTLoginModule";
} else if (aix) {
return is64Bit ? "com.ibm.security.auth.module.AIX64LoginModule"
: "com.ibm.security.auth.module.AIXLoginModule";
} else {
return "com.ibm.security.auth.module.LinuxLoginModule";
}
} else {
return windows ? "com.sun.security.auth.module.NTLoginModule"
: "com.sun.security.auth.module.UnixLoginModule";
@ -323,21 +332,24 @@ public class UserGroupInformation {
private static Class<? extends Principal> getOsPrincipalClass() {
ClassLoader cl = ClassLoader.getSystemClassLoader();
try {
if (System.getProperty("java.vendor").contains("IBM")) {
String principalClass = null;
if (ibmJava) {
if (is64Bit) {
principalClass = "com.ibm.security.auth.UsernamePrincipal";
} else {
if (windows) {
return (Class<? extends Principal>) (is64Bit
? cl.loadClass("com.ibm.security.auth.UsernamePrincipal")
: cl.loadClass("com.ibm.security.auth.NTUserPrincipal"));
principalClass = "com.ibm.security.auth.NTUserPrincipal";
} else if (aix) {
principalClass = "com.ibm.security.auth.AIXPrincipal";
} else {
return (Class<? extends Principal>) (is64Bit
? cl.loadClass("com.ibm.security.auth.UsernamePrincipal")
: cl.loadClass("com.ibm.security.auth.LinuxPrincipal"));
principalClass = "com.ibm.security.auth.LinuxPrincipal";
}
}
} else {
return (Class<? extends Principal>) (windows
? cl.loadClass("com.sun.security.auth.NTUserPrincipal")
: cl.loadClass("com.sun.security.auth.UnixPrincipal"));
principalClass = windows ? "com.sun.security.auth.NTUserPrincipal"
: "com.sun.security.auth.UnixPrincipal";
}
return (Class<? extends Principal>) cl.loadClass(principalClass);
} catch (ClassNotFoundException e) {
LOG.error("Unable to find JAAS classes:" + e.getMessage());
}
@ -418,13 +430,22 @@ public class UserGroupInformation {
private static final Map<String,String> USER_KERBEROS_OPTIONS =
new HashMap<String,String>();
static {
if (ibmJava) {
USER_KERBEROS_OPTIONS.put("useDefaultCcache", "true");
} else {
USER_KERBEROS_OPTIONS.put("doNotPrompt", "true");
USER_KERBEROS_OPTIONS.put("useTicketCache", "true");
USER_KERBEROS_OPTIONS.put("renewTGT", "true");
}
String ticketCache = System.getenv("KRB5CCNAME");
if (ticketCache != null) {
if (ibmJava) {
// The first value searched when "useDefaultCcache" is used.
System.setProperty("KRB5CCNAME", ticketCache);
} else {
USER_KERBEROS_OPTIONS.put("ticketCache", ticketCache);
}
}
USER_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS);
}
private static final AppConfigurationEntry USER_KERBEROS_LOGIN =
@ -434,10 +455,14 @@ public class UserGroupInformation {
private static final Map<String,String> KEYTAB_KERBEROS_OPTIONS =
new HashMap<String,String>();
static {
if (ibmJava) {
KEYTAB_KERBEROS_OPTIONS.put("credsType", "both");
} else {
KEYTAB_KERBEROS_OPTIONS.put("doNotPrompt", "true");
KEYTAB_KERBEROS_OPTIONS.put("useKeyTab", "true");
KEYTAB_KERBEROS_OPTIONS.put("storeKey", "true");
KEYTAB_KERBEROS_OPTIONS.put("refreshKrb5Config", "true");
}
KEYTAB_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS);
}
private static final AppConfigurationEntry KEYTAB_KERBEROS_LOGIN =
@ -462,7 +487,12 @@ public class UserGroupInformation {
} else if (USER_KERBEROS_CONFIG_NAME.equals(appName)) {
return USER_KERBEROS_CONF;
} else if (KEYTAB_KERBEROS_CONFIG_NAME.equals(appName)) {
if (ibmJava) {
KEYTAB_KERBEROS_OPTIONS.put("useKeytab",
prependFileAuthority(keytabFile));
} else {
KEYTAB_KERBEROS_OPTIONS.put("keyTab", keytabFile);
}
KEYTAB_KERBEROS_OPTIONS.put("principal", keytabPrincipal);
return KEYTAB_KERBEROS_CONF;
}
@ -470,6 +500,11 @@ public class UserGroupInformation {
}
}
private static String prependFileAuthority(String keytabPath) {
return keytabPath.startsWith("file://") ? keytabPath
: "file://" + keytabPath;
}
/**
* Represents a javax.security configuration that is created at runtime.
*/
@ -666,6 +701,7 @@ public class UserGroupInformation {
}
loginUser.spawnAutoRenewalThreadForUserCreds();
} catch (LoginException le) {
LOG.debug("failure to login", le);
throw new IOException("failure to login", le);
}
if (LOG.isDebugEnabled()) {

View File

@ -268,7 +268,12 @@ public class GenericOptionsParser {
}
if (line.hasOption("jt")) {
conf.set("mapred.job.tracker", line.getOptionValue("jt"),
String optionValue = line.getOptionValue("jt");
if (optionValue.equalsIgnoreCase("local")) {
conf.set("mapreduce.framework.name", optionValue);
}
conf.set("yarn.resourcemanager.address", optionValue,
"from -jt command line option");
}
if (line.hasOption("conf")) {

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.util;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
@ -39,7 +40,7 @@ import org.apache.hadoop.io.Text;
*/
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public class LineReader {
public class LineReader implements Closeable {
private static final int DEFAULT_BUFFER_SIZE = 64 * 1024;
private int bufferSize = DEFAULT_BUFFER_SIZE;
private InputStream in;

View File

@ -23,8 +23,6 @@ import java.io.StringWriter;
import java.net.URI;
import java.net.URISyntaxException;
import java.text.DateFormat;
import java.text.DecimalFormat;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@ -34,12 +32,13 @@ import java.util.List;
import java.util.Locale;
import java.util.StringTokenizer;
import com.google.common.net.InetAddresses;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.net.NetUtils;
import com.google.common.net.InetAddresses;
/**
* General string utils
*/
@ -52,13 +51,6 @@ public class StringUtils {
*/
public static final int SHUTDOWN_HOOK_PRIORITY = 0;
private static final DecimalFormat decimalFormat;
static {
NumberFormat numberFormat = NumberFormat.getNumberInstance(Locale.ENGLISH);
decimalFormat = (DecimalFormat) numberFormat;
decimalFormat.applyPattern("#.##");
}
/**
* Make a string representation of the exception.
* @param e The exception to stringify
@ -88,49 +80,32 @@ public class StringUtils {
return fullHostname;
}
private static DecimalFormat oneDecimal = new DecimalFormat("0.0");
/**
* Given an integer, return a string that is in an approximate, but human
* readable format.
* It uses the bases 'k', 'm', and 'g' for 1024, 1024**2, and 1024**3.
* @param number the number to format
* @return a human readable form of the integer
*
* @deprecated use {@link TraditionalBinaryPrefix#long2String(long, String, int)}.
*/
@Deprecated
public static String humanReadableInt(long number) {
long absNumber = Math.abs(number);
double result = number;
String suffix = "";
if (absNumber < 1024) {
// since no division has occurred, don't format with a decimal point
return String.valueOf(number);
} else if (absNumber < 1024 * 1024) {
result = number / 1024.0;
suffix = "k";
} else if (absNumber < 1024 * 1024 * 1024) {
result = number / (1024.0 * 1024);
suffix = "m";
} else {
result = number / (1024.0 * 1024 * 1024);
suffix = "g";
return TraditionalBinaryPrefix.long2String(number, "", 1);
}
return oneDecimal.format(result) + suffix;
/** The same as String.format(Locale.ENGLISH, format, objects). */
public static String format(final String format, final Object... objects) {
return String.format(Locale.ENGLISH, format, objects);
}
/**
* Format a percentage for presentation to the user.
* @param done the percentage to format (0.0 to 1.0)
* @param digits the number of digits past the decimal point
* @param fraction the percentage as a fraction, e.g. 0.1 = 10%
* @param decimalPlaces the number of decimal places
* @return a string representation of the percentage
*/
public static String formatPercent(double done, int digits) {
DecimalFormat percentFormat = new DecimalFormat("0.00%");
double scale = Math.pow(10.0, digits+2);
double rounded = Math.floor(done * scale);
percentFormat.setDecimalSeparatorAlwaysShown(false);
percentFormat.setMinimumFractionDigits(digits);
percentFormat.setMaximumFractionDigits(digits);
return percentFormat.format(rounded / scale);
public static String formatPercent(double fraction, int decimalPlaces) {
return format("%." + decimalPlaces + "f%%", fraction*100);
}
/**
@ -165,7 +140,7 @@ public class StringUtils {
}
StringBuilder s = new StringBuilder();
for(int i = start; i < end; i++) {
s.append(String.format("%02x", bytes[i]));
s.append(format("%02x", bytes[i]));
}
return s.toString();
}
@ -630,18 +605,22 @@ public class StringUtils {
* TraditionalBinaryPrefix symbol are case insensitive.
*/
public static enum TraditionalBinaryPrefix {
KILO(1024),
MEGA(KILO.value << 10),
GIGA(MEGA.value << 10),
TERA(GIGA.value << 10),
PETA(TERA.value << 10),
EXA(PETA.value << 10);
KILO(10),
MEGA(KILO.bitShift + 10),
GIGA(MEGA.bitShift + 10),
TERA(GIGA.bitShift + 10),
PETA(TERA.bitShift + 10),
EXA (PETA.bitShift + 10);
public final long value;
public final char symbol;
public final int bitShift;
public final long bitMask;
TraditionalBinaryPrefix(long value) {
this.value = value;
private TraditionalBinaryPrefix(int bitShift) {
this.bitShift = bitShift;
this.value = 1L << bitShift;
this.bitMask = this.value - 1L;
this.symbol = toString().charAt(0);
}
@ -692,6 +671,56 @@ public class StringUtils {
return num * prefix;
}
}
/**
* Convert a long integer to a string with traditional binary prefix.
*
* @param n the value to be converted
* @param unit The unit, e.g. "B" for bytes.
* @param decimalPlaces The number of decimal places.
* @return a string with traditional binary prefix.
*/
public static String long2String(long n, String unit, int decimalPlaces) {
if (unit == null) {
unit = "";
}
//take care of a special case
if (n == Long.MIN_VALUE) {
return "-8 " + EXA.symbol + unit;
}
final StringBuilder b = new StringBuilder();
//take care of negative numbers
if (n < 0) {
b.append('-');
n = -n;
}
if (n < KILO.value) {
//no prefix
b.append(n);
return (unit.isEmpty()? b: b.append(" ").append(unit)).toString();
} else {
//find traditional binary prefix
int i = 0;
for(; i < values().length && n >= values()[i].value; i++);
TraditionalBinaryPrefix prefix = values()[i - 1];
if ((n & prefix.bitMask) == 0) {
//exact division
b.append(n >> prefix.bitShift);
} else {
final String format = "%." + decimalPlaces + "f";
String s = format(format, n/(double)prefix.value);
//check a special rounding up case
if (s.startsWith("1024")) {
prefix = values()[i];
s = format(format, n/(double)prefix.value);
}
b.append(s);
}
return b.append(' ').append(prefix.symbol).append(unit).toString();
}
}
}
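
A few illustrative outputs for the new helpers, assuming the logic shown above (a sketch, not part of the commit): exact powers of 1024 print without decimals, and a value that would round up to "1024" is bumped to the next prefix instead.

import org.apache.hadoop.util.StringUtils;

public class HumanReadableSketch {
  public static void main(String[] args) {
    System.out.println(StringUtils.TraditionalBinaryPrefix.long2String(100,  "B", 2)); // 100 B
    System.out.println(StringUtils.TraditionalBinaryPrefix.long2String(1024, "B", 2)); // 1 KB
    System.out.println(StringUtils.TraditionalBinaryPrefix.long2String(1536, "B", 2)); // 1.50 KB
    System.out.println(StringUtils.byteDesc(3L << 20));                                // 3 MB
    System.out.println(StringUtils.formatPercent(0.1234, 1));                          // 12.3%
  }
}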
/**
@ -731,32 +760,16 @@ public class StringUtils {
}
/**
* Return an abbreviated English-language description of the byte length.
* @return a byte description of the given long integer value.
*/
public static String byteDesc(long len) {
double val = 0.0;
String ending = "";
if (len < 1024 * 1024) {
val = (1.0 * len) / 1024;
ending = " KB";
} else if (len < 1024 * 1024 * 1024) {
val = (1.0 * len) / (1024 * 1024);
ending = " MB";
} else if (len < 1024L * 1024 * 1024 * 1024) {
val = (1.0 * len) / (1024 * 1024 * 1024);
ending = " GB";
} else if (len < 1024L * 1024 * 1024 * 1024 * 1024) {
val = (1.0 * len) / (1024L * 1024 * 1024 * 1024);
ending = " TB";
} else {
val = (1.0 * len) / (1024L * 1024 * 1024 * 1024 * 1024);
ending = " PB";
}
return limitDecimalTo2(val) + ending;
return TraditionalBinaryPrefix.long2String(len, "B", 2);
}
public static synchronized String limitDecimalTo2(double d) {
return decimalFormat.format(d);
/** @deprecated use StringUtils.format("%.2f", d). */
@Deprecated
public static String limitDecimalTo2(double d) {
return format("%.2f", d);
}
/**

View File

@ -24,11 +24,12 @@
#include <grp.h>
#include <jni.h>
#include <pwd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include "config.h"
@ -502,6 +503,26 @@ ssize_t get_pw_buflen() {
#endif
return (ret > 512) ? ret : 512;
}
JNIEXPORT void JNICALL
Java_org_apache_hadoop_io_nativeio_NativeIO_renameTo0(JNIEnv *env,
jclass clazz, jstring jsrc, jstring jdst)
{
const char *src = NULL, *dst = NULL;
src = (*env)->GetStringUTFChars(env, jsrc, NULL);
if (!src) goto done; // exception was thrown
dst = (*env)->GetStringUTFChars(env, jdst, NULL);
if (!dst) goto done; // exception was thrown
if (rename(src, dst)) {
throw_ioe(env, errno);
}
done:
if (src) (*env)->ReleaseStringUTFChars(env, jsrc, src);
if (dst) (*env)->ReleaseStringUTFChars(env, jdst, dst);
}
/**
* vim: sw=2: ts=2: et:
*/

View File

@ -63,6 +63,9 @@ static errno_mapping_t ERRNO_MAPPINGS[] = {
MAPPING(EPIPE),
MAPPING(EDOM),
MAPPING(ERANGE),
MAPPING(ELOOP),
MAPPING(ENAMETOOLONG),
MAPPING(ENOTEMPTY),
{-1, NULL}
};

View File

@ -325,6 +325,13 @@ Hadoop MapReduce Next Generation - Cluster Setup
| | | How long to keep aggregation logs before deleting them. -1 disables. |
| | | Be careful, set this too small and you will spam the name node. |
*-------------------------+-------------------------+------------------------+
| <<<yarn.log-aggregation.retain-check-interval-seconds>>> | | |
| | <-1> | |
| | | Time between checks for aggregated log retention. If set to 0 or a |
| | | negative value then the value is computed as one-tenth of the |
| | | aggregated log retention time. |
| | | Be careful, set this too small and you will spam the name node. |
*-------------------------+-------------------------+------------------------+

View File

@ -350,10 +350,11 @@ Administration Commands
Runs a HDFS dfsadmin client.
Usage: <<<hadoop dfsadmin [GENERIC_OPTIONS] [-report] [-safemode enter | leave | get | wait] [-refreshNodes] [-finalizeUpgrade] [-upgradeProgress status | details | force] [-metasave filename] [-setQuota <quota> <dirname>...<dirname>] [-clrQuota <dirname>...<dirname>] [-help [cmd]]>>>
Usage: <<<hadoop dfsadmin [GENERIC_OPTIONS] [-report] [-safemode enter | leave | get | wait] [-refreshNodes] [-finalizeUpgrade] [-upgradeProgress status | details | force] [-metasave filename] [-setQuota <quota> <dirname>...<dirname>] [-clrQuota <dirname>...<dirname>] [-restoreFailedStorage true|false|check] [-help [cmd]]>>>
*-----------------+-----------------------------------------------------------+
|| COMMAND_OPTION || Description
*-----------------+-----------------------------------------------------------+
| -report | Reports basic filesystem information and statistics.
*-----------------+-----------------------------------------------------------+
| -safemode enter / leave / get / wait | Safe mode maintenance command. Safe
@ -403,6 +404,10 @@ Administration Commands
| 2. user is not an administrator. It does not fault if the
| directory has no quota.
*-----------------+-----------------------------------------------------------+
| -restoreFailedStorage true / false / check | This option will turn on/off automatic attempt to restore failed storage replicas.
| If a failed storage becomes available again the system will attempt to restore
| edits and/or fsimage during checkpoint. 'check' option will return current setting.
*-----------------+-----------------------------------------------------------+
| -help [cmd] | Displays help for the given command or all commands if none
| is specified.
*-----------------+-----------------------------------------------------------+

View File

@ -1,63 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
module org.apache.hadoop.record {
class RecRecord0 {
ustring stringVal;
}
class RecRecord1 {
boolean boolVal;
byte byteVal;
int intVal;
long longVal;
float floatVal; // testing inline comment
double doubleVal; /* testing comment */
ustring stringVal; /* testing multi-line
* comment */
buffer bufferVal; // testing another // inline comment
vector<ustring> vectorVal;
map<ustring, ustring> mapVal;
RecRecord0 recordVal;
}
class RecRecordOld {
ustring name;
vector<long> ivec;
vector<vector<RecRecord0>> svec;
RecRecord0 inner;
vector<vector<vector<ustring>>> strvec;
float i1;
map<byte, ustring> map1;
vector<map<int, long>> mvec1;
vector<map<int, long>> mvec2;
}
/* RecRecordNew is a lot like RecRecordOld. Helps test for versioning. */
class RecRecordNew {
ustring name2;
RecRecord0 inner;
vector<int> ivec;
vector<vector<int>> svec;
vector<vector<vector<ustring>>> strvec;
int i1;
map<long, ustring> map1;
vector<map<int, long>> mvec2;
}
}
@@ -546,4 +546,44 @@ public class TestFileUtil {
long expected = 2 * (3 + System.getProperty("line.separator").length());
Assert.assertEquals(expected, du);
}
private void doUntarAndVerify(File tarFile, File untarDir)
throws IOException {
if (untarDir.exists() && !FileUtil.fullyDelete(untarDir)) {
throw new IOException("Could not delete directory '" + untarDir + "'");
}
FileUtil.unTar(tarFile, untarDir);
String parentDir = untarDir.getCanonicalPath() + Path.SEPARATOR + "name";
File testFile = new File(parentDir + Path.SEPARATOR + "version");
Assert.assertTrue(testFile.exists());
Assert.assertTrue(testFile.length() == 0);
String imageDir = parentDir + Path.SEPARATOR + "image";
testFile = new File(imageDir + Path.SEPARATOR + "fsimage");
Assert.assertTrue(testFile.exists());
Assert.assertTrue(testFile.length() == 157);
String currentDir = parentDir + Path.SEPARATOR + "current";
testFile = new File(currentDir + Path.SEPARATOR + "fsimage");
Assert.assertTrue(testFile.exists());
Assert.assertTrue(testFile.length() == 4331);
testFile = new File(currentDir + Path.SEPARATOR + "edits");
Assert.assertTrue(testFile.exists());
Assert.assertTrue(testFile.length() == 1033);
testFile = new File(currentDir + Path.SEPARATOR + "fstime");
Assert.assertTrue(testFile.exists());
Assert.assertTrue(testFile.length() == 8);
}
@Test
public void testUntar() throws IOException {
String tarGzFileName = System.getProperty("test.cache.data",
"build/test/cache") + "/test-untar.tgz";
String tarFileName = System.getProperty("test.cache.data",
"build/test/cache") + "/test-untar.tar";
String dataDir = System.getProperty("test.build.data", "build/test/data");
File untarDir = new File(dataDir, "untarDir");
doUntarAndVerify(new File(tarGzFileName), untarDir);
doUntarAndVerify(new File(tarFileName), untarDir);
}
}
@@ -303,6 +303,46 @@ public class TestFsShellReturnCode {
}
}
@Test
public void testRmWithNonexistentGlob() throws Exception {
Configuration conf = new Configuration();
FsShell shell = new FsShell();
shell.setConf(conf);
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream err = new PrintStream(bytes);
final PrintStream oldErr = System.err;
System.setErr(err);
final String results;
try {
int exit = shell.run(new String[]{"-rm", "nomatch*"});
assertEquals(1, exit);
results = bytes.toString();
assertTrue(results.contains("rm: `nomatch*': No such file or directory"));
} finally {
IOUtils.closeStream(err);
System.setErr(oldErr);
}
}
@Test
public void testRmForceWithNonexistentGlob() throws Exception {
Configuration conf = new Configuration();
FsShell shell = new FsShell();
shell.setConf(conf);
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream err = new PrintStream(bytes);
final PrintStream oldErr = System.err;
System.setErr(err);
try {
int exit = shell.run(new String[]{"-rm", "-f", "nomatch*"});
assertEquals(0, exit);
assertTrue(bytes.toString().isEmpty());
} finally {
IOUtils.closeStream(err);
System.setErr(oldErr);
}
}
@Test
public void testInvalidDefaultFS() throws Exception {
// if default fs doesn't exist or is invalid, but the path provided in
@@ -28,6 +28,7 @@ import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Shell;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -46,8 +47,18 @@ public class TestHarFileSystemBasics {
private static final String ROOT_PATH = System.getProperty("test.build.data",
"build/test/data");
private static final Path rootPath = new Path(
new File(ROOT_PATH).getAbsolutePath() + "/localfs");
private static final Path rootPath;
static {
String root = new Path(new File(ROOT_PATH).getAbsolutePath(), "localfs")
.toUri().getPath();
// Strip drive specifier on Windows, which would make the HAR URI invalid and
// cause tests to fail.
if (Shell.WINDOWS) {
root = root.substring(root.indexOf(':') + 1);
}
rootPath = new Path(root);
}
// NB: .har suffix is necessary
private static final Path harPath = new Path(rootPath, "path1/path2/my.har");
@@ -45,17 +45,37 @@ public class TestListFiles {
final protected static Configuration conf = new Configuration();
protected static FileSystem fs;
final protected static Path TEST_DIR = getTestDir();
protected static Path TEST_DIR;
final private static int FILE_LEN = 10;
final private static Path FILE1 = new Path(TEST_DIR, "file1");
final private static Path DIR1 = new Path(TEST_DIR, "dir1");
final private static Path FILE2 = new Path(DIR1, "file2");
final private static Path FILE3 = new Path(DIR1, "file3");
private static Path FILE1;
private static Path DIR1;
private static Path FILE2;
private static Path FILE3;
static {
setTestPaths(new Path(
System.getProperty("test.build.data", "build/test/data/work-dir/localfs"),
"main_"));
}
protected static Path getTestDir() {
return new Path(
System.getProperty("test.build.data","build/test/data/work-dir/localfs"),
"main_");
return TEST_DIR;
}
/**
* Sets the root testing directory and reinitializes any additional test paths
* that are under the root. This method is intended to be called from a
* subclass's @BeforeClass method if there is a need to override the testing
* directory.
*
* @param testDir the root testing directory
*/
protected static void setTestPaths(Path testDir) {
TEST_DIR = testDir;
FILE1 = new Path(TEST_DIR, "file1");
DIR1 = new Path(TEST_DIR, "dir1");
FILE2 = new Path(DIR1, "file2");
FILE3 = new Path(DIR1, "file3");
}
@BeforeClass
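A minimal sketch of the documented usage (the subclass name and path are
hypothetical, not part of this patch): a subclass points the suite at a
different root before the base class populates it.

  public class TestListFilesOnAltDir extends TestListFiles {
    @BeforeClass
    public static void overrideTestDir() {
      // Assumed writable location; any filesystem-appropriate root works.
      setTestPaths(new Path("/tmp/listfiles-alt", "alt_"));
    }
  }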
@@ -28,6 +28,7 @@ import java.io.*;
import static org.junit.Assert.*;
import static org.junit.Assume.assumeTrue;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -38,8 +39,9 @@ public class TestLocalFileSystem {
private static final String TEST_ROOT_DIR
= System.getProperty("test.build.data","build/test/data") + "/work-dir/localfs";
private final File base = new File(TEST_ROOT_DIR);
private Configuration conf;
private FileSystem fileSys;
private LocalFileSystem fileSys;
private void cleanupFile(FileSystem fs, Path name) throws IOException {
assertTrue(fs.exists(name));
@@ -54,6 +56,13 @@ public class TestLocalFileSystem {
fileSys.delete(new Path(TEST_ROOT_DIR), true);
}
@After
public void after() throws IOException {
base.setWritable(true);
FileUtil.fullyDelete(base);
assertTrue(!base.exists());
}
/**
* Test the capability of setting the working directory.
*/
@@ -269,10 +278,83 @@ public class TestLocalFileSystem {
LocalFileSystem fs = FileSystem.getLocal(conf);
File colonFile = new File(TEST_ROOT_DIR, "foo:bar");
colonFile.mkdirs();
colonFile.createNewFile();
FileStatus[] stats = fs.listStatus(new Path(TEST_ROOT_DIR));
assertEquals("Unexpected number of stats", 1, stats.length);
assertEquals("Bad path from stat", colonFile.getAbsolutePath(),
stats[0].getPath().toUri().getPath());
}
@Test
public void testReportChecksumFailure() throws IOException {
base.mkdirs();
assertTrue(base.exists() && base.isDirectory());
final File dir1 = new File(base, "dir1");
final File dir2 = new File(dir1, "dir2");
dir2.mkdirs();
assertTrue(dir2.exists() && dir2.canWrite());
final String dataFileName = "corruptedData";
final Path dataPath = new Path(new File(dir2, dataFileName).toURI());
final Path checksumPath = fileSys.getChecksumFile(dataPath);
final FSDataOutputStream fsdos = fileSys.create(dataPath);
try {
fsdos.writeUTF("foo");
} finally {
fsdos.close();
}
assertTrue(fileSys.pathToFile(dataPath).exists());
final long dataFileLength = fileSys.getFileStatus(dataPath).getLen();
assertTrue(dataFileLength > 0);
// check that the checksum file is created and not empty:
assertTrue(fileSys.pathToFile(checksumPath).exists());
final long checksumFileLength = fileSys.getFileStatus(checksumPath).getLen();
assertTrue(checksumFileLength > 0);
// this is a hack to force the #reportChecksumFailure() method to stop
// climbing up at the 'base' directory and use 'dir1/bad_files' as the
// corrupted files storage:
base.setWritable(false);
FSDataInputStream dataFsdis = fileSys.open(dataPath);
FSDataInputStream checksumFsdis = fileSys.open(checksumPath);
boolean retryIsNecessary = fileSys.reportChecksumFailure(dataPath, dataFsdis, 0, checksumFsdis, 0);
assertTrue(!retryIsNecessary);
// the data file should be moved:
assertTrue(!fileSys.pathToFile(dataPath).exists());
// the checksum file should be moved:
assertTrue(!fileSys.pathToFile(checksumPath).exists());
// check that the files exist in the new location where they were moved:
File[] dir1files = dir1.listFiles(new FileFilter() {
@Override
public boolean accept(File pathname) {
return pathname != null && !pathname.getName().equals("dir2");
}
});
assertTrue(dir1files != null);
assertTrue(dir1files.length == 1);
File badFilesDir = dir1files[0];
File[] badFiles = badFilesDir.listFiles();
assertTrue(badFiles != null);
assertTrue(badFiles.length == 2);
boolean dataFileFound = false;
boolean checksumFileFound = false;
for (File badFile: badFiles) {
if (badFile.getName().startsWith(dataFileName)) {
assertTrue(dataFileLength == badFile.length());
dataFileFound = true;
} else if (badFile.getName().contains(dataFileName + ".crc")) {
assertTrue(checksumFileLength == badFile.length());
checksumFileFound = true;
}
}
assertTrue(dataFileFound);
assertTrue(checksumFileFound);
}
}
@@ -88,4 +88,61 @@ public class TestBoundedByteArrayOutputStream extends TestCase {
assertTrue("Writing beyond limit did not throw an exception",
caughtException);
}
static class ResettableBoundedByteArrayOutputStream
extends BoundedByteArrayOutputStream {
public ResettableBoundedByteArrayOutputStream(int capacity) {
super(capacity);
}
public void resetBuffer(byte[] buf, int offset, int length) {
super.resetBuffer(buf, offset, length);
}
}
public void testResetBuffer() throws IOException {
ResettableBoundedByteArrayOutputStream stream =
new ResettableBoundedByteArrayOutputStream(SIZE);
// Write to the stream, get the data back and check for contents
stream.write(INPUT, 0, SIZE);
assertTrue("Array Contents Mismatch",
Arrays.equals(INPUT, stream.getBuffer()));
// Try writing beyond end of buffer. Should throw an exception
boolean caughtException = false;
try {
stream.write(INPUT[0]);
} catch (Exception e) {
caughtException = true;
}
assertTrue("Writing beyond limit did not throw an exception",
caughtException);
// Reset the stream and try again; this should succeed
byte[] newBuf = new byte[SIZE];
stream.resetBuffer(newBuf, 0, newBuf.length);
assertTrue("Limit did not get reset correctly",
(stream.getLimit() == SIZE));
stream.write(INPUT, 0, SIZE);
assertTrue("Array Contents Mismatch",
Arrays.equals(INPUT, stream.getBuffer()));
// Try writing one more byte, should fail
caughtException = false;
try {
stream.write(INPUT[0]);
} catch (Exception e) {
caughtException = true;
}
assertTrue("Writing beyond limit did not throw an exception",
caughtException);
}
}
@@ -17,15 +17,20 @@
*/
package org.apache.hadoop.io;
import java.util.Map;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import junit.framework.TestCase;
import java.util.Map;
import org.junit.Test;
/**
* Tests SortedMapWritable
*/
public class TestSortedMapWritable extends TestCase {
public class TestSortedMapWritable {
/** the test */
@Test
@SuppressWarnings("unchecked")
public void testSortedMapWritable() {
Text[] keys = {
@@ -90,6 +95,7 @@ public class TestSortedMapWritable extends TestCase {
/**
* Test that number of "unknown" classes is propagated across multiple copies.
*/
@Test
@SuppressWarnings("deprecation")
public void testForeignClass() {
SortedMapWritable inMap = new SortedMapWritable();
@@ -99,4 +105,77 @@ public class TestSortedMapWritable extends TestCase {
SortedMapWritable copyOfCopy = new SortedMapWritable(outMap);
assertEquals(1, copyOfCopy.getNewClasses());
}
/**
* Tests that the equals and hashCode methods still honor their contract.
*/
@Test
public void testEqualsAndHashCode() {
String failureReason;
SortedMapWritable mapA = new SortedMapWritable();
SortedMapWritable mapB = new SortedMapWritable();
// Sanity checks
failureReason = "SortedMapWritable couldn't be initialized. Got null reference";
assertNotNull(failureReason, mapA);
assertNotNull(failureReason, mapB);
// Basic null check
assertFalse("equals method returns true when passed null", mapA.equals(null));
// When entry set is empty, they should be equal
assertTrue("Two empty SortedMapWritables are no longer equal", mapA.equals(mapB));
// Setup
Text[] keys = {
new Text("key1"),
new Text("key2")
};
BytesWritable[] values = {
new BytesWritable("value1".getBytes()),
new BytesWritable("value2".getBytes())
};
mapA.put(keys[0], values[0]);
mapB.put(keys[1], values[1]);
// entrySets are different
failureReason = "Two SortedMapWritables with different data are now equal";
assertTrue(failureReason, mapA.hashCode() != mapB.hashCode());
assertTrue(failureReason, !mapA.equals(mapB));
assertTrue(failureReason, !mapB.equals(mapA));
mapA.put(keys[1], values[1]);
mapB.put(keys[0], values[0]);
// entrySets are now same
failureReason = "Two SortedMapWritables with same entry sets formed in different order are now different";
assertEquals(failureReason, mapA.hashCode(), mapB.hashCode());
assertTrue(failureReason, mapA.equals(mapB));
assertTrue(failureReason, mapB.equals(mapA));
// Check entry sets with the same keys but different values
mapA.put(keys[0], values[1]);
mapA.put(keys[1], values[0]);
failureReason = "Two SortedMapWritables with different content are now equal";
assertTrue(failureReason, mapA.hashCode() != mapB.hashCode());
assertTrue(failureReason, !mapA.equals(mapB));
assertTrue(failureReason, !mapB.equals(mapA));
}
@Test(timeout = 1000)
public void testPutAll() {
SortedMapWritable map1 = new SortedMapWritable();
SortedMapWritable map2 = new SortedMapWritable();
map1.put(new Text("key"), new Text("value"));
map2.putAll(map1);
assertEquals("map1 entries don't match map2 entries", map1, map2);
assertTrue(
"map2 doesn't have class information from map1",
map2.classToIdMap.containsKey(Text.class)
&& map2.idToClassMap.containsValue(Text.class));
}
}
@@ -25,11 +25,14 @@ import java.io.IOException;
import java.util.concurrent.atomic.AtomicReference;
import java.util.ArrayList;
import java.util.List;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assume.*;
import static org.junit.Assert.*;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -293,4 +296,40 @@ public class TestNativeIO {
assertFalse(NativeIO.getGroupName(0).isEmpty());
}
@Test
public void testRenameTo() throws Exception {
final File TEST_DIR = new File(new File(
System.getProperty("test.build.data","build/test/data")), "renameTest");
assumeTrue(TEST_DIR.mkdirs());
File nonExistentFile = new File(TEST_DIR, "nonexistent");
File targetFile = new File(TEST_DIR, "target");
// Test attempting to rename a nonexistent file.
try {
NativeIO.renameTo(nonExistentFile, targetFile);
Assert.fail();
} catch (NativeIOException e) {
Assert.assertEquals(e.getErrno(), Errno.ENOENT);
}
// Test renaming a file to itself. It should succeed and do nothing.
File sourceFile = new File(TEST_DIR, "source");
Assert.assertTrue(sourceFile.createNewFile());
NativeIO.renameTo(sourceFile, sourceFile);
// Test renaming a source to a destination.
NativeIO.renameTo(sourceFile, targetFile);
// Test renaming a source to a path which uses a file as a directory.
sourceFile = new File(TEST_DIR, "source");
Assert.assertTrue(sourceFile.createNewFile());
File badTarget = new File(targetFile, "subdir");
try {
NativeIO.renameTo(sourceFile, badTarget);
Assert.fail();
} catch (NativeIOException e) {
Assert.assertEquals(e.getErrno(), Errno.ENOTDIR);
}
FileUtils.deleteQuietly(TEST_DIR);
}
}
@@ -26,6 +26,7 @@ import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.net.NetUtils;
import java.util.Random;
@@ -586,7 +587,7 @@ public class TestIPC {
private void assertRetriesOnSocketTimeouts(Configuration conf,
int maxTimeoutRetries) throws IOException, InterruptedException {
SocketFactory mockFactory = Mockito.mock(SocketFactory.class);
doThrow(new SocketTimeoutException()).when(mockFactory).createSocket();
doThrow(new ConnectTimeoutException("fake")).when(mockFactory).createSocket();
Client client = new Client(IntWritable.class, conf, mockFactory);
InetSocketAddress address = new InetSocketAddress("127.0.0.1", 9090);
try {
@@ -56,6 +56,7 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
/**
* Test the MetricsSystemImpl class
@@ -80,7 +81,7 @@ public class TestMetricsSystemImpl {
}
}
@Test public void testInitFirst() throws Exception {
@Test public void testInitFirstVerifyStopInvokedImmediately() throws Exception {
new ConfigBuilder().add("*.period", 8)
//.add("test.sink.plugin.urls", getPluginUrlsAsString())
.add("test.sink.test.class", TestSink.class.getName())
@@ -106,12 +107,59 @@ public class TestMetricsSystemImpl {
ms.stop();
ms.shutdown();
verify(sink1, times(2)).putMetrics(r1.capture());
// When we call stop(), at most two sources will be consumed by each sink thread.
verify(sink1, atMost(2)).putMetrics(r1.capture());
List<MetricsRecord> mr1 = r1.getAllValues();
verify(sink2, atMost(2)).putMetrics(r2.capture());
List<MetricsRecord> mr2 = r2.getAllValues();
if (mr1.size() != 0 && mr2.size() != 0) {
checkMetricsRecords(mr1);
assertEquals("output", mr1, mr2);
} else if (mr1.size() != 0) {
checkMetricsRecords(mr1);
} else if (mr2.size() != 0) {
checkMetricsRecords(mr2);
}
}
@Test public void testInitFirstVerifyCallBacks() throws Exception {
DefaultMetricsSystem.shutdown();
new ConfigBuilder().add("*.period", 8)
//.add("test.sink.plugin.urls", getPluginUrlsAsString())
.add("test.sink.test.class", TestSink.class.getName())
.add("test.*.source.filter.exclude", "s0")
.add("test.source.s1.metric.filter.exclude", "X*")
.add("test.sink.sink1.metric.filter.exclude", "Y*")
.add("test.sink.sink2.metric.filter.exclude", "Y*")
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();
ms.register("s0", "s0 desc", new TestSource("s0rec"));
TestSource s1 = ms.register("s1", "s1 desc", new TestSource("s1rec"));
s1.c1.incr();
s1.xxx.incr();
s1.g1.set(2);
s1.yyy.incr(2);
s1.s1.add(0);
MetricsSink sink1 = mock(MetricsSink.class);
MetricsSink sink2 = mock(MetricsSink.class);
ms.registerSink("sink1", "sink1 desc", sink1);
ms.registerSink("sink2", "sink2 desc", sink2);
ms.publishMetricsNow(); // publish the metrics
try {
verify(sink1, timeout(200).times(2)).putMetrics(r1.capture());
verify(sink2, timeout(200).times(2)).putMetrics(r2.capture());
} finally {
ms.stop();
ms.shutdown();
}
// When we call stop(), at most two sources will be consumed by each sink thread.
List<MetricsRecord> mr1 = r1.getAllValues();
verify(sink2, times(2)).putMetrics(r2.capture());
List<MetricsRecord> mr2 = r2.getAllValues();
checkMetricsRecords(mr1);
assertEquals("output", mr1, mr2);
}
@Test public void testMultiThreadedPublish() throws Exception {
@@ -1,122 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.record;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.TreeMap;
import junit.framework.*;
/**
*/
public class FromCpp extends TestCase {
public FromCpp(String testName) {
super(testName);
}
@Override
protected void setUp() throws Exception {
}
@Override
protected void tearDown() throws Exception {
}
public void testBinary() {
File tmpfile;
try {
tmpfile = new File("/temp/hadooptmp.dat");
RecRecord1 r1 = new RecRecord1();
r1.setBoolVal(true);
r1.setByteVal((byte)0x66);
r1.setFloatVal(3.145F);
r1.setDoubleVal(1.5234);
r1.setIntVal(4567);
r1.setLongVal(0x5a5a5a5a5a5aL);
r1.setStringVal("random text");
r1.setBufferVal(new Buffer());
r1.setVectorVal(new ArrayList<String>());
r1.setMapVal(new TreeMap<String,String>());
FileInputStream istream = new FileInputStream(tmpfile);
BinaryRecordInput in = new BinaryRecordInput(istream);
RecRecord1 r2 = new RecRecord1();
r2.deserialize(in, "");
istream.close();
assertTrue(r1.equals(r2));
} catch (IOException ex) {
ex.printStackTrace();
}
}
public void testCsv() {
File tmpfile;
try {
tmpfile = new File("/temp/hadooptmp.txt");
RecRecord1 r1 = new RecRecord1();
r1.setBoolVal(true);
r1.setByteVal((byte)0x66);
r1.setFloatVal(3.145F);
r1.setDoubleVal(1.5234);
r1.setIntVal(4567);
r1.setLongVal(0x5a5a5a5a5a5aL);
r1.setStringVal("random text");
r1.setBufferVal(new Buffer());
r1.setVectorVal(new ArrayList<String>());
r1.setMapVal(new TreeMap<String,String>());
FileInputStream istream = new FileInputStream(tmpfile);
CsvRecordInput in = new CsvRecordInput(istream);
RecRecord1 r2 = new RecRecord1();
r2.deserialize(in, "");
istream.close();
assertTrue(r1.equals(r2));
} catch (IOException ex) {
ex.printStackTrace();
}
}
public void testXml() {
File tmpfile;
try {
tmpfile = new File("/temp/hadooptmp.xml");
RecRecord1 r1 = new RecRecord1();
r1.setBoolVal(true);
r1.setByteVal((byte)0x66);
r1.setFloatVal(3.145F);
r1.setDoubleVal(1.5234);
r1.setIntVal(4567);
r1.setLongVal(0x5a5a5a5a5a5aL);
r1.setStringVal("random text");
r1.setBufferVal(new Buffer());
r1.setVectorVal(new ArrayList<String>());
r1.setMapVal(new TreeMap<String,String>());
FileInputStream istream = new FileInputStream(tmpfile);
XmlRecordInput in = new XmlRecordInput(istream);
RecRecord1 r2 = new RecRecord1();
r2.deserialize(in, "");
istream.close();
assertTrue(r1.equals(r2));
} catch (IOException ex) {
ex.printStackTrace();
}
}
}
@@ -1,311 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.record;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.Random;
/**
* Benchmark for various types of serializations
*/
public class RecordBench {
private static class Times {
long init;
long serialize;
long deserialize;
long write;
long readFields;
};
private static final long SEED = 0xDEADBEEFL;
private static final Random rand = new Random();
/** Do not allow to create a new instance of RecordBench */
private RecordBench() {}
private static void initBuffers(Record[] buffers) {
final int BUFLEN = 32;
for (int idx = 0; idx < buffers.length; idx++) {
buffers[idx] = new RecBuffer();
int buflen = rand.nextInt(BUFLEN);
byte[] bytes = new byte[buflen];
rand.nextBytes(bytes);
((RecBuffer)buffers[idx]).setData(new Buffer(bytes));
}
}
private static void initStrings(Record[] strings) {
final int STRLEN = 32;
for (int idx = 0; idx < strings.length; idx++) {
strings[idx] = new RecString();
int strlen = rand.nextInt(STRLEN);
StringBuilder sb = new StringBuilder(strlen);
for (int ich = 0; ich < strlen; ich++) {
int cpt = 0;
while (true) {
cpt = rand.nextInt(0x10FFFF+1);
if (Utils.isValidCodePoint(cpt)) {
break;
}
}
sb.appendCodePoint(cpt);
}
((RecString)strings[idx]).setData(sb.toString());
}
}
private static void initInts(Record[] ints) {
for (int idx = 0; idx < ints.length; idx++) {
ints[idx] = new RecInt();
((RecInt)ints[idx]).setData(rand.nextInt());
}
}
private static Record[] makeArray(String type, int numRecords, Times times) {
Method init = null;
try {
init = RecordBench.class.getDeclaredMethod("init"+
toCamelCase(type) + "s",
new Class[] {Record[].class});
} catch (NoSuchMethodException ex) {
throw new RuntimeException(ex);
}
Record[] records = new Record[numRecords];
times.init = System.nanoTime();
try {
init.invoke(null, new Object[]{records});
} catch (Exception ex) {
throw new RuntimeException(ex);
}
times.init = System.nanoTime() - times.init;
return records;
}
private static void runBinaryBench(String type, int numRecords, Times times)
throws IOException {
Record[] records = makeArray(type, numRecords, times);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
BinaryRecordOutput rout = new BinaryRecordOutput(bout);
DataOutputStream dout = new DataOutputStream(bout);
for(int idx = 0; idx < numRecords; idx++) {
records[idx].serialize(rout);
}
bout.reset();
times.serialize = System.nanoTime();
for(int idx = 0; idx < numRecords; idx++) {
records[idx].serialize(rout);
}
times.serialize = System.nanoTime() - times.serialize;
byte[] serialized = bout.toByteArray();
ByteArrayInputStream bin = new ByteArrayInputStream(serialized);
BinaryRecordInput rin = new BinaryRecordInput(bin);
times.deserialize = System.nanoTime();
for(int idx = 0; idx < numRecords; idx++) {
records[idx].deserialize(rin);
}
times.deserialize = System.nanoTime() - times.deserialize;
bout.reset();
times.write = System.nanoTime();
for(int idx = 0; idx < numRecords; idx++) {
records[idx].write(dout);
}
times.write = System.nanoTime() - times.write;
bin.reset();
DataInputStream din = new DataInputStream(bin);
times.readFields = System.nanoTime();
for(int idx = 0; idx < numRecords; idx++) {
records[idx].readFields(din);
}
times.readFields = System.nanoTime() - times.readFields;
}
private static void runCsvBench(String type, int numRecords, Times times)
throws IOException {
Record[] records = makeArray(type, numRecords, times);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
CsvRecordOutput rout = new CsvRecordOutput(bout);
for(int idx = 0; idx < numRecords; idx++) {
records[idx].serialize(rout);
}
bout.reset();
times.serialize = System.nanoTime();
for(int idx = 0; idx < numRecords; idx++) {
records[idx].serialize(rout);
}
times.serialize = System.nanoTime() - times.serialize;
byte[] serialized = bout.toByteArray();
ByteArrayInputStream bin = new ByteArrayInputStream(serialized);
CsvRecordInput rin = new CsvRecordInput(bin);
times.deserialize = System.nanoTime();
for(int idx = 0; idx < numRecords; idx++) {
records[idx].deserialize(rin);
}
times.deserialize = System.nanoTime() - times.deserialize;
}
private static void runXmlBench(String type, int numRecords, Times times)
throws IOException {
Record[] records = makeArray(type, numRecords, times);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
XmlRecordOutput rout = new XmlRecordOutput(bout);
for(int idx = 0; idx < numRecords; idx++) {
records[idx].serialize(rout);
}
bout.reset();
bout.write("<records>\n".getBytes());
times.serialize = System.nanoTime();
for(int idx = 0; idx < numRecords; idx++) {
records[idx].serialize(rout);
}
times.serialize = System.nanoTime() - times.serialize;
bout.write("</records>\n".getBytes());
byte[] serialized = bout.toByteArray();
ByteArrayInputStream bin = new ByteArrayInputStream(serialized);
times.deserialize = System.nanoTime();
XmlRecordInput rin = new XmlRecordInput(bin);
for(int idx = 0; idx < numRecords; idx++) {
records[idx].deserialize(rin);
}
times.deserialize = System.nanoTime() - times.deserialize;
}
private static void printTimes(String type,
String format,
int numRecords,
Times times) {
System.out.println("Type: " + type + " Format: " + format +
" #Records: "+numRecords);
if (times.init != 0) {
System.out.println("Initialization Time (Per record) : "+
times.init/numRecords + " Nanoseconds");
}
if (times.serialize != 0) {
System.out.println("Serialization Time (Per Record) : "+
times.serialize/numRecords + " Nanoseconds");
}
if (times.deserialize != 0) {
System.out.println("Deserialization Time (Per Record) : "+
times.deserialize/numRecords + " Nanoseconds");
}
if (times.write != 0) {
System.out.println("Write Time (Per Record) : "+
times.write/numRecords + " Nanoseconds");
}
if (times.readFields != 0) {
System.out.println("ReadFields Time (Per Record) : "+
times.readFields/numRecords + " Nanoseconds");
}
System.out.println();
}
private static String toCamelCase(String inp) {
char firstChar = inp.charAt(0);
if (Character.isLowerCase(firstChar)) {
return ""+Character.toUpperCase(firstChar) + inp.substring(1);
}
return inp;
}
private static void exitOnError() {
String usage = "RecordBench {buffer|string|int}"+
" {binary|csv|xml} <numRecords>";
System.out.println(usage);
System.exit(1);
}
/**
* @param args the command line arguments
*/
public static void main(String[] args) throws IOException {
String version = "RecordBench v0.1";
System.out.println(version+"\n");
if (args.length != 3) {
exitOnError();
}
String typeName = args[0];
String format = args[1];
int numRecords = Integer.decode(args[2]).intValue();
Method bench = null;
try {
bench = RecordBench.class.getDeclaredMethod("run"+
toCamelCase(format) + "Bench",
new Class[] {String.class, Integer.TYPE, Times.class});
} catch (NoSuchMethodException ex) {
ex.printStackTrace();
exitOnError();
}
if (numRecords < 0) {
exitOnError();
}
// dry run
rand.setSeed(SEED);
Times times = new Times();
try {
bench.invoke(null, new Object[] {typeName, numRecords, times});
} catch (Exception ex) {
ex.printStackTrace();
System.exit(1);
}
// timed run
rand.setSeed(SEED);
try {
bench.invoke(null, new Object[] {typeName, numRecords, times});
} catch (Exception ex) {
ex.printStackTrace();
System.exit(1);
}
printTimes(typeName, format, numRecords, times);
}
}
@@ -1,124 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.record;
import junit.framework.*;
/**
* A Unit test for Record I/O Buffer class
*/
public class TestBuffer extends TestCase {
public TestBuffer(String testName) {
super(testName);
}
/**
* Test of set method, of class org.apache.hadoop.record.Buffer.
*/
public void testSet() {
final byte[] bytes = new byte[10];
final Buffer instance = new Buffer();
instance.set(bytes);
assertEquals("set failed", bytes, instance.get());
}
/**
* Test of copy method, of class org.apache.hadoop.record.Buffer.
*/
public void testCopy() {
final byte[] bytes = new byte[10];
final int offset = 6;
final int length = 3;
for (int idx = 0; idx < 10; idx ++) {
bytes[idx] = (byte) idx;
}
final Buffer instance = new Buffer();
instance.copy(bytes, offset, length);
assertEquals("copy failed", 3, instance.getCapacity());
assertEquals("copy failed", 3, instance.get().length);
for (int idx = 0; idx < 3; idx++) {
assertEquals("Buffer content corrupted", idx+6, instance.get()[idx]);
}
}
/**
* Test of getCount method, of class org.apache.hadoop.record.Buffer.
*/
public void testGetCount() {
final Buffer instance = new Buffer();
final int expResult = 0;
final int result = instance.getCount();
assertEquals("getSize failed", expResult, result);
}
/**
* Test of getCapacity method, of class org.apache.hadoop.record.Buffer.
*/
public void testGetCapacity() {
final Buffer instance = new Buffer();
final int expResult = 0;
final int result = instance.getCapacity();
assertEquals("getCapacity failed", expResult, result);
instance.setCapacity(100);
assertEquals("setCapacity failed", 100, instance.getCapacity());
}
/**
* Test of truncate method, of class org.apache.hadoop.record.Buffer.
*/
public void testTruncate() {
final Buffer instance = new Buffer();
instance.setCapacity(100);
assertEquals("setCapacity failed", 100, instance.getCapacity());
instance.truncate();
assertEquals("truncate failed", 0, instance.getCapacity());
}
/**
* Test of append method, of class org.apache.hadoop.record.Buffer.
*/
public void testAppend() {
final byte[] bytes = new byte[100];
final int offset = 0;
final int length = 100;
for (int idx = 0; idx < 100; idx++) {
bytes[idx] = (byte) (100-idx);
}
final Buffer instance = new Buffer();
instance.append(bytes, offset, length);
assertEquals("Buffer size mismatch", 100, instance.getCount());
for (int idx = 0; idx < 100; idx++) {
assertEquals("Buffer contents corrupted", 100-idx, instance.get()[idx]);
}
}
}
@@ -1,201 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.record;
import java.io.IOException;
import junit.framework.*;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.util.ArrayList;
import java.util.TreeMap;
/**
*/
public class TestRecordIO extends TestCase {
public TestRecordIO(String testName) {
super(testName);
}
@Override
protected void setUp() throws Exception {
}
@Override
protected void tearDown() throws Exception {
}
public void testBinary() {
File tmpfile;
try {
tmpfile = File.createTempFile("hadooprec", ".dat");
FileOutputStream ostream = new FileOutputStream(tmpfile);
BinaryRecordOutput out = new BinaryRecordOutput(ostream);
RecRecord1 r1 = new RecRecord1();
r1.setBoolVal(true);
r1.setByteVal((byte)0x66);
r1.setFloatVal(3.145F);
r1.setDoubleVal(1.5234);
r1.setIntVal(-4567);
r1.setLongVal(-2367L);
r1.setStringVal("random text");
r1.setBufferVal(new Buffer());
r1.setVectorVal(new ArrayList<String>());
r1.setMapVal(new TreeMap<String,String>());
RecRecord0 r0 = new RecRecord0();
r0.setStringVal("other random text");
r1.setRecordVal(r0);
r1.serialize(out, "");
ostream.close();
FileInputStream istream = new FileInputStream(tmpfile);
BinaryRecordInput in = new BinaryRecordInput(istream);
RecRecord1 r2 = new RecRecord1();
r2.deserialize(in, "");
istream.close();
tmpfile.delete();
assertTrue("Serialized and deserialized records do not match.", r1.equals(r2));
} catch (IOException ex) {
ex.printStackTrace();
}
}
public void testCsv() {
File tmpfile;
try {
tmpfile = File.createTempFile("hadooprec", ".txt");
FileOutputStream ostream = new FileOutputStream(tmpfile);
CsvRecordOutput out = new CsvRecordOutput(ostream);
RecRecord1 r1 = new RecRecord1();
r1.setBoolVal(true);
r1.setByteVal((byte)0x66);
r1.setFloatVal(3.145F);
r1.setDoubleVal(1.5234);
r1.setIntVal(4567);
r1.setLongVal(0x5a5a5a5a5a5aL);
r1.setStringVal("random text");
r1.setBufferVal(new Buffer());
r1.setVectorVal(new ArrayList<String>());
r1.setMapVal(new TreeMap<String,String>());
RecRecord0 r0 = new RecRecord0();
r0.setStringVal("other random text");
r1.setRecordVal(r0);
r1.serialize(out, "");
ostream.close();
FileInputStream istream = new FileInputStream(tmpfile);
CsvRecordInput in = new CsvRecordInput(istream);
RecRecord1 r2 = new RecRecord1();
r2.deserialize(in, "");
istream.close();
tmpfile.delete();
assertTrue("Serialized and deserialized records do not match.", r1.equals(r2));
} catch (IOException ex) {
ex.printStackTrace();
}
}
public void testToString() {
try {
RecRecord1 r1 = new RecRecord1();
r1.setBoolVal(true);
r1.setByteVal((byte)0x66);
r1.setFloatVal(3.145F);
r1.setDoubleVal(1.5234);
r1.setIntVal(4567);
r1.setLongVal(0x5a5a5a5a5a5aL);
r1.setStringVal("random text");
byte[] barr = new byte[256];
for (int idx = 0; idx < 256; idx++) {
barr[idx] = (byte) idx;
}
r1.setBufferVal(new Buffer(barr));
r1.setVectorVal(new ArrayList<String>());
r1.setMapVal(new TreeMap<String,String>());
RecRecord0 r0 = new RecRecord0();
r0.setStringVal("other random text");
r1.setRecordVal(r0);
System.err.println("Illustrating toString bug"+r1.toString());
System.err.println("Illustrating toString bug"+r1.toString());
} catch (Throwable ex) {
assertTrue("Record.toString cannot be invoked twice in succession."+
"This bug has been fixed in the latest version.", false);
}
}
public void testXml() {
File tmpfile;
try {
tmpfile = File.createTempFile("hadooprec", ".xml");
FileOutputStream ostream = new FileOutputStream(tmpfile);
XmlRecordOutput out = new XmlRecordOutput(ostream);
RecRecord1 r1 = new RecRecord1();
r1.setBoolVal(true);
r1.setByteVal((byte)0x66);
r1.setFloatVal(3.145F);
r1.setDoubleVal(1.5234);
r1.setIntVal(4567);
r1.setLongVal(0x5a5a5a5a5a5aL);
r1.setStringVal("ran\002dom &lt; %text<&more\uffff");
r1.setBufferVal(new Buffer());
r1.setVectorVal(new ArrayList<String>());
r1.setMapVal(new TreeMap<String,String>());
RecRecord0 r0 = new RecRecord0();
r0.setStringVal("other %rando\007m &amp; >&more text");
r1.setRecordVal(r0);
r1.serialize(out, "");
ostream.close();
FileInputStream istream = new FileInputStream(tmpfile);
XmlRecordInput in = new XmlRecordInput(istream);
RecRecord1 r2 = new RecRecord1();
r2.deserialize(in, "");
istream.close();
tmpfile.delete();
assertTrue("Serialized and deserialized records do not match.", r1.equals(r2));
} catch (IOException ex) {
ex.printStackTrace();
}
}
public void testCloneable() {
RecRecord1 r1 = new RecRecord1();
r1.setBoolVal(true);
r1.setByteVal((byte)0x66);
r1.setFloatVal(3.145F);
r1.setDoubleVal(1.5234);
r1.setIntVal(-4567);
r1.setLongVal(-2367L);
r1.setStringVal("random text");
r1.setBufferVal(new Buffer());
r1.setVectorVal(new ArrayList<String>());
r1.setMapVal(new TreeMap<String,String>());
RecRecord0 r0 = new RecRecord0();
r0.setStringVal("other random text");
r1.setRecordVal(r0);
try {
RecRecord1 r2 = (RecRecord1) r1.clone();
assertTrue("Cloneable semantics violated. r1==r2", r1 != r2);
assertTrue("Cloneable semantics violated. r1.getClass() != r2.getClass()",
r1.getClass() == r2.getClass());
assertTrue("Cloneable semantics violated. !r2.equals(r1)", r2.equals(r1));
} catch (final CloneNotSupportedException ex) {
ex.printStackTrace();
}
}
}
@@ -1,241 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.record;
import java.io.IOException;
import junit.framework.*;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.util.ArrayList;
import java.util.TreeMap;
import org.apache.hadoop.record.meta.RecordTypeInfo;
/**
*/
public class TestRecordVersioning extends TestCase {
public TestRecordVersioning(String testName) {
super(testName);
}
@Override
protected void setUp() throws Exception {
}
@Override
protected void tearDown() throws Exception {
}
/*
* basic versioning
* write out a record and its type info, read it back using its typeinfo
*/
public void testBasic() {
File tmpfile, tmpRTIfile;
try {
tmpfile = File.createTempFile("hadooprec", ".dat");
tmpRTIfile = File.createTempFile("hadooprti", ".dat");
FileOutputStream ostream = new FileOutputStream(tmpfile);
BinaryRecordOutput out = new BinaryRecordOutput(ostream);
FileOutputStream oRTIstream = new FileOutputStream(tmpRTIfile);
BinaryRecordOutput outRTI = new BinaryRecordOutput(oRTIstream);
RecRecord1 r1 = new RecRecord1();
r1.setBoolVal(true);
r1.setByteVal((byte)0x66);
r1.setFloatVal(3.145F);
r1.setDoubleVal(1.5234);
r1.setIntVal(-4567);
r1.setLongVal(-2367L);
r1.setStringVal("random text");
r1.setBufferVal(new Buffer());
r1.setVectorVal(new ArrayList<String>());
r1.setMapVal(new TreeMap<String,String>());
RecRecord0 r0 = new RecRecord0();
r0.setStringVal("other random text");
r1.setRecordVal(r0);
r1.serialize(out, "");
ostream.close();
// write out the type info
RecRecord1.getTypeInfo().serialize(outRTI);
oRTIstream.close();
// read
FileInputStream istream = new FileInputStream(tmpfile);
BinaryRecordInput in = new BinaryRecordInput(istream);
FileInputStream iRTIstream = new FileInputStream(tmpRTIfile);
BinaryRecordInput inRTI = new BinaryRecordInput(iRTIstream);
RecordTypeInfo rti = new RecordTypeInfo();
rti.deserialize(inRTI);
iRTIstream.close();
RecRecord1.setTypeFilter(rti);
RecRecord1 r2 = new RecRecord1();
r2.deserialize(in, "");
istream.close();
tmpfile.delete();
tmpRTIfile.delete();
assertTrue("Serialized and deserialized versioned records do not match.", r1.equals(r2));
} catch (IOException ex) {
ex.printStackTrace();
}
}
/*
* versioning
* write out a record and its type info, read back a similar record using the written record's typeinfo
*/
public void testVersioning() {
File tmpfile, tmpRTIfile;
try {
tmpfile = File.createTempFile("hadooprec", ".dat");
tmpRTIfile = File.createTempFile("hadooprti", ".dat");
FileOutputStream ostream = new FileOutputStream(tmpfile);
BinaryRecordOutput out = new BinaryRecordOutput(ostream);
FileOutputStream oRTIstream = new FileOutputStream(tmpRTIfile);
BinaryRecordOutput outRTI = new BinaryRecordOutput(oRTIstream);
// we create an array of records to write
ArrayList<RecRecordOld> recsWrite = new ArrayList<RecRecordOld>();
int i, j, k, l;
for (i=0; i<5; i++) {
RecRecordOld s1Rec = new RecRecordOld();
s1Rec.setName("This is record s1: " + i);
ArrayList<Long> iA = new ArrayList<Long>();
for (j=0; j<3; j++) {
iA.add(new Long(i+j));
}
s1Rec.setIvec(iA);
ArrayList<ArrayList<RecRecord0>> ssVec = new ArrayList<ArrayList<RecRecord0>>();
for (j=0; j<2; j++) {
ArrayList<RecRecord0> sVec = new ArrayList<RecRecord0>();
for (k=0; k<3; k++) {
RecRecord0 sRec = new RecRecord0("This is record s: ("+j+": "+k+")");
sVec.add(sRec);
}
ssVec.add(sVec);
}
s1Rec.setSvec(ssVec);
s1Rec.setInner(new RecRecord0("This is record s: " + i));
ArrayList<ArrayList<ArrayList<String>>> aaaVec = new ArrayList<ArrayList<ArrayList<String>>>();
for (l=0; l<2; l++) {
ArrayList<ArrayList<String>> aaVec = new ArrayList<ArrayList<String>>();
for (j=0; j<2; j++) {
ArrayList<String> aVec = new ArrayList<String>();
for (k=0; k<3; k++) {
aVec.add(new String("THis is a nested string: (" + l + ": " + j + ": " + k + ")"));
}
aaVec.add(aVec);
}
aaaVec.add(aaVec);
}
s1Rec.setStrvec(aaaVec);
s1Rec.setI1(100+i);
java.util.TreeMap<Byte,String> map1 = new java.util.TreeMap<Byte,String>();
map1.put(new Byte("23"), "23");
map1.put(new Byte("11"), "11");
s1Rec.setMap1(map1);
java.util.TreeMap<Integer,Long> m1 = new java.util.TreeMap<Integer,Long>();
java.util.TreeMap<Integer,Long> m2 = new java.util.TreeMap<Integer,Long>();
m1.put(new Integer(5), 5L);
m1.put(new Integer(10), 10L);
m2.put(new Integer(15), 15L);
m2.put(new Integer(20), 20L);
java.util.ArrayList<java.util.TreeMap<Integer,Long>> vm1 = new java.util.ArrayList<java.util.TreeMap<Integer,Long>>();
vm1.add(m1);
vm1.add(m2);
s1Rec.setMvec1(vm1);
java.util.ArrayList<java.util.TreeMap<Integer,Long>> vm2 = new java.util.ArrayList<java.util.TreeMap<Integer,Long>>();
vm2.add(m1);
s1Rec.setMvec2(vm2);
// add to our list
recsWrite.add(s1Rec);
}
// write out to file
for (RecRecordOld rec: recsWrite) {
rec.serialize(out);
}
ostream.close();
// write out the type info
RecRecordOld.getTypeInfo().serialize(outRTI);
oRTIstream.close();
// read
FileInputStream istream = new FileInputStream(tmpfile);
BinaryRecordInput in = new BinaryRecordInput(istream);
FileInputStream iRTIstream = new FileInputStream(tmpRTIfile);
BinaryRecordInput inRTI = new BinaryRecordInput(iRTIstream);
RecordTypeInfo rti = new RecordTypeInfo();
// read type info
rti.deserialize(inRTI);
iRTIstream.close();
RecRecordNew.setTypeFilter(rti);
// read records
ArrayList<RecRecordNew> recsRead = new ArrayList<RecRecordNew>();
for (i=0; i<recsWrite.size(); i++) {
RecRecordNew s2Rec = new RecRecordNew();
s2Rec.deserialize(in);
recsRead.add(s2Rec);
}
istream.close();
tmpfile.delete();
tmpRTIfile.delete();
// compare
for (i=0; i<recsRead.size(); i++) {
RecRecordOld s1Out = recsWrite.get(i);
RecRecordNew s2In = recsRead.get(i);
assertTrue("Incorrectly read name2 field", null == s2In.getName2());
assertTrue("Error comparing inner fields", (0 == s1Out.getInner().compareTo(s2In.getInner())));
assertTrue("Incorrectly read ivec field", null == s2In.getIvec());
assertTrue("Incorrectly read svec field", null == s2In.getSvec());
for (j=0; j<s2In.getStrvec().size(); j++) {
ArrayList<ArrayList<String>> ss2Vec = s2In.getStrvec().get(j);
ArrayList<ArrayList<String>> ss1Vec = s1Out.getStrvec().get(j);
for (k=0; k<ss2Vec.size(); k++) {
ArrayList<String> s2Vec = ss2Vec.get(k);
ArrayList<String> s1Vec = ss1Vec.get(k);
for (l=0; l<s2Vec.size(); l++) {
assertTrue("Error comparing strVec fields", (0 == s2Vec.get(l).compareTo(s1Vec.get(l))));
}
}
}
assertTrue("Incorrectly read map1 field", null == s2In.getMap1());
for (j=0; j<s2In.getMvec2().size(); j++) {
assertTrue("Error comparing mvec2 fields", (s2In.getMvec2().get(j).equals(s1Out.getMvec2().get(j))));
}
}
} catch (IOException ex) {
ex.printStackTrace();
}
}
}
@@ -1,115 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.record;
import java.io.IOException;
import junit.framework.*;
import java.io.File;
import java.io.FileOutputStream;
import java.util.ArrayList;
import java.util.TreeMap;
/**
*/
public class ToCpp extends TestCase {
public ToCpp(String testName) {
super(testName);
}
@Override
protected void setUp() throws Exception {
}
@Override
protected void tearDown() throws Exception {
}
public void testBinary() {
File tmpfile;
try {
tmpfile = new File("/tmp/hadooptemp.dat");
FileOutputStream ostream = new FileOutputStream(tmpfile);
BinaryRecordOutput out = new BinaryRecordOutput(ostream);
RecRecord1 r1 = new RecRecord1();
r1.setBoolVal(true);
r1.setByteVal((byte)0x66);
r1.setFloatVal(3.145F);
r1.setDoubleVal(1.5234);
r1.setIntVal(4567);
r1.setLongVal(0x5a5a5a5a5a5aL);
r1.setStringVal("random text");
r1.setBufferVal(new Buffer());
r1.setVectorVal(new ArrayList<String>());
r1.setMapVal(new TreeMap<String,String>());
r1.serialize(out, "");
ostream.close();
} catch (IOException ex) {
ex.printStackTrace();
}
}
public void testCsv() {
File tmpfile;
try {
tmpfile = new File("/tmp/hadooptemp.txt");
FileOutputStream ostream = new FileOutputStream(tmpfile);
CsvRecordOutput out = new CsvRecordOutput(ostream);
RecRecord1 r1 = new RecRecord1();
r1.setBoolVal(true);
r1.setByteVal((byte)0x66);
r1.setFloatVal(3.145F);
r1.setDoubleVal(1.5234);
r1.setIntVal(4567);
r1.setLongVal(0x5a5a5a5a5a5aL);
r1.setStringVal("random text");
r1.setBufferVal(new Buffer());
r1.setVectorVal(new ArrayList<String>());
r1.setMapVal(new TreeMap<String,String>());
r1.serialize(out, "");
ostream.close();
} catch (IOException ex) {
ex.printStackTrace();
}
}
public void testXml() {
File tmpfile;
try {
tmpfile = new File("/tmp/hadooptemp.xml");
FileOutputStream ostream = new FileOutputStream(tmpfile);
XmlRecordOutput out = new XmlRecordOutput(ostream);
RecRecord1 r1 = new RecRecord1();
r1.setBoolVal(true);
r1.setByteVal((byte)0x66);
r1.setFloatVal(3.145F);
r1.setDoubleVal(1.5234);
r1.setIntVal(4567);
r1.setLongVal(0x5a5a5a5a5a5aL);
r1.setStringVal("random text");
r1.setBufferVal(new Buffer());
r1.setVectorVal(new ArrayList<String>());
r1.setMapVal(new TreeMap<String,String>());
r1.serialize(out, "");
ostream.close();
} catch (IOException ex) {
ex.printStackTrace();
}
}
}
@@ -108,7 +108,7 @@ public abstract class GetGroupsTestBase {
for (String group : user.getGroupNames()) {
expectedOutput += " " + group;
}
return expectedOutput + "\n";
return expectedOutput + System.getProperty("line.separator");
}
private String runTool(Configuration conf, String[] args, boolean success)
@@ -18,6 +18,8 @@
package org.apache.hadoop.util;
import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.long2String;
import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.string2long;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
@@ -26,6 +28,7 @@ import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.test.UnitTestcaseTimeLimit;
import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
import org.junit.Test;
public class TestStringUtils extends UnitTestcaseTimeLimit {
@@ -134,45 +137,34 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
@Test
public void testTraditionalBinaryPrefix() throws Exception {
// test string2long(..)
String[] symbol = {"k", "m", "g", "t", "p", "e"};
long m = 1024;
for(String s : symbol) {
assertEquals(0, StringUtils.TraditionalBinaryPrefix.string2long(0 + s));
assertEquals(m, StringUtils.TraditionalBinaryPrefix.string2long(1 + s));
assertEquals(0, string2long(0 + s));
assertEquals(m, string2long(1 + s));
m *= 1024;
}
assertEquals(0L, StringUtils.TraditionalBinaryPrefix.string2long("0"));
assertEquals(1024L, StringUtils.TraditionalBinaryPrefix.string2long("1k"));
assertEquals(-1024L, StringUtils.TraditionalBinaryPrefix.string2long("-1k"));
assertEquals(1259520L,
StringUtils.TraditionalBinaryPrefix.string2long("1230K"));
assertEquals(-1259520L,
StringUtils.TraditionalBinaryPrefix.string2long("-1230K"));
assertEquals(104857600L,
StringUtils.TraditionalBinaryPrefix.string2long("100m"));
assertEquals(-104857600L,
StringUtils.TraditionalBinaryPrefix.string2long("-100M"));
assertEquals(956703965184L,
StringUtils.TraditionalBinaryPrefix.string2long("891g"));
assertEquals(-956703965184L,
StringUtils.TraditionalBinaryPrefix.string2long("-891G"));
assertEquals(501377302265856L,
StringUtils.TraditionalBinaryPrefix.string2long("456t"));
assertEquals(-501377302265856L,
StringUtils.TraditionalBinaryPrefix.string2long("-456T"));
assertEquals(11258999068426240L,
StringUtils.TraditionalBinaryPrefix.string2long("10p"));
assertEquals(-11258999068426240L,
StringUtils.TraditionalBinaryPrefix.string2long("-10P"));
assertEquals(1152921504606846976L,
StringUtils.TraditionalBinaryPrefix.string2long("1e"));
assertEquals(-1152921504606846976L,
StringUtils.TraditionalBinaryPrefix.string2long("-1E"));
assertEquals(0L, string2long("0"));
assertEquals(1024L, string2long("1k"));
assertEquals(-1024L, string2long("-1k"));
assertEquals(1259520L, string2long("1230K"));
assertEquals(-1259520L, string2long("-1230K"));
assertEquals(104857600L, string2long("100m"));
assertEquals(-104857600L, string2long("-100M"));
assertEquals(956703965184L, string2long("891g"));
assertEquals(-956703965184L, string2long("-891G"));
assertEquals(501377302265856L, string2long("456t"));
assertEquals(-501377302265856L, string2long("-456T"));
assertEquals(11258999068426240L, string2long("10p"));
assertEquals(-11258999068426240L, string2long("-10P"));
assertEquals(1152921504606846976L, string2long("1e"));
assertEquals(-1152921504606846976L, string2long("-1E"));
String tooLargeNumStr = "10e";
try {
StringUtils.TraditionalBinaryPrefix.string2long(tooLargeNumStr);
string2long(tooLargeNumStr);
fail("Test passed for a number " + tooLargeNumStr + " too large");
} catch (IllegalArgumentException e) {
assertEquals(tooLargeNumStr + " does not fit in a Long", e.getMessage());
@@ -180,7 +172,7 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
String tooSmallNumStr = "-10e";
try {
StringUtils.TraditionalBinaryPrefix.string2long(tooSmallNumStr);
string2long(tooSmallNumStr);
fail("Test passed for a number " + tooSmallNumStr + " too small");
} catch (IllegalArgumentException e) {
assertEquals(tooSmallNumStr + " does not fit in a Long", e.getMessage());
@@ -189,7 +181,7 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
String invalidFormatNumStr = "10kb";
char invalidPrefix = 'b';
try {
StringUtils.TraditionalBinaryPrefix.string2long(invalidFormatNumStr);
string2long(invalidFormatNumStr);
fail("Test passed for a number " + invalidFormatNumStr
+ " has invalid format");
} catch (IllegalArgumentException e) {
@@ -199,6 +191,74 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
e.getMessage());
}
// test long2String(..)
assertEquals("0", long2String(0, null, 2));
for(int decimalPlace = 0; decimalPlace < 2; decimalPlace++) {
for(int n = 1; n < TraditionalBinaryPrefix.KILO.value; n++) {
assertEquals(n + "", long2String(n, null, decimalPlace));
assertEquals(-n + "", long2String(-n, null, decimalPlace));
}
assertEquals("1 K", long2String(1L << 10, null, decimalPlace));
assertEquals("-1 K", long2String(-1L << 10, null, decimalPlace));
}
assertEquals("8.00 E", long2String(Long.MAX_VALUE, null, 2));
assertEquals("8.00 E", long2String(Long.MAX_VALUE - 1, null, 2));
assertEquals("-8 E", long2String(Long.MIN_VALUE, null, 2));
assertEquals("-8.00 E", long2String(Long.MIN_VALUE + 1, null, 2));
final String[] zeros = {" ", ".0 ", ".00 "};
for(int decimalPlace = 0; decimalPlace < zeros.length; decimalPlace++) {
final String trailingZeros = zeros[decimalPlace];
for(int e = 11; e < Long.SIZE - 1; e++) {
final TraditionalBinaryPrefix p
= TraditionalBinaryPrefix.values()[e/10 - 1];
{ // n = 2^e
final long n = 1L << e;
final String expected = (n/p.value) + " " + p.symbol;
assertEquals("n=" + n, expected, long2String(n, null, 2));
}
{ // n = 2^e + 1
final long n = (1L << e) + 1;
final String expected = (n/p.value) + trailingZeros + p.symbol;
assertEquals("n=" + n, expected, long2String(n, null, decimalPlace));
}
{ // n = 2^e - 1
final long n = (1L << e) - 1;
final String expected = ((n+1)/p.value) + trailingZeros + p.symbol;
assertEquals("n=" + n, expected, long2String(n, null, decimalPlace));
}
}
}
assertEquals("1.50 K", long2String(3L << 9, null, 2));
assertEquals("1.5 K", long2String(3L << 9, null, 1));
assertEquals("1.50 M", long2String(3L << 19, null, 2));
assertEquals("2 M", long2String(3L << 19, null, 0));
assertEquals("3 G", long2String(3L << 30, null, 2));
// test byteDesc(..)
assertEquals("0 B", StringUtils.byteDesc(0));
assertEquals("-100 B", StringUtils.byteDesc(-100));
assertEquals("1 KB", StringUtils.byteDesc(1024));
assertEquals("1.50 KB", StringUtils.byteDesc(3L << 9));
assertEquals("1.50 MB", StringUtils.byteDesc(3L << 19));
assertEquals("3 GB", StringUtils.byteDesc(3L << 30));
// test formatPercent(..)
assertEquals("10%", StringUtils.formatPercent(0.1, 0));
assertEquals("10.0%", StringUtils.formatPercent(0.1, 1));
assertEquals("10.00%", StringUtils.formatPercent(0.1, 2));
assertEquals("1%", StringUtils.formatPercent(0.00543, 0));
assertEquals("0.5%", StringUtils.formatPercent(0.00543, 1));
assertEquals("0.54%", StringUtils.formatPercent(0.00543, 2));
assertEquals("0.543%", StringUtils.formatPercent(0.00543, 3));
assertEquals("0.5430%", StringUtils.formatPercent(0.00543, 4));
}
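// Taken together, the assertions above pin the two directions of the
// conversion against each other; a minimal round-trip sketch, reusing the
// static imports and values from the cases above:
//   long n = string2long("891g");        // 891 * 2^30 = 956703965184L
//   String s = long2String(n, null, 2);  // "891 G" (exact multiples drop decimals)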
@Test
@ -314,10 +374,9 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
}
long et = System.nanoTime();
if (outer > 3) {
System.out.println(
(useOurs ? "StringUtils impl" : "Java impl") +
" #" + outer + ":" +
(et - st)/1000000 + "ms");
System.out.println( (useOurs ? "StringUtils impl" : "Java impl")
+ " #" + outer + ":" + (et - st)/1000000 + "ms, components="
+ components );
}
}
}
View File
@ -17,6 +17,8 @@
*/
package org.apache.hadoop.fs.http.client;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
@ -86,6 +88,7 @@ public class HttpFSFileSystem extends FileSystem
public static final String PERMISSION_PARAM = "permission";
public static final String DESTINATION_PARAM = "destination";
public static final String RECURSIVE_PARAM = "recursive";
public static final String SOURCES_PARAM = "sources";
public static final String OWNER_PARAM = "owner";
public static final String GROUP_PARAM = "group";
public static final String MODIFICATION_TIME_PARAM = "modificationtime";
@ -167,7 +170,7 @@ public class HttpFSFileSystem extends FileSystem
GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
GETFILECHECKSUM(HTTP_GET), GETFILEBLOCKLOCATIONS(HTTP_GET),
INSTRUMENTATION(HTTP_GET),
APPEND(HTTP_POST),
APPEND(HTTP_POST), CONCAT(HTTP_POST),
CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT),
SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
DELETE(HTTP_DELETE);
@ -528,6 +531,29 @@ public class HttpFSFileSystem extends FileSystem
HttpURLConnection.HTTP_OK);
}
/**
* Concat existing files together.
* @param f the path to the target destination.
* @param psrcs the paths to the sources to use for the concatenation.
*
* @throws IOException
*/
@Override
public void concat(Path f, Path[] psrcs) throws IOException {
List<String> strPaths = new ArrayList<String>(psrcs.length);
for(Path psrc : psrcs) {
strPaths.add(psrc.toUri().getPath());
}
String srcs = StringUtils.join(",", strPaths);
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.CONCAT.toString());
params.put(SOURCES_PARAM, srcs);
HttpURLConnection conn = getConnection(Operation.CONCAT.getMethod(),
params, f, true);
HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
}
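// A minimal client-side sketch of the new operation (hypothetical URI and
// paths; the target and sources must live on the same HttpFS-backed file system):
//   FileSystem fs = FileSystem.get(uri, conf);  // resolves to HttpFSFileSystem
//   fs.concat(new Path("/data/part-0"),
//       new Path[] { new Path("/data/part-1"), new Path("/data/part-2") });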
/**
* Renames Path src to Path dst. Can take place on local fs
* or remote DFS.
View File
@ -198,6 +198,47 @@ public class FSOperations {
}
/**
* Executor that performs an append FileSystemAccess files system operation.
*/
@InterfaceAudience.Private
public static class FSConcat implements FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
private Path[] sources;
/**
* Creates a Concat executor.
*
* @param path target path to concat to.
* @param sources comma-separated absolute paths to use as sources.
*/
public FSConcat(String path, String[] sources) {
this.sources = new Path[sources.length];
for(int i = 0; i < sources.length; i++) {
this.sources[i] = new Path(sources[i]);
}
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
fs.concat(path, sources);
return null;
}
}
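// A minimal sketch of how this executor is driven (it mirrors the
// HttpFSServer wiring shown further below; the paths are hypothetical):
//   FSOperations.FSConcat command = new FSOperations.FSConcat(
//       "/data/target", new String[] { "/data/a", "/data/b" });
//   command.execute(fs);  // fs is the server-side FileSystem instance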
/**
* Executor that performs a content-summary FileSystemAccess files system operation.
*/
View File
@ -58,6 +58,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
PARAMS_DEF.put(Operation.INSTRUMENTATION, new Class[]{DoAsParam.class});
PARAMS_DEF.put(Operation.APPEND,
new Class[]{DoAsParam.class, DataParam.class});
PARAMS_DEF.put(Operation.CONCAT, new Class[]{SourcesParam.class});
PARAMS_DEF.put(Operation.CREATE,
new Class[]{DoAsParam.class, PermissionParam.class, OverwriteParam.class,
ReplicationParam.class, BlockSizeParam.class, DataParam.class});
@ -388,6 +389,25 @@ public class HttpFSParametersProvider extends ParametersProvider {
}
}
/**
* Class for concat sources parameter.
*/
@InterfaceAudience.Private
public static class SourcesParam extends StringParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.SOURCES_PARAM;
/**
* Constructor.
*/
public SourcesParam() {
super(NAME, null);
}
}
/**
* Class for to-path parameter.
*/
View File
@ -22,22 +22,23 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OperationParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AccessTimeParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.BlockSizeParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.RecursiveParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DoAsParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.FilterParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.GroupParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ModifiedTimeParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OffsetParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OperationParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OverwriteParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OwnerParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.PermissionParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.RecursiveParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ReplicationParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.SourcesParam;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.service.FileSystemAccessException;
import org.apache.hadoop.lib.service.Groups;
@ -403,9 +404,9 @@ public class HttpFSServer {
Response response;
path = makeAbsolute(path);
MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
switch (op.value()) {
case APPEND: {
String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
Boolean hasData = params.get(DataParam.NAME, DataParam.class);
if (!hasData) {
response = Response.temporaryRedirect(
@ -420,6 +421,18 @@ public class HttpFSServer {
}
break;
}
case CONCAT: {
String sources = params.get(SourcesParam.NAME, SourcesParam.class);
FSOperations.FSConcat command =
new FSOperations.FSConcat(path, sources.split(","));
fsExecute(user, null, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok().build();
break;
}
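// For reference, the case above services a request of the following shape
// (hypothetical host; default HttpFS port and the paths from the tests below):
//   POST http://httpfs.example.com:14000/webhdfs/v1/test/foo.txt?op=CONCAT&sources=/test/bar.txt,/test/derp.txt
// A 200 OK response with an empty body indicates the concat succeeded.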
default: {
throw new IOException(
MessageFormat.format("Invalid HTTP POST operation [{0}]",
View File
@ -28,6 +28,8 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.HadoopUsersConfTestHelper;
@ -206,6 +208,30 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
}
}
private void testConcat() throws Exception {
Configuration config = getProxiedFSConf();
config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
if (!isLocalFS()) {
FileSystem fs = FileSystem.get(config);
fs.mkdirs(getProxiedFSTestDir());
Path path1 = new Path("/test/foo.txt");
Path path2 = new Path("/test/bar.txt");
Path path3 = new Path("/test/derp.txt");
DFSTestUtil.createFile(fs, path1, 1024, (short) 3, 0);
DFSTestUtil.createFile(fs, path2, 1024, (short) 3, 0);
DFSTestUtil.createFile(fs, path3, 1024, (short) 3, 0);
fs.close();
fs = getHttpFSFileSystem();
fs.concat(path1, new Path[]{path2, path3});
fs.close();
fs = FileSystem.get(config);
Assert.assertTrue(fs.exists(path1));
Assert.assertFalse(fs.exists(path2));
Assert.assertFalse(fs.exists(path3));
fs.close();
}
}
private void testRename() throws Exception {
FileSystem fs = FileSystem.get(getProxiedFSConf());
Path path = new Path(getProxiedFSTestDir(), "foo");
@ -450,7 +476,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
}
protected enum Operation {
GET, OPEN, CREATE, APPEND, RENAME, DELETE, LIST_STATUS, WORKING_DIRECTORY, MKDIRS,
GET, OPEN, CREATE, APPEND, CONCAT, RENAME, DELETE, LIST_STATUS, WORKING_DIRECTORY, MKDIRS,
SET_TIMES, SET_PERMISSION, SET_OWNER, SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY
}
@ -468,6 +494,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
case APPEND:
testAppend();
break;
case CONCAT:
testConcat();
break;
case RENAME:
testRename();
break;
View File
@ -296,7 +296,32 @@ Trunk (Unreleased)
HDFS-4382. Fix typo MAX_NOT_CHANGED_INTERATIONS. (Ted Yu via suresh)
Release 2.0.3-alpha - Unreleased
HDFS-4340. Update addBlock() to include inode id as additional argument.
(Brandon Li via suresh)
Release 2.0.4-beta - UNRELEASED
INCOMPATIBLE CHANGES
NEW FEATURES
IMPROVEMENTS
OPTIMIZATIONS
BUG FIXES
HDFS-4470. Several HDFS tests attempt file operations on invalid HDFS
paths when running on Windows. (Chris Nauroth via suresh)
HDFS-4471. Namenode WebUI file browsing does not work with wildcard
addresses configured. (Andrew Wang via atm)
HDFS-4342. Directories configured in dfs.namenode.edits.dir.required
but not in dfs.namenode.edits.dir are silently ignored. (Arpit Agarwal
via szetszwo)
Release 2.0.3-alpha - 2013-02-06
INCOMPATIBLE CHANGES
@ -314,6 +339,13 @@ Release 2.0.3-alpha - Unreleased
HDFS-4369. GetBlockKeysResponseProto does not handle null response.
(suresh)
HDFS-4451. hdfs balancer command returns exit code 1 on success instead
of 0. (Joshua Blatt via suresh)
HDFS-4350. Make enabling of stale marking on read and write paths
independent. (Andrew Wang via suresh)
NEW FEATURES
HDFS-2656. Add libwebhdfs, a pure C client based on WebHDFS.
@ -501,6 +533,8 @@ Release 2.0.3-alpha - Unreleased
HDFS-3598. WebHDFS support for file concat. (Plamen Jeliazkov via shv)
HDFS-4456. Add concat to HttpFS and WebHDFS REST API docs. (plamenj2003 via tucu)
OPTIMIZATIONS
HDFS-3429. DataNode reads checksums even if client does not need them (todd)
@ -735,6 +769,31 @@ Release 2.0.3-alpha - Unreleased
HDFS-4444. Add space between total transaction time and number of
transactions in FSEditLog#printStatistics. (Stephen Chu via suresh)
HDFS-4428. FsDatasetImpl should disclose what the error is when a rename
fails. (Colin Patrick McCabe via atm)
HDFS-4452. getAdditionalBlock() can create multiple blocks if the client
times out and retries. (shv)
HDFS-4445. All BKJM ledgers are not checked while tailing, So failover will fail.
(Vinay via umamahesh)
HDFS-4462. 2NN will fail to checkpoint after an HDFS upgrade from a
pre-federation version of HDFS. (atm)
HDFS-4404. Create file failure when the machine of first attempted NameNode
is down. (Todd Lipcon via atm)
HDFS-4344. dfshealth.jsp throws NumberFormatException when
dfs.hosts/dfs.hosts.exclude includes port number. (Andy Isaacson via atm)
HDFS-4468. Use the new StringUtils methods added by HADOOP-9252 and fix
TestHDFSCLI and TestQuota. (szetszwo)
HDFS-4458. In DFSUtil.getNameServiceUris(..), convert default fs URI using
NetUtils.createSocketAddr(..) for being consistent with other addresses.
(Binglin Chang via szetszwo)
BREAKDOWN OF HDFS-3077 SUBTASKS
HDFS-3077. Quorum-based protocol for reading and writing edit logs.
@ -2237,6 +2296,9 @@ Release 0.23.7 - UNRELEASED
HDFS-4288. NN accepts incremental BR as IBR in safemode (daryn via kihwal)
HDFS-4495. Allow client-side lease renewal to be retried beyond soft-limit
(kihwal)
Release 0.23.6 - UNRELEASED
INCOMPATIBLE CHANGES
View File
@ -209,7 +209,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
</goals>
<configuration>
<compile>false</compile>
<workingDirectory>${project.build.directory}/generated-src/main/jsp</workingDirectory>
<workingDirectory>${project.build.directory}/generated-sources/java</workingDirectory>
<webFragmentFile>${project.build.directory}/hdfs-jsp-servlet-definitions.xml</webFragmentFile>
<packageName>org.apache.hadoop.hdfs.server.namenode</packageName>
<sources>
@ -228,7 +228,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
</goals>
<configuration>
<compile>false</compile>
<workingDirectory>${project.build.directory}/generated-src/main/jsp</workingDirectory>
<workingDirectory>${project.build.directory}/generated-sources/java</workingDirectory>
<webFragmentFile>${project.build.directory}/secondary-jsp-servlet-definitions.xml</webFragmentFile>
<packageName>org.apache.hadoop.hdfs.server.namenode</packageName>
<sources>
@ -247,7 +247,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
</goals>
<configuration>
<compile>false</compile>
<workingDirectory>${project.build.directory}/generated-src/main/jsp</workingDirectory>
<workingDirectory>${project.build.directory}/generated-sources/java</workingDirectory>
<webFragmentFile>${project.build.directory}/journal-jsp-servlet-definitions.xml</webFragmentFile>
<packageName>org.apache.hadoop.hdfs.server.journalservice</packageName>
<sources>
@ -266,7 +266,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
</goals>
<configuration>
<compile>false</compile>
<workingDirectory>${project.build.directory}/generated-src/main/jsp</workingDirectory>
<workingDirectory>${project.build.directory}/generated-sources/java</workingDirectory>
<webFragmentFile>${project.build.directory}/datanode-jsp-servlet-definitions.xml</webFragmentFile>
<packageName>org.apache.hadoop.hdfs.server.datanode</packageName>
<sources>
@ -301,7 +301,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>build-helper-maven-plugin</artifactId>
<executions>
<execution>
<id>add-source</id>
<id>add-jsp-generated-sources-directory</id>
<phase>generate-sources</phase>
<goals>
<goal>add-source</goal>
@ -309,7 +309,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<configuration>
<sources>
<source>${project.build.directory}/generated-sources/java</source>
<source>${project.build.directory}/generated-src/main/jsp</source>
</sources>
</configuration>
</execution>
@ -323,14 +322,14 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
</configuration>
<executions>
<execution>
<id>create-protobuf-generated-sources-directory</id>
<id>create-jsp-generated-sources-directory</id>
<phase>initialize</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<mkdir dir="target/generated-sources/java" />
<mkdir dir="${project.build.directory}/generated-sources/java" />
</target>
</configuration>
</execution>
@ -408,80 +407,96 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-maven-plugins</artifactId>
<executions>
<execution>
<id>compile-proto</id>
<id>compile-protoc</id>
<phase>generate-sources</phase>
<goals>
<goal>exec</goal>
<goal>protoc</goal>
</goals>
<configuration>
<executable>protoc</executable>
<arguments>
<argument>-I../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
<argument>-Isrc/main/proto/</argument>
<argument>--java_out=target/generated-sources/java</argument>
<argument>src/main/proto/hdfs.proto</argument>
<argument>src/main/proto/GetUserMappingsProtocol.proto</argument>
<argument>src/main/proto/HAZKInfo.proto</argument>
<argument>src/main/proto/InterDatanodeProtocol.proto</argument>
<argument>src/main/proto/JournalProtocol.proto</argument>
<argument>src/main/proto/RefreshAuthorizationPolicyProtocol.proto</argument>
<argument>src/main/proto/RefreshUserMappingsProtocol.proto</argument>
<argument>src/main/proto/datatransfer.proto</argument>
</arguments>
<imports>
<param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
<param>${basedir}/src/main/proto</param>
</imports>
<source>
<directory>${basedir}/src/main/proto</directory>
<includes>
<include>GetUserMappingsProtocol.proto</include>
<include>HAZKInfo.proto</include>
<include>InterDatanodeProtocol.proto</include>
<include>JournalProtocol.proto</include>
<include>RefreshAuthorizationPolicyProtocol.proto</include>
<include>RefreshUserMappingsProtocol.proto</include>
<include>datatransfer.proto</include>
<include>hdfs.proto</include>
</includes>
</source>
<output>${project.build.directory}/generated-sources/java</output>
</configuration>
</execution>
<execution>
<id>compile-proto-datanode</id>
<id>compile-protoc-datanode</id>
<phase>generate-sources</phase>
<goals>
<goal>exec</goal>
<goal>protoc</goal>
</goals>
<configuration>
<executable>protoc</executable>
<arguments>
<argument>-I../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
<argument>-Isrc/main/proto/</argument>
<argument>--java_out=target/generated-sources/java</argument>
<argument>src/main/proto/ClientDatanodeProtocol.proto</argument>
<argument>src/main/proto/DatanodeProtocol.proto</argument>
</arguments>
<imports>
<param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
<param>${basedir}/src/main/proto</param>
</imports>
<source>
<directory>${basedir}/src/main/proto</directory>
<includes>
<include>ClientDatanodeProtocol.proto</include>
<include>DatanodeProtocol.proto</include>
</includes>
</source>
<output>${project.build.directory}/generated-sources/java</output>
</configuration>
</execution>
<execution>
<id>compile-proto-namenode</id>
<id>compile-protoc-namenode</id>
<phase>generate-sources</phase>
<goals>
<goal>exec</goal>
<goal>protoc</goal>
</goals>
<configuration>
<executable>protoc</executable>
<arguments>
<argument>-I../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
<argument>-Isrc/main/proto/</argument>
<argument>--java_out=target/generated-sources/java</argument>
<argument>src/main/proto/ClientNamenodeProtocol.proto</argument>
<argument>src/main/proto/NamenodeProtocol.proto</argument>
</arguments>
<imports>
<param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
<param>${basedir}/src/main/proto</param>
</imports>
<source>
<directory>${basedir}/src/main/proto</directory>
<includes>
<include>ClientNamenodeProtocol.proto</include>
<include>NamenodeProtocol.proto</include>
</includes>
</source>
<output>${project.build.directory}/generated-sources/java</output>
</configuration>
</execution>
<execution>
<id>compile-proto-qjournal</id>
<id>compile-protoc-qjournal</id>
<phase>generate-sources</phase>
<goals>
<goal>exec</goal>
<goal>protoc</goal>
</goals>
<configuration>
<executable>protoc</executable>
<arguments>
<argument>-I../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
<argument>-Isrc/main/proto/</argument>
<argument>--java_out=target/generated-sources/java</argument>
<argument>src/main/proto/QJournalProtocol.proto</argument>
</arguments>
<imports>
<param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
<param>${basedir}/src/main/proto</param>
</imports>
<source>
<directory>${basedir}/src/main/proto</directory>
<includes>
<include>QJournalProtocol.proto</include>
</includes>
</source>
<output>${project.build.directory}/generated-sources/java</output>
</configuration>
</execution>
</executions>
View File
@ -92,63 +92,28 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<build>
<plugins>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>build-helper-maven-plugin</artifactId>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-maven-plugins</artifactId>
<executions>
<execution>
<id>add-source</id>
<id>compile-protoc</id>
<phase>generate-sources</phase>
<goals>
<goal>add-source</goal>
<goal>protoc</goal>
</goals>
<configuration>
<sources>
<source>${project.build.directory}/generated-sources/java</source>
</sources>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<configuration>
<skipTests>false</skipTests>
</configuration>
<executions>
<execution>
<id>create-protobuf-generated-sources-directory</id>
<phase>initialize</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<mkdir dir="target/generated-sources/java" />
</target>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<executions>
<execution>
<id>compile-proto</id>
<phase>generate-sources</phase>
<goals>
<goal>exec</goal>
</goals>
<configuration>
<executable>protoc</executable>
<arguments>
<argument>-I../../../../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
<argument>-Isrc/main/proto/</argument>
<argument>-I../../main/proto</argument>
<argument>--java_out=target/generated-sources/java</argument>
<argument>src/main/proto/bkjournal.proto</argument>
</arguments>
<imports>
<param>${basedir}/../../../../../hadoop-common-project/hadoop-common/src/main/proto</param>
<param>${basedir}/../../../../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto</param>
<param>${basedir}/src/main/proto</param>
</imports>
<source>
<directory>${basedir}/src/main/proto</directory>
<includes>
<include>bkjournal.proto</include>
</includes>
</source>
<output>${project.build.directory}/generated-sources/java</output>
</configuration>
</execution>
</executions>
View File
@ -503,7 +503,8 @@ public class BookKeeperJournalManager implements JournalManager {
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
long fromTxId, boolean inProgressOk) throws IOException {
List<EditLogLedgerMetadata> currentLedgerList = getLedgerList(inProgressOk);
List<EditLogLedgerMetadata> currentLedgerList = getLedgerList(fromTxId,
inProgressOk);
try {
BookKeeperEditLogInputStream elis = null;
for (EditLogLedgerMetadata l : currentLedgerList) {
@ -511,6 +512,8 @@ public class BookKeeperJournalManager implements JournalManager {
if (l.isInProgress()) {
lastTxId = recoverLastTxId(l, false);
}
// Check once again; required for in-progress ledgers and in case of any
// gap.
if (fromTxId >= l.getFirstTxId() && fromTxId <= lastTxId) {
LedgerHandle h;
if (l.isInProgress()) { // we don't want to fence the current journal
@ -523,6 +526,8 @@ public class BookKeeperJournalManager implements JournalManager {
elis = new BookKeeperEditLogInputStream(h, l);
elis.skipTo(fromTxId);
} else {
// If it does not match, there might be a gap, so we should not check
// any further.
return;
}
streams.add(elis);
@ -732,6 +737,11 @@ public class BookKeeperJournalManager implements JournalManager {
*/
List<EditLogLedgerMetadata> getLedgerList(boolean inProgressOk)
throws IOException {
return getLedgerList(-1, inProgressOk);
}
private List<EditLogLedgerMetadata> getLedgerList(long fromTxId,
boolean inProgressOk) throws IOException {
List<EditLogLedgerMetadata> ledgers
= new ArrayList<EditLogLedgerMetadata>();
try {
@ -744,6 +754,12 @@ public class BookKeeperJournalManager implements JournalManager {
try {
EditLogLedgerMetadata editLogLedgerMetadata = EditLogLedgerMetadata
.read(zkc, legderMetadataPath);
if (editLogLedgerMetadata.getLastTxId() != HdfsConstants.INVALID_TXID
&& editLogLedgerMetadata.getLastTxId() < fromTxId) {
// Exclude closed edits that have already been read, but include
// in-progress edits, as these are handled by the caller.
continue;
}
ledgers.add(editLogLedgerMetadata);
} catch (KeeperException.NoNodeException e) {
LOG.warn("ZNode: " + legderMetadataPath
View File
@ -21,7 +21,6 @@ import static org.junit.Assert.*;
import org.junit.Test;
import org.junit.Before;
import org.junit.After;
import org.junit.BeforeClass;
import org.junit.AfterClass;
@ -34,11 +33,9 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.ipc.RemoteException;
@ -352,4 +349,42 @@ public class TestBookKeeperAsHASharedDir {
}
}
}
/**
* NameNode should load the edits correctly if the applicable edits are
* present in the BKJM.
*/
@Test
public void testNameNodeMultipleSwitchesUsingBKJM() throws Exception {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
.createJournalURI("/correctEditLogSelection").toString());
BKJMUtil.addJournalManagerDefinition(conf);
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)
.manageNameDfsSharedDirs(false).build();
NameNode nn1 = cluster.getNameNode(0);
NameNode nn2 = cluster.getNameNode(1);
cluster.waitActive();
cluster.transitionToActive(0);
nn1.getRpcServer().rollEditLog(); // Roll Edits from current Active.
// Gracefully transition the current active NameNode to standby.
cluster.transitionToStandby(0);
// Make the other NameNode active and roll edits multiple times.
cluster.transitionToActive(1);
nn2.getRpcServer().rollEditLog();
nn2.getRpcServer().rollEditLog();
// Now one more failover; NN1 should be able to become active again.
cluster.transitionToStandby(1);
cluster.transitionToActive(0);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
View File
@ -634,10 +634,10 @@ public class DFSClient implements java.io.Closeable {
} catch (IOException e) {
// Abort if the lease has already expired.
final long elapsed = Time.now() - getLastLeaseRenewal();
if (elapsed > HdfsConstants.LEASE_SOFTLIMIT_PERIOD) {
if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
LOG.warn("Failed to renew lease for " + clientName + " for "
+ (elapsed/1000) + " seconds (>= hard-limit ="
+ (HdfsConstants.LEASE_SOFTLIMIT_PERIOD/1000) + " seconds.) "
+ (HdfsConstants.LEASE_HARDLIMIT_PERIOD/1000) + " seconds.) "
+ "Closing all files being written ...", e);
closeAllFilesBeingWritten(true);
} else {
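// Still within the hard limit: the client keeps retrying the renewal
// instead of aborting. For orientation, the shipped HdfsConstants values
// are a 60 s soft limit and a 60 min hard limit, so the client now
// tolerates renewal failures for up to an hour before closing its files.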
View File
@ -181,10 +181,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY = "dfs.datanode.socket.reuse.keepalive";
public static final int DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 1000;
// Whether to enable datanode's stale state detection and usage
public static final String DFS_NAMENODE_CHECK_STALE_DATANODE_KEY = "dfs.namenode.check.stale.datanode";
public static final boolean DFS_NAMENODE_CHECK_STALE_DATANODE_DEFAULT = false;
// Whether to enable datanode's stale state detection and usage
// Whether to enable datanode's stale state detection and usage for reads
public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY = "dfs.namenode.avoid.read.stale.datanode";
public static final boolean DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT = false;
// Whether to enable datanode's stale state detection and usage for writes
public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY = "dfs.namenode.avoid.write.stale.datanode";
public static final boolean DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT = false;
// The default value of the time interval for marking datanodes as stale
@ -195,8 +195,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_STALE_DATANODE_MINIMUM_INTERVAL_KEY = "dfs.namenode.stale.datanode.minimum.interval";
public static final int DFS_NAMENODE_STALE_DATANODE_MINIMUM_INTERVAL_DEFAULT = 3; // i.e. min_interval is 3 * heartbeat_interval = 9s
// When the number stale datanodes marked as stale reached this certian ratio,
// stop avoiding writing to stale nodes so as to prevent causing hotspots.
// When the percentage of stale datanodes reaches this ratio,
// allow writing to stale nodes to prevent hotspots.
public static final String DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY = "dfs.namenode.write.stale.datanode.ratio";
public static final float DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_DEFAULT = 0.5f;
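The three keys above supersede the old single dfs.namenode.check.stale.datanode switch, splitting stale-node handling into independent read and write paths. A minimal sketch of enabling both programmatically (the ratio shown is the shipped default):
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
conf.setFloat(DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY, 0.5f);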
@ -235,6 +235,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_SHARED_EDITS_DIR_KEY = "dfs.namenode.shared.edits.dir";
public static final String DFS_NAMENODE_EDITS_PLUGIN_PREFIX = "dfs.namenode.edits.journal-plugin";
public static final String DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY = "dfs.namenode.edits.dir.required";
public static final String DFS_NAMENODE_EDITS_DIR_DEFAULT = "file:///tmp/hadoop/dfs/name";
public static final String DFS_CLIENT_READ_PREFETCH_SIZE_KEY = "dfs.client.read.prefetch.size";
public static final String DFS_CLIENT_RETRY_WINDOW_BASE= "dfs.client.retry.window.base";
public static final String DFS_METRICS_SESSION_ID_KEY = "dfs.metrics.session-id";
View File
@ -115,6 +115,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
private volatile boolean closed = false;
private String src;
private final long fileId;
private final long blockSize;
private final DataChecksum checksum;
// both dataQueue and ackQueue are protected by dataQueue lock
@ -1148,7 +1149,8 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
long localstart = Time.now();
while (true) {
try {
return dfsClient.namenode.addBlock(src, dfsClient.clientName, block, excludedNodes);
return dfsClient.namenode.addBlock(src, dfsClient.clientName,
block, excludedNodes, fileId);
} catch (RemoteException e) {
IOException ue =
e.unwrapRemoteException(FileNotFoundException.class,
@ -1261,20 +1263,21 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
return value;
}
private DFSOutputStream(DFSClient dfsClient, String src, long blockSize, Progressable progress,
DataChecksum checksum, short replication) throws IOException {
private DFSOutputStream(DFSClient dfsClient, String src, Progressable progress,
HdfsFileStatus stat, DataChecksum checksum) throws IOException {
super(checksum, checksum.getBytesPerChecksum(), checksum.getChecksumSize());
int bytesPerChecksum = checksum.getBytesPerChecksum();
this.dfsClient = dfsClient;
this.src = src;
this.blockSize = blockSize;
this.blockReplication = replication;
this.fileId = stat.getFileId();
this.blockSize = stat.getBlockSize();
this.blockReplication = stat.getReplication();
this.progress = progress;
if ((progress != null) && DFSClient.LOG.isDebugEnabled()) {
DFSClient.LOG.debug(
"Set non-null progress callback on DFSOutputStream " + src);
}
final int bytesPerChecksum = checksum.getBytesPerChecksum();
if ( bytesPerChecksum < 1 || blockSize % bytesPerChecksum != 0) {
throw new IOException("io.bytes.per.checksum(" + bytesPerChecksum +
") and blockSize(" + blockSize +
@ -1286,19 +1289,27 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
}
/** Construct a new output stream for creating a file. */
private DFSOutputStream(DFSClient dfsClient, String src, FsPermission masked,
EnumSet<CreateFlag> flag, boolean createParent, short replication,
long blockSize, Progressable progress, int buffersize,
private DFSOutputStream(DFSClient dfsClient, String src, HdfsFileStatus stat,
EnumSet<CreateFlag> flag, Progressable progress,
DataChecksum checksum) throws IOException {
this(dfsClient, src, blockSize, progress, checksum, replication);
this(dfsClient, src, progress, stat, checksum);
this.shouldSyncBlock = flag.contains(CreateFlag.SYNC_BLOCK);
computePacketChunkSize(dfsClient.getConf().writePacketSize,
checksum.getBytesPerChecksum());
streamer = new DataStreamer();
}
static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
short replication, long blockSize, Progressable progress, int buffersize,
DataChecksum checksum) throws IOException {
final HdfsFileStatus stat;
try {
dfsClient.namenode.create(
src, masked, dfsClient.clientName, new EnumSetWritable<CreateFlag>(flag), createParent, replication, blockSize);
stat = dfsClient.namenode.create(src, masked, dfsClient.clientName,
new EnumSetWritable<CreateFlag>(flag), createParent, replication,
blockSize);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
DSQuotaExceededException.class,
@ -1309,30 +1320,20 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
SafeModeException.class,
UnresolvedPathException.class);
}
streamer = new DataStreamer();
}
static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
short replication, long blockSize, Progressable progress, int buffersize,
DataChecksum checksum) throws IOException {
final DFSOutputStream out = new DFSOutputStream(dfsClient, src, masked,
flag, createParent, replication, blockSize, progress, buffersize,
checksum);
out.streamer.start();
final DFSOutputStream out = new DFSOutputStream(dfsClient, src, stat,
flag, progress, checksum);
out.start();
return out;
}
/** Construct a new output stream for append. */
private DFSOutputStream(DFSClient dfsClient, String src, int buffersize, Progressable progress,
LocatedBlock lastBlock, HdfsFileStatus stat,
private DFSOutputStream(DFSClient dfsClient, String src,
Progressable progress, LocatedBlock lastBlock, HdfsFileStatus stat,
DataChecksum checksum) throws IOException {
this(dfsClient, src, stat.getBlockSize(), progress, checksum, stat.getReplication());
this(dfsClient, src, progress, stat, checksum);
initialFileSize = stat.getLen(); // length of file when opened
//
// The last partial block of the file has to be filled.
//
if (lastBlock != null) {
// indicate that we are appending to an existing block
bytesCurBlock = lastBlock.getBlockSize();
@ -1347,9 +1348,9 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
int buffersize, Progressable progress, LocatedBlock lastBlock,
HdfsFileStatus stat, DataChecksum checksum) throws IOException {
final DFSOutputStream out = new DFSOutputStream(dfsClient, src, buffersize,
final DFSOutputStream out = new DFSOutputStream(dfsClient, src,
progress, lastBlock, stat, checksum);
out.streamer.start();
out.start();
return out;
}
@ -1716,6 +1717,10 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
isClosed();
}
private synchronized void start() {
streamer.start();
}
/**
* Aborts this output stream and releases any system
* resources associated with this stream.
View File
@ -134,7 +134,7 @@ public class DFSUtil {
/**
* Comparator for sorting DataNodeInfo[] based on decommissioned/stale states.
* Decommissioned/stale nodes are moved to the end of the array on sorting
* with this compartor.
* with this comparator.
*/
@InterfaceAudience.Private
public static class DecomStaleComparator implements Comparator<DatanodeInfo> {
@ -144,7 +144,7 @@ public class DFSUtil {
* Constructor of DecomStaleComparator
*
* @param interval
* The time invertal for marking datanodes as stale is passed from
* The time interval for marking datanodes as stale is passed from
* outside, since the interval may be changed dynamically
*/
public DecomStaleComparator(long interval) {
@ -766,6 +766,13 @@ public class DFSUtil {
// Add the default URI if it is an HDFS URI.
URI defaultUri = FileSystem.getDefaultUri(conf);
// Check if defaultUri is in ip:port format
// and convert it to hostname:port format.
if (defaultUri != null && (defaultUri.getPort() != -1)) {
defaultUri = createUri(defaultUri.getScheme(),
NetUtils.createSocketAddr(defaultUri.getHost(),
defaultUri.getPort()));
}
if (defaultUri != null &&
HdfsConstants.HDFS_URI_SCHEME.equals(defaultUri.getScheme()) &&
!nonPreferredUris.contains(defaultUri)) {
@ -925,6 +932,11 @@ public class DFSUtil {
return capacity <= 0 ? 0 : (remaining * 100.0f)/capacity;
}
/** Convert percentage to a string. */
public static String percent2String(double percentage) {
return StringUtils.format("%.2f%%", percentage);
}
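// A worked instance: percent2String(12.3456) returns "12.35%". Note the
// argument is already a percentage, unlike StringUtils.formatPercent,
// which takes a fraction.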
/**
* Round bytes to GiB (gibibyte)
* @param bytes number of bytes
View File
@ -150,6 +150,8 @@ public interface ClientProtocol {
* @param replication block replication factor.
* @param blockSize maximum block size.
*
* @return the status of the created file; it may be null if the server
* does not support returning the file status
* @throws AccessControlException If access is denied
* @throws AlreadyBeingCreatedException if the path does not exist.
* @throws DSQuotaExceededException If file creation violates disk space
@ -168,13 +170,14 @@ public interface ClientProtocol {
* RuntimeExceptions:
* @throws InvalidPathException Path <code>src</code> is invalid
*/
public void create(String src, FsPermission masked, String clientName,
EnumSetWritable<CreateFlag> flag, boolean createParent,
short replication, long blockSize) throws AccessControlException,
AlreadyBeingCreatedException, DSQuotaExceededException,
FileAlreadyExistsException, FileNotFoundException,
NSQuotaExceededException, ParentNotDirectoryException, SafeModeException,
UnresolvedLinkException, IOException;
public HdfsFileStatus create(String src, FsPermission masked,
String clientName, EnumSetWritable<CreateFlag> flag,
boolean createParent, short replication, long blockSize)
throws AccessControlException, AlreadyBeingCreatedException,
DSQuotaExceededException, FileAlreadyExistsException,
FileNotFoundException, NSQuotaExceededException,
ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
IOException;
/**
* Append to the end of the file.
@ -296,6 +299,7 @@ public interface ClientProtocol {
* @param previous previous block
* @param excludeNodes a list of nodes that should not be
* allocated for the current block
* @param fileId the id uniquely identifying a file
*
* @return LocatedBlock allocated block information.
*
@ -310,7 +314,7 @@ public interface ClientProtocol {
*/
@Idempotent
public LocatedBlock addBlock(String src, String clientName,
ExtendedBlock previous, DatanodeInfo[] excludeNodes)
ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId)
throws AccessControlException, FileNotFoundException,
NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
IOException;
View File
@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.StringUtils;
import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.long2String;
@InterfaceAudience.Private
@InterfaceStability.Evolving
@ -41,9 +41,9 @@ public class DSQuotaExceededException extends QuotaExceededException {
public String getMessage() {
String msg = super.getMessage();
if (msg == null) {
return "The DiskSpace quota" + (pathName==null?"":(" of " + pathName)) +
" is exceeded: quota=" + StringUtils.humanReadableInt(quota) +
" diskspace consumed=" + StringUtils.humanReadableInt(count);
return "The DiskSpace quota" + (pathName==null?"": " of " + pathName)
+ " is exceeded: quota = " + quota + " B = " + long2String(quota, "B", 2)
+ " but diskspace consumed = " + count + " B = " + long2String(count, "B", 2);
} else {
return msg;
}
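// A hypothetical rendering of the reworked message, for quota = 10240,
// count = 10300 and pathName = "/user/alice":
//   The DiskSpace quota of /user/alice is exceeded: quota = 10240 B = 10 KB
//   but diskspace consumed = 10300 B = 10.06 KB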
View File
@ -17,10 +17,13 @@
*/
package org.apache.hadoop.hdfs.protocol;
import static org.apache.hadoop.hdfs.DFSUtil.percent2String;
import java.util.Date;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
@ -244,8 +247,8 @@ public class DatanodeInfo extends DatanodeID implements Node {
buffer.append("DFS Used: "+u+" ("+StringUtils.byteDesc(u)+")"+"\n");
buffer.append("Non DFS Used: "+nonDFSUsed+" ("+StringUtils.byteDesc(nonDFSUsed)+")"+"\n");
buffer.append("DFS Remaining: " +r+ " ("+StringUtils.byteDesc(r)+")"+"\n");
buffer.append("DFS Used%: "+StringUtils.limitDecimalTo2(usedPercent)+"%\n");
buffer.append("DFS Remaining%: "+StringUtils.limitDecimalTo2(remainingPercent)+"%\n");
buffer.append("DFS Used%: "+percent2String(usedPercent) + "\n");
buffer.append("DFS Remaining%: "+percent2String(remainingPercent) + "\n");
buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
return buffer.toString();
}
@ -269,7 +272,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
}
buffer.append(" " + c + "(" + StringUtils.byteDesc(c)+")");
buffer.append(" " + u + "(" + StringUtils.byteDesc(u)+")");
buffer.append(" " + StringUtils.limitDecimalTo2(((1.0*u)/c)*100)+"%");
buffer.append(" " + percent2String(u/(double)c));
buffer.append(" " + r + "(" + StringUtils.byteDesc(r)+")");
buffer.append(" " + new Date(lastUpdate));
return buffer.toString();
View File
@ -40,6 +40,7 @@ public class HdfsFileStatus {
private FsPermission permission;
private String owner;
private String group;
private long fileId;
public static final byte[] EMPTY_NAME = new byte[0];
@ -55,11 +56,12 @@ public class HdfsFileStatus {
* @param owner the owner of the path
* @param group the group of the path
* @param path the local name in java UTF8 encoding the same as that in-memory
* @param fileId the file id
*/
public HdfsFileStatus(long length, boolean isdir, int block_replication,
long blocksize, long modification_time, long access_time,
FsPermission permission, String owner, String group,
byte[] symlink, byte[] path) {
byte[] symlink, byte[] path, long fileId) {
this.length = length;
this.isdir = isdir;
this.block_replication = (short)block_replication;
@ -75,6 +77,7 @@ public class HdfsFileStatus {
this.group = (group == null) ? "" : group;
this.symlink = symlink;
this.path = path;
this.fileId = fileId;
}
/**
@ -223,4 +226,8 @@ public class HdfsFileStatus {
final public byte[] getSymlinkInBytes() {
return symlink;
}
final public long getFileId() {
return fileId;
}
}
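The widened constructor threads the inode id through to clients. A minimal construction sketch with hypothetical values (assumes the usual FsPermission and Time imports; 16385L stands in for a real inode id):
FsPermission perm = FsPermission.getDefault();
HdfsFileStatus stat = new HdfsFileStatus(0L, true, 0, 0L,
    Time.now(), Time.now(), perm, "hdfs", "supergroup",
    null, HdfsFileStatus.EMPTY_NAME, 16385L);
long fileId = stat.getFileId();  // 16385L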
View File
@ -44,15 +44,15 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
* @param group group
* @param symlink symbolic link
* @param path local path name in java UTF8 format
* @param fileId the file id
* @param locations block locations
*/
public HdfsLocatedFileStatus(long length, boolean isdir,
int block_replication,
long blocksize, long modification_time, long access_time,
FsPermission permission, String owner, String group,
byte[] symlink, byte[] path, LocatedBlocks locations) {
int block_replication, long blocksize, long modification_time,
long access_time, FsPermission permission, String owner, String group,
byte[] symlink, byte[] path, long fileId, LocatedBlocks locations) {
super(length, isdir, block_replication, blocksize, modification_time,
access_time, permission, owner, group, symlink, path);
access_time, permission, owner, group, symlink, path, fileId);
this.locations = locations;
}
View File
@ -268,14 +268,19 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
public CreateResponseProto create(RpcController controller,
CreateRequestProto req) throws ServiceException {
try {
server.create(req.getSrc(), PBHelper.convert(req.getMasked()),
req.getClientName(), PBHelper.convert(req.getCreateFlag()),
req.getCreateParent(), (short) req.getReplication(),
req.getBlockSize());
HdfsFileStatus result = server.create(req.getSrc(),
PBHelper.convert(req.getMasked()), req.getClientName(),
PBHelper.convert(req.getCreateFlag()), req.getCreateParent(),
(short) req.getReplication(), req.getBlockSize());
if (result != null) {
return CreateResponseProto.newBuilder().setFs(PBHelper.convert(result))
.build();
}
return VOID_CREATE_RESPONSE;
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_CREATE_RESPONSE;
}
@Override
@ -348,13 +353,14 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
try {
List<DatanodeInfoProto> excl = req.getExcludeNodesList();
LocatedBlock result = server.addBlock(req.getSrc(), req.getClientName(),
LocatedBlock result = server.addBlock(
req.getSrc(),
req.getClientName(),
req.hasPrevious() ? PBHelper.convert(req.getPrevious()) : null,
(excl == null ||
excl.size() == 0) ? null :
PBHelper.convert(excl.toArray(new DatanodeInfoProto[excl.size()])));
return AddBlockResponseProto.newBuilder().setBlock(
PBHelper.convert(result)).build();
(excl == null || excl.size() == 0) ? null : PBHelper.convert(excl
.toArray(new DatanodeInfoProto[excl.size()])), req.getFileId());
return AddBlockResponseProto.newBuilder()
.setBlock(PBHelper.convert(result)).build();
} catch (IOException e) {
throw new ServiceException(e);
}
View File
@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Append
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
@ -100,6 +101,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.EnumSetWritable;
@ -193,13 +195,14 @@ public class ClientNamenodeProtocolTranslatorPB implements
}
@Override
public void create(String src, FsPermission masked, String clientName,
EnumSetWritable<CreateFlag> flag, boolean createParent,
short replication, long blockSize) throws AccessControlException,
AlreadyBeingCreatedException, DSQuotaExceededException,
FileAlreadyExistsException, FileNotFoundException,
NSQuotaExceededException, ParentNotDirectoryException, SafeModeException,
UnresolvedLinkException, IOException {
public HdfsFileStatus create(String src, FsPermission masked,
String clientName, EnumSetWritable<CreateFlag> flag,
boolean createParent, short replication, long blockSize)
throws AccessControlException, AlreadyBeingCreatedException,
DSQuotaExceededException, FileAlreadyExistsException,
FileNotFoundException, NSQuotaExceededException,
ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
IOException {
CreateRequestProto req = CreateRequestProto.newBuilder()
.setSrc(src)
.setMasked(PBHelper.convert(masked))
@ -210,7 +213,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
.setBlockSize(blockSize)
.build();
try {
rpcProxy.create(null, req);
CreateResponseProto res = rpcProxy.create(null, req);
return res.hasFs() ? PBHelper.convert(res.getFs()) : null;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@ -297,12 +301,12 @@ public class ClientNamenodeProtocolTranslatorPB implements
@Override
public LocatedBlock addBlock(String src, String clientName,
ExtendedBlock previous, DatanodeInfo[] excludeNodes)
ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId)
throws AccessControlException, FileNotFoundException,
NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
IOException {
AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder().setSrc(src)
.setClientName(clientName);
AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder()
.setSrc(src).setClientName(clientName).setFileId(fileId);
if (previous != null)
req.setPrevious(PBHelper.convert(previous));
if (excludeNodes != null)
View File
@ -106,6 +106,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
@ -1045,6 +1046,7 @@ public class PBHelper {
fs.getFileType().equals(FileType.IS_SYMLINK) ?
fs.getSymlink().toByteArray() : null,
fs.getPath().toByteArray(),
fs.hasFileId()? fs.getFileId(): INodeId.GRANDFATHER_INODE_ID,
fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null);
}
@ -1069,6 +1071,7 @@ public class PBHelper {
setPermission(PBHelper.convert(fs.getPermission())).
setOwner(fs.getOwner()).
setGroup(fs.getGroup()).
setFileId(fs.getFileId()).
setPath(ByteString.copyFrom(fs.getLocalNameInBytes()));
if (fs.isSymlink()) {
builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
View File
@ -1333,8 +1333,9 @@ public class Balancer {
// Exit status
enum ReturnStatus {
SUCCESS(1),
IN_PROGRESS(0),
// These int values will map directly to the balancer process's exit code.
SUCCESS(0),
IN_PROGRESS(1),
ALREADY_RUNNING(-1),
NO_MOVE_BLOCK(-2),
NO_MOVE_PROGRESS(-3),
@ -1507,7 +1508,12 @@ public class Balancer {
}
static class Cli extends Configured implements Tool {
/** Parse arguments and then run Balancer */
/**
* Parse arguments and then run Balancer.
*
* @param args command specific arguments.
* @return exit code. 0 indicates success, non-zero indicates failure.
*/
@Override
public int run(String[] args) {
final long startTime = Time.now();
View File
@ -127,14 +127,25 @@ public class DatanodeManager {
/** Ask Datanode only up to this many blocks to delete. */
final int blockInvalidateLimit;
/** Whether or not to check stale DataNodes for read/write */
private final boolean checkForStaleDataNodes;
/** The interval for judging stale DataNodes for read/write */
private final long staleInterval;
/** Whether or not to avoid using stale DataNodes for writing */
private volatile boolean avoidStaleDataNodesForWrite;
/** Whether or not to avoid using stale DataNodes for reading */
private final boolean avoidStaleDataNodesForRead;
/**
* Whether or not to avoid using stale DataNodes for writing.
* Note that, even if this is configured, the policy may be
* temporarily disabled when a high percentage of the nodes
* are marked as stale.
*/
private final boolean avoidStaleDataNodesForWrite;
/**
* When the ratio of stale datanodes reaches this number, stop avoiding
* writing to stale datanodes, i.e., continue using stale nodes for writing.
*/
private final float ratioUseStaleDataNodesForWrite;
/** The number of stale DataNodes */
private volatile int numStaleNodes;
@ -184,13 +195,22 @@ public class DatanodeManager {
LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
+ "=" + this.blockInvalidateLimit);
checkForStaleDataNodes = conf.getBoolean(
DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY,
DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_DEFAULT);
staleInterval = getStaleIntervalFromConf(conf, heartbeatExpireInterval);
avoidStaleDataNodesForWrite = getAvoidStaleForWriteFromConf(conf,
checkForStaleDataNodes);
this.avoidStaleDataNodesForRead = conf.getBoolean(
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY,
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT);
this.avoidStaleDataNodesForWrite = conf.getBoolean(
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT);
this.staleInterval = getStaleIntervalFromConf(conf, heartbeatExpireInterval);
this.ratioUseStaleDataNodesForWrite = conf.getFloat(
DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY,
DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_DEFAULT);
Preconditions.checkArgument(
(ratioUseStaleDataNodesForWrite > 0 &&
ratioUseStaleDataNodesForWrite <= 1.0f),
DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY +
" = '" + ratioUseStaleDataNodesForWrite + "' is invalid. " +
"It should be a positive non-zero float value, not greater than 1.0f.");
}
private static long getStaleIntervalFromConf(Configuration conf,
@ -230,22 +250,6 @@ public class DatanodeManager {
return staleInterval;
}
static boolean getAvoidStaleForWriteFromConf(Configuration conf,
boolean checkForStale) {
boolean avoid = conf.getBoolean(
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT);
boolean avoidStaleDataNodesForWrite = checkForStale && avoid;
if (!checkForStale && avoid) {
LOG.warn("Cannot set "
+ DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY
+ " as false while setting "
+ DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY
+ " as true.");
}
return avoidStaleDataNodesForWrite;
}
void activate(final Configuration conf) {
final DecommissionManager dm = new DecommissionManager(namesystem, blockManager);
this.decommissionthread = new Daemon(dm.new Monitor(
@ -299,7 +303,7 @@ public class DatanodeManager {
client = new NodeBase(rName + NodeBase.PATH_SEPARATOR_STR + targethost);
}
Comparator<DatanodeInfo> comparator = checkForStaleDataNodes ?
Comparator<DatanodeInfo> comparator = avoidStaleDataNodesForRead ?
new DFSUtil.DecomStaleComparator(staleInterval) :
DFSUtil.DECOM_COMPARATOR;
@ -827,30 +831,18 @@ public class DatanodeManager {
/* Getter and Setter for stale DataNodes related attributes */
/**
* @return whether or not to avoid writing to stale datanodes
*/
public boolean isAvoidingStaleDataNodesForWrite() {
return avoidStaleDataNodesForWrite;
}
/**
* Set the value of {@link DatanodeManager#avoidStaleDataNodesForWrite}.
* The HeartbeatManager disables avoidStaleDataNodesForWrite when more than
* half of the DataNodes are marked as stale.
* Whether stale datanodes should be avoided as targets on the write path.
* The result of this function may change if the number of stale datanodes
* eclipses a configurable threshold.
*
* @param avoidStaleDataNodesForWrite
* The value to set to
* {@link DatanodeManager#avoidStaleDataNodesForWrite}
* @return whether stale datanodes should be avoided on the write path
*/
void setAvoidStaleDataNodesForWrite(boolean avoidStaleDataNodesForWrite) {
this.avoidStaleDataNodesForWrite = avoidStaleDataNodesForWrite;
}
/**
* @return Whether or not to check stale DataNodes for R/W
*/
boolean isCheckingForStaleDataNodes() {
return checkForStaleDataNodes;
public boolean shouldAvoidStaleDataNodesForWrite() {
// If # stale exceeds maximum staleness ratio, disable stale
// datanode avoidance on the write path
return avoidStaleDataNodesForWrite &&
(numStaleNodes <= heartbeatManager.getLiveDatanodeCount()
* ratioUseStaleDataNodesForWrite);
}
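// Worked example (hypothetical numbers; assumes the configured ratio is 0.5f):
// with 100 live datanodes, numStaleNodes = 50 gives 50 <= 100 * 0.5f, so
// writes keep avoiding stale nodes; at numStaleNodes = 51 the threshold is
// crossed and stale nodes become eligible write targets again.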
/**
@ -967,7 +959,7 @@ public class DatanodeManager {
port = DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT;
} else {
hostStr = hostLine.substring(0, idx);
port = Integer.valueOf(hostLine.substring(idx));
port = Integer.valueOf(hostLine.substring(idx+1));
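// With hostLine = "127.0.0.1:50010" and idx at the ':' separator,
// substring(idx) yields ":50010", which Integer.valueOf rejects;
// substring(idx + 1) yields the intended "50010".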
}
if (InetAddresses.isInetAddress(hostStr)) {

View File

@ -30,8 +30,6 @@ import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.Time;
import com.google.common.base.Preconditions;
/**
* Manage the heartbeats received from datanodes.
* The datanode list and statistics are synchronized
@ -56,16 +54,7 @@ class HeartbeatManager implements DatanodeStatistics {
private final long heartbeatRecheckInterval;
/** Heartbeat monitor thread */
private final Daemon heartbeatThread = new Daemon(new Monitor());
/**
* The initial end-user setting which indicates whether or not to avoid
* writing to stale datanodes.
*/
private final boolean initialAvoidWriteStaleNodes;
/**
* When the ratio of stale datanodes reaches this number, stop avoiding
* writing to stale datanodes, i.e., continue using stale nodes for writing.
*/
private final float ratioUseStaleDataNodesForWrite;
final Namesystem namesystem;
final BlockManager blockManager;
@ -74,30 +63,25 @@ class HeartbeatManager implements DatanodeStatistics {
final BlockManager blockManager, final Configuration conf) {
this.namesystem = namesystem;
this.blockManager = blockManager;
boolean checkStaleNodes = conf.getBoolean(
DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY,
DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_DEFAULT);
boolean avoidStaleDataNodesForWrite = conf.getBoolean(
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT);
long recheckInterval = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 min
long staleInterval = conf.getLong(
DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);// 30s
this.initialAvoidWriteStaleNodes = DatanodeManager
.getAvoidStaleForWriteFromConf(conf, checkStaleNodes);
this.ratioUseStaleDataNodesForWrite = conf.getFloat(
DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY,
DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_DEFAULT);
Preconditions.checkArgument(
(ratioUseStaleDataNodesForWrite > 0 &&
ratioUseStaleDataNodesForWrite <= 1.0f),
DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY +
" = '" + ratioUseStaleDataNodesForWrite + "' is invalid. " +
"It should be a positive non-zero float value, not greater than 1.0f.");
this.heartbeatRecheckInterval = (checkStaleNodes
&& initialAvoidWriteStaleNodes
&& staleInterval < recheckInterval) ? staleInterval : recheckInterval;
if (avoidStaleDataNodesForWrite && staleInterval < recheckInterval) {
this.heartbeatRecheckInterval = staleInterval;
LOG.info("Setting heartbeat recheck interval to " + staleInterval
+ " since " + DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY
+ " is less than "
+ DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY);
} else {
this.heartbeatRecheckInterval = recheckInterval;
}
}
void activate(Configuration conf) {
@ -242,7 +226,6 @@ class HeartbeatManager implements DatanodeStatistics {
if (namesystem.isInSafeMode()) {
return;
}
boolean checkStaleNodes = dm.isCheckingForStaleDataNodes();
boolean allAlive = false;
while (!allAlive) {
// locate the first dead node.
@ -254,29 +237,14 @@ class HeartbeatManager implements DatanodeStatistics {
if (dead == null && dm.isDatanodeDead(d)) {
stats.incrExpiredHeartbeats();
dead = d;
if (!checkStaleNodes) {
break;
}
}
if (checkStaleNodes &&
d.isStale(dm.getStaleInterval())) {
if (d.isStale(dm.getStaleInterval())) {
numOfStaleNodes++;
}
}
// Change whether to avoid using stale datanodes for writing
// based on proportion of stale datanodes
if (checkStaleNodes) {
// Set the number of stale nodes in the DatanodeManager
dm.setNumStaleNodes(numOfStaleNodes);
if (numOfStaleNodes >
datanodes.size() * ratioUseStaleDataNodesForWrite) {
dm.setAvoidStaleDataNodesForWrite(false);
} else {
if (this.initialAvoidWriteStaleNodes) {
dm.setAvoidStaleDataNodesForWrite(true);
}
}
}
}
allAlive = dead == null;

View File

@ -905,7 +905,7 @@ public abstract class Storage extends StorageInfo {
props.setProperty("storageType", storageType.toString());
props.setProperty("namespaceID", String.valueOf(namespaceID));
// Set clusterID in version with federation support
if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
if (versionSupportsFederation()) {
props.setProperty("clusterID", clusterID);
}
props.setProperty("cTime", String.valueOf(cTime));

View File

@ -18,6 +18,8 @@
package org.apache.hadoop.hdfs.server.common;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import com.google.common.base.Joiner;
@ -78,6 +80,10 @@ public class StorageInfo {
cTime = from.cTime;
}
public boolean versionSupportsFederation() {
return LayoutVersion.supports(Feature.FEDERATION, layoutVersion);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();

View File

@ -76,6 +76,7 @@ import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@ -399,13 +400,17 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
final File dstfile = new File(destdir, b.getBlockName());
final File srcmeta = FsDatasetUtil.getMetaFile(srcfile, b.getGenerationStamp());
final File dstmeta = FsDatasetUtil.getMetaFile(dstfile, b.getGenerationStamp());
if (!srcmeta.renameTo(dstmeta)) {
try {
NativeIO.renameTo(srcmeta, dstmeta);
} catch (IOException e) {
throw new IOException("Failed to move meta file for " + b
+ " from " + srcmeta + " to " + dstmeta);
+ " from " + srcmeta + " to " + dstmeta, e);
}
if (!srcfile.renameTo(dstfile)) {
try {
NativeIO.renameTo(srcfile, dstfile);
} catch (IOException e) {
throw new IOException("Failed to move block file for " + b
+ " from " + srcfile + " to " + dstfile.getAbsolutePath());
+ " from " + srcfile + " to " + dstfile.getAbsolutePath(), e);
}
if (LOG.isDebugEnabled()) {
LOG.debug("addBlock: Moved " + srcmeta + " to " + dstmeta
@ -532,10 +537,12 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
if (LOG.isDebugEnabled()) {
LOG.debug("Renaming " + oldmeta + " to " + newmeta);
}
if (!oldmeta.renameTo(newmeta)) {
try {
NativeIO.renameTo(oldmeta, newmeta);
} catch (IOException e) {
throw new IOException("Block " + replicaInfo + " reopen failed. " +
" Unable to move meta file " + oldmeta +
" to rbw dir " + newmeta);
" to rbw dir " + newmeta, e);
}
// rename block file to rbw directory
@ -543,14 +550,18 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
LOG.debug("Renaming " + blkfile + " to " + newBlkFile
+ ", file length=" + blkfile.length());
}
if (!blkfile.renameTo(newBlkFile)) {
if (!newmeta.renameTo(oldmeta)) { // restore the meta file
try {
NativeIO.renameTo(blkfile, newBlkFile);
} catch (IOException e) {
try {
NativeIO.renameTo(newmeta, oldmeta);
} catch (IOException ex) {
LOG.warn("Cannot move meta file " + newmeta +
"back to the finalized directory " + oldmeta);
"back to the finalized directory " + oldmeta, ex);
}
throw new IOException("Block " + replicaInfo + " reopen failed. " +
" Unable to move block file " + blkfile +
" to rbw dir " + newBlkFile);
" to rbw dir " + newBlkFile, e);
}
// Replace finalized replica by a RBW replica in replicas map
@ -657,11 +668,13 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
if (LOG.isDebugEnabled()) {
LOG.debug("Renaming " + oldmeta + " to " + newmeta);
}
if (!oldmeta.renameTo(newmeta)) {
try {
NativeIO.renameTo(oldmeta, newmeta);
} catch (IOException e) {
replicaInfo.setGenerationStamp(oldGS); // restore old GS
throw new IOException("Block " + replicaInfo + " reopen failed. " +
" Unable to move meta file " + oldmeta +
" to " + newmeta);
" to " + newmeta, e);
}
}

View File

@ -123,6 +123,10 @@ public class CheckpointSignature extends StorageInfo
blockpoolID.equals(si.getBlockPoolID());
}
boolean namespaceIdMatches(FSImage si) {
return namespaceID == si.getStorage().namespaceID;
}
void validateStorageInfo(FSImage si) throws IOException {
if (!isSameCluster(si)
|| !storageVersionMatches(si.getStorage())) {

View File

@ -569,12 +569,10 @@ class ClusterJspHelper {
toXmlItemBlock(doc, "DFS Remaining", StringUtils.byteDesc(free));
// dfsUsedPercent
toXmlItemBlock(doc, "DFS Used%",
StringUtils.limitDecimalTo2(dfsUsedPercent)+ "%");
toXmlItemBlock(doc, "DFS Used%", DFSUtil.percent2String(dfsUsedPercent));
// dfsRemainingPercent
toXmlItemBlock(doc, "DFS Remaining%",
StringUtils.limitDecimalTo2(dfsRemainingPercent) + "%");
toXmlItemBlock(doc, "DFS Remaining%", DFSUtil.percent2String(dfsRemainingPercent));
doc.endTag(); // storage

View File

@ -1985,7 +1985,8 @@ public class FSDirectory implements Closeable {
node.getUserName(),
node.getGroupName(),
node.isSymlink() ? ((INodeSymlink)node).getSymlink() : null,
path);
path,
node.getId());
}
/**
@ -2022,6 +2023,7 @@ public class FSDirectory implements Closeable {
node.getGroupName(),
node.isSymlink() ? ((INodeSymlink)node).getSymlink() : null,
path,
node.getId(),
loc);
}

View File

@ -127,6 +127,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@ -422,6 +423,56 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
/**
* Check the supplied configuration for correctness.
* @param conf Supplies the configuration to validate.
* @throws IOException if the configuration could not be queried.
* @throws IllegalArgumentException if the configuration is invalid.
*/
private static void checkConfiguration(Configuration conf)
throws IOException {
final Collection<URI> namespaceDirs =
FSNamesystem.getNamespaceDirs(conf);
final Collection<URI> editsDirs =
FSNamesystem.getNamespaceEditsDirs(conf);
final Collection<URI> requiredEditsDirs =
FSNamesystem.getRequiredNamespaceEditsDirs(conf);
final Collection<URI> sharedEditsDirs =
FSNamesystem.getSharedEditsDirs(conf);
for (URI u : requiredEditsDirs) {
if (u.toString().compareTo(
DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_DEFAULT) == 0) {
continue;
}
// Each required directory must also be in editsDirs or in
// sharedEditsDirs.
if (!editsDirs.contains(u) &&
!sharedEditsDirs.contains(u)) {
throw new IllegalArgumentException(
"Required edits directory " + u.toString() + " not present in " +
DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY + ". " +
DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY + "=" +
editsDirs.toString() + "; " +
DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY + "=" +
requiredEditsDirs.toString() + ". " +
DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY + "=" +
sharedEditsDirs.toString() + ".");
}
}
if (namespaceDirs.size() == 1) {
LOG.warn("Only one image storage directory ("
+ DFS_NAMENODE_NAME_DIR_KEY + ") configured. Beware of data loss"
+ " due to lack of redundant storage directories!");
}
if (editsDirs.size() == 1) {
LOG.warn("Only one namespace edits storage directory ("
+ DFS_NAMENODE_EDITS_DIR_KEY + ") configured. Beware of data loss"
+ " due to lack of redundant storage directories!");
}
}
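// A minimal sketch of a configuration this check rejects (paths hypothetical):
//   conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "file:///data/1/edits");
//   conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY, "file:///data/2/edits");
// loadFromDisk(conf) then fails fast with IllegalArgumentException, because the
// required directory appears in neither dfs.namenode.edits.dir nor
// dfs.namenode.shared.edits.dir.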
/**
* Instantiates an FSNamesystem loaded from the image and edits
@ -434,39 +485,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
*/
public static FSNamesystem loadFromDisk(Configuration conf)
throws IOException {
Collection<URI> namespaceDirs = FSNamesystem.getNamespaceDirs(conf);
List<URI> namespaceEditsDirs =
FSNamesystem.getNamespaceEditsDirs(conf);
return loadFromDisk(conf, namespaceDirs, namespaceEditsDirs);
}
/**
* Instantiates an FSNamesystem loaded from the image and edits
* directories passed.
*
* @param conf the Configuration which specifies the storage directories
* from which to load
* @param namespaceDirs directories to load the fsimages
* @param namespaceEditsDirs directories to load the edits from
* @return an FSNamesystem which contains the loaded namespace
* @throws IOException if loading fails
*/
public static FSNamesystem loadFromDisk(Configuration conf,
Collection<URI> namespaceDirs, List<URI> namespaceEditsDirs)
throws IOException {
if (namespaceDirs.size() == 1) {
LOG.warn("Only one image storage directory ("
+ DFS_NAMENODE_NAME_DIR_KEY + ") configured. Beware of data loss"
+ " due to lack of redundant storage directories!");
}
if (namespaceEditsDirs.size() == 1) {
LOG.warn("Only one namespace edits storage directory ("
+ DFS_NAMENODE_EDITS_DIR_KEY + ") configured. Beware of data loss"
+ " due to lack of redundant storage directories!");
}
FSImage fsImage = new FSImage(conf, namespaceDirs, namespaceEditsDirs);
checkConfiguration(conf);
FSImage fsImage = new FSImage(conf,
FSNamesystem.getNamespaceDirs(conf),
FSNamesystem.getNamespaceEditsDirs(conf));
FSNamesystem namesystem = new FSNamesystem(conf, fsImage);
StartupOption startOpt = NameNode.getStartupOption(conf);
if (startOpt == StartupOption.RECOVER) {
@ -913,7 +936,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
"\n\t\t- use Backup Node as a persistent and up-to-date storage " +
"of the file system meta-data.");
} else if (dirNames.isEmpty()) {
dirNames = Collections.singletonList("file:///tmp/hadoop/dfs/name");
dirNames = Collections.singletonList(
DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_DEFAULT);
}
return Util.stringCollectionAsURIs(dirNames);
}
@ -1772,16 +1796,18 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* Create a new file entry in the namespace.
*
* For description of parameters and exceptions thrown see
* {@link ClientProtocol#create()}
* {@link ClientProtocol#create()}, except it returns valid file status
* upon success
*/
void startFile(String src, PermissionStatus permissions, String holder,
String clientMachine, EnumSet<CreateFlag> flag, boolean createParent,
short replication, long blockSize) throws AccessControlException,
SafeModeException, FileAlreadyExistsException, UnresolvedLinkException,
HdfsFileStatus startFile(String src, PermissionStatus permissions,
String holder, String clientMachine, EnumSet<CreateFlag> flag,
boolean createParent, short replication, long blockSize)
throws AccessControlException, SafeModeException,
FileAlreadyExistsException, UnresolvedLinkException,
FileNotFoundException, ParentNotDirectoryException, IOException {
try {
startFileInt(src, permissions, holder, clientMachine, flag, createParent,
replication, blockSize);
return startFileInt(src, permissions, holder, clientMachine, flag,
createParent, replication, blockSize);
} catch (AccessControlException e) {
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(false, UserGroupInformation.getCurrentUser(),
@ -1792,18 +1818,21 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
}
private void startFileInt(String src, PermissionStatus permissions, String holder,
String clientMachine, EnumSet<CreateFlag> flag, boolean createParent,
short replication, long blockSize) throws AccessControlException,
SafeModeException, FileAlreadyExistsException, UnresolvedLinkException,
private HdfsFileStatus startFileInt(String src, PermissionStatus permissions,
String holder, String clientMachine, EnumSet<CreateFlag> flag,
boolean createParent, short replication, long blockSize)
throws AccessControlException, SafeModeException,
FileAlreadyExistsException, UnresolvedLinkException,
FileNotFoundException, ParentNotDirectoryException, IOException {
boolean skipSync = false;
final HdfsFileStatus stat;
writeLock();
try {
checkOperation(OperationCategory.WRITE);
startFileInternal(src, permissions, holder, clientMachine, flag,
createParent, replication, blockSize);
stat = dir.getFileInfo(src, false);
} catch (StandbyException se) {
skipSync = true;
throw se;
@ -1817,11 +1846,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
if (isAuditEnabled() && isExternalInvocation()) {
final HdfsFileStatus stat = dir.getFileInfo(src, false);
logAuditEvent(UserGroupInformation.getCurrentUser(),
getRemoteIp(),
"create", src, null, stat);
}
return stat;
}
/**
@ -2192,20 +2221,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* are replicated. Will return an empty 2-elt array if we want the
* client to "try again later".
*/
LocatedBlock getAdditionalBlock(String src,
String clientName,
ExtendedBlock previous,
HashMap<Node, Node> excludedNodes
)
LocatedBlock getAdditionalBlock(String src, long fileId, String clientName,
ExtendedBlock previous, HashMap<Node, Node> excludedNodes)
throws LeaseExpiredException, NotReplicatedYetException,
QuotaExceededException, SafeModeException, UnresolvedLinkException,
IOException {
checkBlock(previous);
Block previousBlock = ExtendedBlock.getLocalBlock(previous);
long fileLength, blockSize;
long blockSize;
int replication;
DatanodeDescriptor clientNode = null;
Block newBlock = null;
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug(
@ -2213,10 +2236,83 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
+src+" for "+clientName);
}
// Part I. Analyze the state of the file with respect to the input data.
readLock();
try {
LocatedBlock[] onRetryBlock = new LocatedBlock[1];
final INode[] inodes = analyzeFileState(
src, fileId, clientName, previous, onRetryBlock).getINodes();
final INodeFileUnderConstruction pendingFile =
(INodeFileUnderConstruction) inodes[inodes.length - 1];
if(onRetryBlock[0] != null) {
// This is a retry. Just return the last block.
return onRetryBlock[0];
}
blockSize = pendingFile.getPreferredBlockSize();
clientNode = pendingFile.getClientNode();
replication = pendingFile.getBlockReplication();
} finally {
readUnlock();
}
// choose targets for the new block to be allocated.
final DatanodeDescriptor targets[] = getBlockManager().chooseTarget(
src, replication, clientNode, excludedNodes, blockSize);
// Part II.
// Allocate a new block, add it to the INode and the BlocksMap.
Block newBlock = null;
long offset;
writeLock();
try {
checkOperation(OperationCategory.WRITE);
// Run the full analysis again, since things could have changed
// while chooseTarget() was executing.
LocatedBlock[] onRetryBlock = new LocatedBlock[1];
INodesInPath inodesInPath =
analyzeFileState(src, fileId, clientName, previous, onRetryBlock);
INode[] inodes = inodesInPath.getINodes();
final INodeFileUnderConstruction pendingFile =
(INodeFileUnderConstruction) inodes[inodes.length - 1];
if(onRetryBlock[0] != null) {
// This is a retry. Just return the last block.
return onRetryBlock[0];
}
// commit the last block and complete it if it has minimum replicas
commitOrCompleteLastBlock(pendingFile,
ExtendedBlock.getLocalBlock(previous));
// allocate new block, record block locations in INode.
newBlock = createNewBlock();
saveAllocatedBlock(src, inodesInPath, newBlock, targets);
dir.persistBlocks(src, pendingFile);
offset = pendingFile.computeFileSize(true);
} finally {
writeUnlock();
}
if (persistBlocks) {
getEditLog().logSync();
}
// Return located block
return makeLocatedBlock(newBlock, targets, offset);
}
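// In outline (a summary of the method above):
// Part I, under the read lock: analyzeFileState() validates the lease and
// the client's 'previous' block; a recognized retry short-circuits and
// returns the already-allocated last block.
// Between the locks: chooseTarget() performs the expensive replica
// placement without holding the namesystem lock.
// Part II, under the write lock: analyzeFileState() runs again, since the
// file may have changed in the meantime; only then is the previous block
// committed and the new block recorded in the INode and BlocksMap.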
INodesInPath analyzeFileState(String src,
long fileId,
String clientName,
ExtendedBlock previous,
LocatedBlock[] onRetryBlock)
throws IOException {
assert hasReadOrWriteLock();
checkBlock(previous);
onRetryBlock[0] = null;
checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) {
throw new SafeModeException("Cannot add block to " + src, safeMode);
}
@ -2224,12 +2320,17 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
// have we exceeded the configured limit of fs objects.
checkFsObjectLimit();
INodeFileUnderConstruction pendingFile = checkLease(src, clientName);
Block previousBlock = ExtendedBlock.getLocalBlock(previous);
final INodesInPath inodesInPath =
dir.rootDir.getExistingPathINodes(src, true);
final INode[] inodes = inodesInPath.getINodes();
final INodeFileUnderConstruction pendingFile
= checkLease(src, fileId, clientName, inodes[inodes.length - 1]);
BlockInfo lastBlockInFile = pendingFile.getLastBlock();
if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
// The block that the client claims is the current last block
// doesn't match up with what we think is the last block. There are
// three possibilities:
// four possibilities:
// 1) This is the first block allocation of an append() pipeline
// which started appending exactly at a block boundary.
// In this case, the client isn't passed the previous block,
@ -2241,10 +2342,17 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
// timeout, or because of an HA failover. In that case, we know
// by the fact that the client is re-issuing the RPC that it
// never began to write to the old block. Hence it is safe to
// abandon it and allocate a new one.
// to return the existing block.
// 3) This is an entirely bogus request/bug -- we should error out
// rather than potentially appending a new block with an empty
// one in the middle, etc
// 4) This is a retry from a client that timed out while
// the prior getAdditionalBlock() is still being processed,
// currently working on chooseTarget().
// There are no means to distinguish between the first and
// the second attempts in Part I, because the first one hasn't
// changed the namesystem state yet.
// We run this analysis again in Part II where case 4 is impossible.
BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();
if (previous == null &&
@ -2259,7 +2367,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
src + " lastBlock=" + lastBlockInFile);
}
} else if (Block.matchingIdAndGenStamp(penultimateBlock, previousBlock)) {
// Case 2
if (lastBlockInFile.getNumBytes() != 0) {
throw new IOException(
"Request looked like a retry to allocate block " +
@ -2267,76 +2374,39 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
lastBlockInFile.getNumBytes() + " bytes");
}
// The retry case ("b" above) -- abandon the old block.
// Case 2
// Return the last block.
NameNode.stateChangeLog.info("BLOCK* allocateBlock: " +
"caught retry for allocation of a new block in " +
src + ". Abandoning old block " + lastBlockInFile);
dir.removeBlock(src, pendingFile, lastBlockInFile);
dir.persistBlocks(src, pendingFile);
src + ". Returning previously allocated block " + lastBlockInFile);
long offset = pendingFile.computeFileSize(true);
onRetryBlock[0] = makeLocatedBlock(lastBlockInFile,
((BlockInfoUnderConstruction)lastBlockInFile).getExpectedLocations(),
offset);
return inodesInPath;
} else {
// Case 3
throw new IOException("Cannot allocate block in " + src + ": " +
"passed 'previous' block " + previous + " does not match actual " +
"last block in file " + lastBlockInFile);
}
}
// commit the last block and complete it if it has minimum replicas
commitOrCompleteLastBlock(pendingFile, previousBlock);
//
// If we fail this, bad things happen!
//
// Check if the penultimate block is minimally replicated
if (!checkFileProgress(pendingFile, false)) {
throw new NotReplicatedYetException("Not replicated yet: " + src);
}
fileLength = pendingFile.computeContentSummary().getLength();
blockSize = pendingFile.getPreferredBlockSize();
clientNode = pendingFile.getClientNode();
replication = pendingFile.getBlockReplication();
} finally {
writeUnlock();
return inodesInPath;
}
// choose targets for the new block to be allocated.
final DatanodeDescriptor targets[] = blockManager.chooseTarget(
src, replication, clientNode, excludedNodes, blockSize);
// Allocate a new block and record it in the INode.
writeLock();
try {
checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) {
throw new SafeModeException("Cannot add block to " + src, safeMode);
}
final INodesInPath inodesInPath = dir.rootDir.getExistingPathINodes(src, true);
final INode[] inodes = inodesInPath.getINodes();
final INodeFileUnderConstruction pendingFile
= checkLease(src, clientName, inodes[inodes.length - 1]);
if (!checkFileProgress(pendingFile, false)) {
throw new NotReplicatedYetException("Not replicated yet:" + src);
}
// allocate new block record block locations in INode.
newBlock = allocateBlock(src, inodesInPath, targets);
for (DatanodeDescriptor dn : targets) {
dn.incBlocksScheduled();
}
dir.persistBlocks(src, pendingFile);
} finally {
writeUnlock();
}
if (persistBlocks) {
getEditLog().logSync();
}
// Create next block
LocatedBlock b = new LocatedBlock(getExtendedBlock(newBlock), targets, fileLength);
blockManager.setBlockToken(b, BlockTokenSecretManager.AccessMode.WRITE);
return b;
LocatedBlock makeLocatedBlock(Block blk,
DatanodeInfo[] locs,
long offset) throws IOException {
LocatedBlock lBlk = new LocatedBlock(
getExtendedBlock(blk), locs, offset);
getBlockManager().setBlockToken(
lBlk, BlockTokenSecretManager.AccessMode.WRITE);
return lBlk;
}
/** @see NameNode#getAdditionalDatanode(String, ExtendedBlock, DatanodeInfo[], DatanodeInfo[], int, String) */
@ -2425,13 +2495,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
// make sure that we still have the lease on this file.
private INodeFileUnderConstruction checkLease(String src, String holder)
throws LeaseExpiredException, UnresolvedLinkException {
throws LeaseExpiredException, UnresolvedLinkException,
FileNotFoundException {
assert hasReadOrWriteLock();
return checkLease(src, holder, dir.getINode(src));
return checkLease(src, INodeId.GRANDFATHER_INODE_ID, holder,
dir.getINode(src));
}
private INodeFileUnderConstruction checkLease(String src, String holder,
INode file) throws LeaseExpiredException {
private INodeFileUnderConstruction checkLease(String src, long fileId,
String holder, INode file) throws LeaseExpiredException,
FileNotFoundException {
assert hasReadOrWriteLock();
if (file == null || !(file instanceof INodeFile)) {
Lease lease = leaseManager.getLease(holder);
@ -2452,6 +2525,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
throw new LeaseExpiredException("Lease mismatch on " + src + " owned by "
+ pendingFile.getClientName() + " but is accessed by " + holder);
}
INodeId.checkId(fileId, pendingFile);
return pendingFile;
}
@ -2528,22 +2602,33 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
/**
* Allocate a block at the given pending filename
* Save allocated block at the given pending filename
*
* @param src path to the file
* @param inodesInPath representing each of the components of src.
* The last INode is the INode for the file.
* @throws QuotaExceededException If addition of block exceeds space quota
*/
private Block allocateBlock(String src, INodesInPath inodesInPath,
DatanodeDescriptor targets[]) throws IOException {
BlockInfo saveAllocatedBlock(String src, INodesInPath inodesInPath,
Block newBlock, DatanodeDescriptor targets[]) throws IOException {
assert hasWriteLock();
BlockInfo b = dir.addBlock(src, inodesInPath, newBlock, targets);
NameNode.stateChangeLog.info("BLOCK* allocateBlock: " + src + ". "
+ getBlockPoolId() + " " + b);
for (DatanodeDescriptor dn : targets) {
dn.incBlocksScheduled();
}
return b;
}
/**
* Create new block with a unique block id and a new generation stamp.
*/
Block createNewBlock() throws IOException {
assert hasWriteLock();
Block b = new Block(getFSImage().getUniqueBlockId(), 0, 0);
// Increment the generation stamp for every new block.
b.setGenerationStamp(nextGenerationStamp());
b = dir.addBlock(src, inodesInPath, b, targets);
NameNode.stateChangeLog.info("BLOCK* allocateBlock: " + src + ". "
+ blockPoolId + " " + b);
return b;
}
@ -5582,7 +5667,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
@Override
public boolean isAvoidingStaleDataNodesForWrite() {
return this.blockManager.getDatanodeManager()
.isAvoidingStaleDataNodesForWrite();
.shouldAvoidStaleDataNodesForWrite();
}
/**

View File

@ -17,18 +17,21 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.FileNotFoundException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.SequentialNumber;
/**
* An id which uniquely identifies an inode
* An id which uniquely identifies an inode. Ids 1 to 1000 are reserved for
* potential future use. Ids are never recycled and are not expected to wrap
* around for a very long time. The root inode id is always 1001. Id 0 is
* used for backward compatibility support.
*/
@InterfaceAudience.Private
class INodeId extends SequentialNumber {
public class INodeId extends SequentialNumber {
/**
* The last reserved inode id. Reserve id 1 to 1000 for potential future
* usage. The id won't be recycled and is not expected to wrap around in a
* very long time. Root inode id will be 1001.
* The last reserved inode id.
*/
public static final long LAST_RESERVED_ID = 1000L;
@ -38,6 +41,19 @@ class INodeId extends SequentialNumber {
*/
public static final long GRANDFATHER_INODE_ID = 0;
/**
* Check that the request id matches the inode's saved id. For backward
* compatibility, a fileId of GRANDFATHER_INODE_ID is not checked.
*/
public static void checkId(long requestId, INode inode)
throws FileNotFoundException {
if (requestId != GRANDFATHER_INODE_ID && requestId != inode.getId()) {
throw new FileNotFoundException(
"ID mismatch. Request id and saved id: " + requestId + " , "
+ inode.getId());
}
}
INodeId() {
super(LAST_RESERVED_ID);
}
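// Usage sketch (values hypothetical; 'inode' is any resolved INode whose
// getId() returns 1001):
//   INodeId.checkId(INodeId.GRANDFATHER_INODE_ID, inode); // passes: id 0 skips the check
//   INodeId.checkId(1001L, inode); // passes: ids match
//   INodeId.checkId(1010L, inode); // throws FileNotFoundException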

View File

@ -587,7 +587,7 @@ public class NNStorage extends Storage implements Closeable,
}
// Set Block pool ID in version with federation support
if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
if (versionSupportsFederation()) {
String sbpid = props.getProperty("blockpoolID");
setBlockPoolID(sd.getRoot(), sbpid);
}
@ -634,7 +634,7 @@ public class NNStorage extends Storage implements Closeable,
) throws IOException {
super.setPropertiesFromFields(props, sd);
// Set blockpoolID in version with federation support
if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
if (versionSupportsFederation()) {
props.setProperty("blockpoolID", blockpoolID);
}
}

View File

@ -78,6 +78,7 @@ import org.apache.hadoop.util.ServicePlugin;
import org.apache.hadoop.util.StringUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
@ -780,6 +781,26 @@ public class NameNode {
return initializeSharedEdits(conf, force, false);
}
/**
* Clone the supplied configuration but remove the shared edits dirs.
*
* @param conf Supplies the original configuration.
* @return Cloned configuration without the shared edit dirs.
* @throws IOException on failure to generate the configuration.
*/
private static Configuration getConfigurationWithoutSharedEdits(
Configuration conf)
throws IOException {
List<URI> editsDirs = FSNamesystem.getNamespaceEditsDirs(conf, false);
String editsDirsString = Joiner.on(",").join(editsDirs);
Configuration confWithoutShared = new Configuration(conf);
confWithoutShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
confWithoutShared.setStrings(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
editsDirsString);
return confWithoutShared;
}
/**
* Format a new shared edits dir and copy in enough edit log segments so that
* the standby NN can start up.
@ -809,11 +830,8 @@ public class NameNode {
NNStorage existingStorage = null;
try {
Configuration confWithoutShared = new Configuration(conf);
confWithoutShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
FSNamesystem fsns = FSNamesystem.loadFromDisk(confWithoutShared,
FSNamesystem.getNamespaceDirs(conf),
FSNamesystem.getNamespaceEditsDirs(conf, false));
FSNamesystem fsns =
FSNamesystem.loadFromDisk(getConfigurationWithoutSharedEdits(conf));
existingStorage = fsns.getFSImage().getStorage();
NamespaceInfo nsInfo = existingStorage.getNamespaceInfo();

View File

@ -422,13 +422,10 @@ class NameNodeRpcServer implements NamenodeProtocols {
}
@Override // ClientProtocol
public void create(String src,
FsPermission masked,
String clientName,
EnumSetWritable<CreateFlag> flag,
boolean createParent,
short replication,
long blockSize) throws IOException {
public HdfsFileStatus create(String src, FsPermission masked,
String clientName, EnumSetWritable<CreateFlag> flag,
boolean createParent, short replication, long blockSize)
throws IOException {
String clientMachine = getClientMachine();
if (stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*DIR* NameNode.create: file "
@ -438,12 +435,13 @@ class NameNodeRpcServer implements NamenodeProtocols {
throw new IOException("create: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
namesystem.startFile(src,
new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(),
null, masked),
clientName, clientMachine, flag.get(), createParent, replication, blockSize);
HdfsFileStatus fileStatus = namesystem.startFile(src, new PermissionStatus(
UserGroupInformation.getCurrentUser().getShortUserName(), null, masked),
clientName, clientMachine, flag.get(), createParent, replication,
blockSize);
metrics.incrFilesCreated();
metrics.incrCreateFileOps();
return fileStatus;
}
@Override // ClientProtocol
@ -483,15 +481,13 @@ class NameNodeRpcServer implements NamenodeProtocols {
namesystem.setOwner(src, username, groupname);
}
@Override // ClientProtocol
public LocatedBlock addBlock(String src,
String clientName,
ExtendedBlock previous,
DatanodeInfo[] excludedNodes)
@Override
public LocatedBlock addBlock(String src, String clientName,
ExtendedBlock previous, DatanodeInfo[] excludedNodes, long fileId)
throws IOException {
if (stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*BLOCK* NameNode.addBlock: file "
+src+" for "+clientName);
stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " + src
+ " fileId=" + fileId + " for " + clientName);
}
HashMap<Node, Node> excludedNodesSet = null;
if (excludedNodes != null) {
@ -500,8 +496,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
excludedNodesSet.put(node, node);
}
}
LocatedBlock locatedBlock =
namesystem.getAdditionalBlock(src, clientName, previous, excludedNodesSet);
LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, fileId,
clientName, previous, excludedNodesSet);
if (locatedBlock != null)
metrics.incrAddBlockOps();
return locatedBlock;

View File

@ -17,12 +17,15 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSUtil.percent2String;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.lang.management.MemoryUsage;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URLEncoder;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
@ -64,6 +67,14 @@ import org.znerd.xmlenc.XMLOutputter;
import com.google.common.base.Preconditions;
class NamenodeJspHelper {
static String fraction2String(double value) {
return StringUtils.format("%.2f", value);
}
static String fraction2String(long numerator, long denominator) {
return fraction2String(numerator/(double)denominator);
}
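// For example, fraction2String(1, 3) renders as "0.33".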
static String getSafeModeText(FSNamesystem fsn) {
if (!fsn.isInSafeMode())
return "";
@ -361,20 +372,20 @@ class NamenodeJspHelper {
+ "DFS Remaining" + colTxt() + ":" + colTxt()
+ StringUtils.byteDesc(remaining) + rowTxt() + colTxt() + "DFS Used%"
+ colTxt() + ":" + colTxt()
+ StringUtils.limitDecimalTo2(percentUsed) + " %" + rowTxt()
+ percent2String(percentUsed) + rowTxt()
+ colTxt() + "DFS Remaining%" + colTxt() + ":" + colTxt()
+ StringUtils.limitDecimalTo2(percentRemaining) + " %"
+ percent2String(percentRemaining)
+ rowTxt() + colTxt() + "Block Pool Used" + colTxt() + ":" + colTxt()
+ StringUtils.byteDesc(bpUsed) + rowTxt()
+ colTxt() + "Block Pool Used%"+ colTxt() + ":" + colTxt()
+ StringUtils.limitDecimalTo2(percentBpUsed) + " %"
+ percent2String(percentBpUsed)
+ rowTxt() + colTxt() + "DataNodes usages" + colTxt() + ":" + colTxt()
+ "Min %" + colTxt() + "Median %" + colTxt() + "Max %" + colTxt()
+ "stdev %" + rowTxt() + colTxt() + colTxt() + colTxt()
+ StringUtils.limitDecimalTo2(min) + " %"
+ colTxt() + StringUtils.limitDecimalTo2(median) + " %"
+ colTxt() + StringUtils.limitDecimalTo2(max) + " %"
+ colTxt() + StringUtils.limitDecimalTo2(dev) + " %"
+ percent2String(min)
+ colTxt() + percent2String(median)
+ colTxt() + percent2String(max)
+ colTxt() + percent2String(dev)
+ rowTxt() + colTxt()
+ "<a href=\"dfsnodelist.jsp?whatNodes=LIVE\">Live Nodes</a> "
+ colTxt() + ":" + colTxt() + live.size()
@ -443,7 +454,13 @@ class NamenodeJspHelper {
nodeToRedirect = nn.getHttpAddress().getHostName();
redirectPort = nn.getHttpAddress().getPort();
}
String addr = nn.getNameNodeAddressHostPortString();
InetSocketAddress rpcAddr = nn.getNameNodeAddress();
String rpcHost = rpcAddr.getAddress().isAnyLocalAddress()
? URI.create(request.getRequestURL().toString()).getHost()
: rpcAddr.getAddress().getHostAddress();
String addr = rpcHost + ":" + rpcAddr.getPort();
String fqdn = InetAddress.getByName(nodeToRedirect).getCanonicalHostName();
redirectLocation = HttpConfig.getSchemePrefix() + fqdn + ":" + redirectPort
+ "/browseDirectory.jsp?namenodeInfoPort="
@ -562,9 +579,9 @@ class NamenodeJspHelper {
long u = d.getDfsUsed();
long nu = d.getNonDfsUsed();
long r = d.getRemaining();
String percentUsed = StringUtils.limitDecimalTo2(d.getDfsUsedPercent());
String percentRemaining = StringUtils.limitDecimalTo2(d
.getRemainingPercent());
final double percentUsedValue = d.getDfsUsedPercent();
String percentUsed = fraction2String(percentUsedValue);
String percentRemaining = fraction2String(d.getRemainingPercent());
String adminState = d.getAdminState().toString();
@ -572,32 +589,30 @@ class NamenodeJspHelper {
long currentTime = Time.now();
long bpUsed = d.getBlockPoolUsed();
String percentBpUsed = StringUtils.limitDecimalTo2(d
.getBlockPoolUsedPercent());
String percentBpUsed = fraction2String(d.getBlockPoolUsedPercent());
out.print("<td class=\"lastcontact\"> "
+ ((currentTime - timestamp) / 1000)
+ "<td class=\"adminstate\">"
+ adminState
+ "<td align=\"right\" class=\"capacity\">"
+ StringUtils.limitDecimalTo2(c * 1.0 / diskBytes)
+ fraction2String(c, diskBytes)
+ "<td align=\"right\" class=\"used\">"
+ StringUtils.limitDecimalTo2(u * 1.0 / diskBytes)
+ fraction2String(u, diskBytes)
+ "<td align=\"right\" class=\"nondfsused\">"
+ StringUtils.limitDecimalTo2(nu * 1.0 / diskBytes)
+ fraction2String(nu, diskBytes)
+ "<td align=\"right\" class=\"remaining\">"
+ StringUtils.limitDecimalTo2(r * 1.0 / diskBytes)
+ fraction2String(r, diskBytes)
+ "<td align=\"right\" class=\"pcused\">"
+ percentUsed
+ "<td class=\"pcused\">"
+ ServletUtil.percentageGraph((int) Double.parseDouble(percentUsed),
100)
+ ServletUtil.percentageGraph((int)percentUsedValue, 100)
+ "<td align=\"right\" class=\"pcremaining\">"
+ percentRemaining
+ "<td title=" + "\"blocks scheduled : "
+ d.getBlocksScheduled() + "\" class=\"blocks\">" + d.numBlocks()+"\n"
+ "<td align=\"right\" class=\"bpused\">"
+ StringUtils.limitDecimalTo2(bpUsed * 1.0 / diskBytes)
+ fraction2String(bpUsed, diskBytes)
+ "<td align=\"right\" class=\"pcbpused\">"
+ percentBpUsed
+ "<td align=\"right\" class=\"volfails\">"

View File

@ -475,14 +475,20 @@ public class SecondaryNameNode implements Runnable {
// Returns a token that would be used to upload the merged image.
CheckpointSignature sig = namenode.rollEditLog();
if ((checkpointImage.getNamespaceID() == 0) ||
(sig.isSameCluster(checkpointImage) &&
boolean loadImage = false;
boolean isFreshCheckpointer = (checkpointImage.getNamespaceID() == 0);
boolean isSameCluster =
(dstStorage.versionSupportsFederation() && sig.isSameCluster(checkpointImage)) ||
(!dstStorage.versionSupportsFederation() && sig.namespaceIdMatches(checkpointImage));
if (isFreshCheckpointer ||
(isSameCluster &&
!sig.storageVersionMatches(checkpointImage.getStorage()))) {
// if we're a fresh 2NN, or if we're on the same cluster and our storage
// needs an upgrade, just take the storage info from the server.
dstStorage.setStorageInfo(sig);
dstStorage.setClusterID(sig.getClusterID());
dstStorage.setBlockPoolID(sig.getBlockpoolID());
loadImage = true;
}
sig.validateStorageInfo(checkpointImage);
@ -492,7 +498,7 @@ public class SecondaryNameNode implements Runnable {
RemoteEditLogManifest manifest =
namenode.getEditLogManifest(sig.mostRecentCheckpointTxId + 1);
boolean loadImage = downloadCheckpointFiles(
loadImage |= downloadCheckpointFiles(
fsName, checkpointImage, sig, manifest); // Fetch fsimage and edits
doMerge(sig, manifest, loadImage, checkpointImage, namesystem);

View File

@ -316,8 +316,7 @@ public class DFSAdmin extends FsShell {
System.out.println("DFS Used: " + used
+ " (" + StringUtils.byteDesc(used) + ")");
System.out.println("DFS Used%: "
+ StringUtils.limitDecimalTo2(((1.0 * used) / presentCapacity) * 100)
+ "%");
+ StringUtils.formatPercent(used/(double)presentCapacity, 2));
/* These counts are not always up to date. They are updated after
* iteration of an internal list. Should be updated in a few seconds to

View File

@ -219,6 +219,7 @@ public class JsonUtil {
m.put("modificationTime", status.getModificationTime());
m.put("blockSize", status.getBlockSize());
m.put("replication", status.getReplication());
m.put("fileId", status.getFileId());
return includeType ? toJsonString(FileStatus.class, m): JSON.toString(m);
}
@ -243,9 +244,10 @@ public class JsonUtil {
final long mTime = (Long) m.get("modificationTime");
final long blockSize = (Long) m.get("blockSize");
final short replication = (short) (long) (Long) m.get("replication");
final long fileId = (Long) m.get("fileId");
return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
blockSize, mTime, aTime, permission, owner, group,
symlink, DFSUtil.string2Bytes(localName));
symlink, DFSUtil.string2Bytes(localName), fileId);
}
/** Convert an ExtendedBlock to a Json map. */

View File

@ -21,7 +21,7 @@ package org.apache.hadoop.hdfs.web.resources;
/** The concat source paths parameter. */
public class ConcatSourcesParam extends StringParam {
/** Parameter name. */
public static final String NAME = "srcs";
public static final String NAME = "sources";
public static final String DEFAULT = NULL;

View File

@ -67,7 +67,8 @@ message CreateRequestProto {
required uint64 blockSize = 7;
}
message CreateResponseProto { // void response
message CreateResponseProto {
optional HdfsFileStatusProto fs = 1;
}
message AppendRequestProto {
@ -119,6 +120,7 @@ message AddBlockRequestProto {
required string clientName = 2;
optional ExtendedBlockProto previous = 3;
repeated DatanodeInfoProto excludeNodes = 4;
optional uint64 fileId = 5 [default = 0]; // default as a bogus id
}
message AddBlockResponseProto {

View File

@ -170,6 +170,9 @@ message HdfsFileStatusProto {
optional uint32 block_replication = 10 [default = 0]; // only 16bits used
optional uint64 blocksize = 11 [default = 0];
optional LocatedBlocksProto locations = 12; // supplied only if asked by client
// Optional field for fileId
optional uint64 fileId = 13 [default = 0]; // default as an invalid id
}
/**

View File

@ -999,17 +999,14 @@
</property>
<property>
<name>dfs.namenode.check.stale.datanode</name>
<name>dfs.namenode.avoid.read.stale.datanode</name>
<value>false</value>
<description>
Indicate whether or not to check "stale" datanodes whose
Indicate whether or not to avoid reading from &quot;stale&quot; datanodes whose
heartbeat messages have not been received by the namenode
for more than a specified time interval. If this configuration
parameter is set as true, the system will keep track
of the number of stale datanodes. The stale datanodes will be
for more than a specified time interval. Stale datanodes will be
moved to the end of the node list returned for reading. See
dfs.namenode.avoid.write.stale.datanode for details on how this
affects writes.
dfs.namenode.avoid.write.stale.datanode for a similar setting for writes.
</description>
</property>
@ -1017,13 +1014,13 @@
<name>dfs.namenode.avoid.write.stale.datanode</name>
<value>false</value>
<description>
Indicate whether or not to avoid writing to "stale" datanodes whose
Indicate whether or not to avoid writing to &quot;stale&quot; datanodes whose
heartbeat messages have not been received by the namenode
for more than a specified time interval. If this configuration
parameter and dfs.namenode.check.stale.datanode are both set as true,
the writing will avoid using stale datanodes unless a high number
of datanodes are marked as stale. See
dfs.namenode.write.stale.datanode.ratio for details.
for more than a specified time interval. Writes will avoid using
stale datanodes unless more than a configured ratio
(dfs.namenode.write.stale.datanode.ratio) of datanodes are marked as
stale. See dfs.namenode.avoid.read.stale.datanode for a similar setting
for reads.
</description>
</property>
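<!-- Example (illustrative values): to enable both stale-node checks, set in
     hdfs-site.xml:
       dfs.namenode.avoid.read.stale.datanode  = true
       dfs.namenode.avoid.write.stale.datanode = true
       dfs.namenode.write.stale.datanode.ratio = 0.5
     With a ratio of 0.5, writes stop avoiding stale datanodes once more than
     half of the live datanodes are marked as stale. -->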

View File

@ -109,6 +109,9 @@ WebHDFS REST API
* {{{Append to a File}<<<APPEND>>>}}
(see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.append)
* {{{Concat File(s)}<<<CONCAT>>>}}
(see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.concat)
* HTTP DELETE
* {{{Delete a File/Directory}<<<DELETE>>>}}
@ -299,6 +302,32 @@ Content-Length: 0
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.append
** {Concat File(s)}
* Submit a HTTP POST request.
+---------------------------------
curl -i -X POST "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CONCAT&sources=<SOURCES>"
+---------------------------------
The client receives a response with zero content length:
+---------------------------------
HTTP/1.1 200 OK
Content-Length: 0
+---------------------------------
[]
This REST API call is available as of Hadoop version 2.0.3.
Please note that <SOURCES> is a comma separated list of absolute paths.
(Example: sources=/test/file1,/test/file2,/test/file3)
See also:
{{{Sources}<<<sources>>>}},
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.concat
** {Open and Read a File}
* Submit a HTTP GET request with automatically following redirects.
@ -1727,6 +1756,29 @@ var tokenProperties =
{{{Set Replication Factor}<<<SETREPLICATION>>>}}
** {Sources}
*----------------+-------------------------------------------------------------------+
|| Name | <<<sources>>> |
*----------------+-------------------------------------------------------------------+
|| Description | The comma separated absolute paths used for concatenation. |
*----------------+-------------------------------------------------------------------+
|| Type | String |
*----------------+-------------------------------------------------------------------+
|| Default Value | \<empty\> |
*----------------+-------------------------------------------------------------------+
|| Valid Values | A list of comma separated absolute FileSystem paths without scheme and authority. |
*----------------+-------------------------------------------------------------------+
|| Syntax | See the note in {{Delegation}}. |
*----------------+-------------------------------------------------------------------+
<<Note>> that sources are absolute FileSystem paths.
See also:
{{{Concat File(s)}<<<CONCAT>>>}}
** {Token}
*----------------+-------------------------------------------------------------------+

View File

@ -33,6 +33,7 @@ import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.URL;
@ -620,6 +621,9 @@ public class DFSTestUtil {
*/
public static byte[] urlGetBytes(URL url) throws IOException {
URLConnection conn = url.openConnection();
HttpURLConnection hc = (HttpURLConnection)conn;
assertEquals(HttpURLConnection.HTTP_OK, hc.getResponseCode());
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
return out.toByteArray();

View File

@ -23,22 +23,34 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import javax.net.SocketFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.net.StandardSocketFactory;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
public class TestDFSClientFailover {
@ -91,6 +103,63 @@ public class TestDFSClientFailover {
fs.close();
}
/**
* Test that even a non-idempotent method will properly fail-over if the
* first IPC attempt times out trying to connect. Regression test for
* HDFS-4404.
*/
@Test
public void testFailoverOnConnectTimeout() throws Exception {
conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
InjectingSocketFactory.class, SocketFactory.class);
// Set up the InjectingSocketFactory to throw a ConnectTimeoutException
// when connecting to the first NN.
InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
// Make the second NN the active one.
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
// Call a non-idempotent method, and ensure the failover of the call proceeds
// successfully.
IOUtils.closeStream(fs.create(TEST_FILE));
}
private static class InjectingSocketFactory extends StandardSocketFactory {
static SocketFactory defaultFactory = SocketFactory.getDefault();
static int portToInjectOn;
@Override
public Socket createSocket() throws IOException {
Socket spy = Mockito.spy(defaultFactory.createSocket());
// Simplify our spying job by not having to also spy on the channel
Mockito.doReturn(null).when(spy).getChannel();
// Throw a ConnectTimeoutException when connecting to our target "bad"
// host.
Mockito.doThrow(new ConnectTimeoutException("injected"))
.when(spy).connect(
Mockito.argThat(new MatchesPort()),
Mockito.anyInt());
return spy;
}
private class MatchesPort extends BaseMatcher<SocketAddress> {
@Override
public boolean matches(Object arg0) {
return ((InetSocketAddress)arg0).getPort() == portToInjectOn;
}
@Override
public void describeTo(Description desc) {
desc.appendText("matches port " + portToInjectOn);
}
}
}
/**
* Regression test for HDFS-2683.
*/

View File

@ -23,7 +23,10 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyShort;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
@ -49,13 +52,13 @@ import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsUtils;
import org.apache.hadoop.hdfs.protocol.Block;
@ -64,12 +67,14 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
@ -208,7 +213,7 @@ public class TestDFSClientRetries {
* Verify that client will correctly give up after the specified number
* of times trying to add a block
*/
@SuppressWarnings("serial")
@SuppressWarnings({ "serial", "unchecked" })
@Test
public void testNotYetReplicatedErrors() throws IOException
{
@ -235,7 +240,22 @@ public class TestDFSClientRetries {
when(mockNN.addBlock(anyString(),
anyString(),
any(ExtendedBlock.class),
any(DatanodeInfo[].class))).thenAnswer(answer);
any(DatanodeInfo[].class),
anyLong())).thenAnswer(answer);
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010)).when(mockNN).getFileInfo(anyString());
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010))
.when(mockNN)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
anyShort(), anyLong());
final DFSClient client = new DFSClient(null, mockNN, conf, null);
OutputStream os = client.create("testfile", true);
@ -369,7 +389,8 @@ public class TestDFSClientRetries {
return ret2;
}
}).when(spyNN).addBlock(Mockito.anyString(), Mockito.anyString(),
Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any());
Mockito.<ExtendedBlock> any(), Mockito.<DatanodeInfo[]> any(),
Mockito.anyLong());
doAnswer(new Answer<Boolean>() {
@ -410,7 +431,8 @@ public class TestDFSClientRetries {
// Make sure the mock was actually properly injected.
Mockito.verify(spyNN, Mockito.atLeastOnce()).addBlock(
Mockito.anyString(), Mockito.anyString(),
Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any());
Mockito.<ExtendedBlock> any(), Mockito.<DatanodeInfo[]> any(),
Mockito.anyLong());
Mockito.verify(spyNN, Mockito.atLeastOnce()).complete(
Mockito.anyString(), Mockito.anyString(),
Mockito.<ExtendedBlock>any());

View File

@ -619,6 +619,16 @@ public class TestDFSUtil {
assertEquals(1, uris.size());
assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
// Make sure that when FS_DEFAULT_NAME_KEY is configured with an IP address,
// it is automatically converted to a hostname
conf = new HdfsConfiguration();
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
uris = DFSUtil.getNameServiceUris(conf);
assertEquals(1, uris.size());
for (URI uri : uris) {
assertFalse(uri.getHost().equals("127.0.0.1"));
}
}
@Test

View File

@ -71,6 +71,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.EnumSetWritable;
@ -518,7 +519,7 @@ public class TestFileCreation {
// add one block to the file
LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
client.clientName, null, null);
client.clientName, null, null, INodeId.GRANDFATHER_INODE_ID);
System.out.println("testFileCreationError2: "
+ "Added block " + location.getBlock());
@ -568,8 +569,8 @@ public class TestFileCreation {
final Path f = new Path("/foo.txt");
createFile(dfs, f, 3);
try {
cluster.getNameNodeRpc().addBlock(f.toString(),
client.clientName, null, null);
cluster.getNameNodeRpc().addBlock(f.toString(), client.clientName,
null, null, INodeId.GRANDFATHER_INODE_ID);
fail();
} catch(IOException ioe) {
FileSystem.LOG.info("GOOD!", ioe);

Some files were not shown because too many files have changed in this diff