Merge r1241554 through r1242605 from 0.23.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23-PB@1242642 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2012-02-10 03:04:05 +00:00
commit 6c0178e7e5
116 changed files with 3733 additions and 766 deletions

View File

@ -20,12 +20,12 @@
<parent> <parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId> <artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<relativePath>../hadoop-project</relativePath> <relativePath>../hadoop-project</relativePath>
</parent> </parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-assemblies</artifactId> <artifactId>hadoop-assemblies</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<name>Apache Hadoop Assemblies</name> <name>Apache Hadoop Assemblies</name>
<description>Apache Hadoop Assemblies</description> <description>Apache Hadoop Assemblies</description>

View File

@ -18,12 +18,12 @@
<parent> <parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId> <artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<relativePath>../hadoop-project</relativePath> <relativePath>../hadoop-project</relativePath>
</parent> </parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId> <artifactId>hadoop-client</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<packaging>jar</packaging> <packaging>jar</packaging>
<description>Apache Hadoop Client</description> <description>Apache Hadoop Client</description>

View File

@ -17,12 +17,12 @@
<parent> <parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId> <artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath> <relativePath>../../hadoop-project</relativePath>
</parent> </parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-annotations</artifactId> <artifactId>hadoop-annotations</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<description>Apache Hadoop Annotations</description> <description>Apache Hadoop Annotations</description>
<name>Apache Hadoop Annotations</name> <name>Apache Hadoop Annotations</name>
<packaging>jar</packaging> <packaging>jar</packaging>

View File

@ -17,12 +17,12 @@
<parent> <parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId> <artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath> <relativePath>../../hadoop-project</relativePath>
</parent> </parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth-examples</artifactId> <artifactId>hadoop-auth-examples</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<packaging>war</packaging> <packaging>war</packaging>
<name>Apache Hadoop Auth Examples</name> <name>Apache Hadoop Auth Examples</name>

View File

@ -17,12 +17,12 @@
<parent> <parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId> <artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath> <relativePath>../../hadoop-project</relativePath>
</parent> </parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth</artifactId> <artifactId>hadoop-auth</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<packaging>jar</packaging> <packaging>jar</packaging>
<name>Apache Hadoop Auth</name> <name>Apache Hadoop Auth</name>

View File

@ -63,7 +63,29 @@ Release 0.23-PB - Unreleased
HADOOP-7931. o.a.h.ipc.WritableRpcEngine should have a way to force HADOOP-7931. o.a.h.ipc.WritableRpcEngine should have a way to force
initialization (atm) initialization (atm)
Release 0.23.1 - Unreleased Release 0.23.2 - UNRELEASED
NEW FEATURES
IMPROVEMENTS
HADOOP-8032. mvn site:stage-deploy should be able to use the scp protocol
to stage documents (Ravi Prakash via tgraves)
HADOOP-7923. Automate the updating of version numbers in the doc system.
(szetszwo)
OPTIMIZATIONS
BUG FIXES
HADOOP-8042. When copying a file out of HDFS, modifying it, and uploading
it back into HDFS, the put fails due to a CRC mismatch.
(Daryn Sharp via bobby)
HADOOP-8035. Hadoop Maven site is inefficient and runs phases redundantly.
(abayer via tucu)
Release 0.23.1 - 2012-02-08
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES
@ -166,8 +188,6 @@ Release 0.23.1 - Unreleased
HADOOP-7761. Improve the performance of raw comparisons. (todd) HADOOP-7761. Improve the performance of raw comparisons. (todd)
BUG FIXES BUG FIXES
HADOOP-8013. ViewFileSystem does not honor setVerifyChecksum
(Daryn Sharp via bobby)
HADOOP-8018. Hudson auto test for HDFS has started throwing javadoc HADOOP-8018. Hudson auto test for HDFS has started throwing javadoc
(Jon Eagles via bobby) (Jon Eagles via bobby)
@ -184,6 +204,14 @@ Release 0.23.1 - Unreleased
HADOOP-7811. TestUserGroupInformation#testGetServerSideGroups test fails in chroot. HADOOP-7811. TestUserGroupInformation#testGetServerSideGroups test fails in chroot.
(Jonathan Eagles via mahadev) (Jonathan Eagles via mahadev)
HADOOP-7813. Fix test-patch to use proper numerical comparison when checking
javadoc and findbugs warning counts. (Jonathan Eagles via tlipcon)
HADOOP-7841. Run tests with non-secure random. (tlipcon)
HADOOP-7851. Configuration.getClasses() never returns the default value.
(Uma Maheswara Rao G via amarrk)
HADOOP-7787. Make source tarball use conventional name. HADOOP-7787. Make source tarball use conventional name.
(Bruno Mahé via tomwhite) (Bruno Mahé via tomwhite)
@ -277,6 +305,9 @@ Release 0.23.1 - Unreleased
HADOOP-8012. hadoop-daemon.sh and yarn-daemon.sh are trying to mkdir HADOOP-8012. hadoop-daemon.sh and yarn-daemon.sh are trying to mkdir
and chown log/pid dirs which can fail. (Roman Shaposhnik via eli) and chown log/pid dirs which can fail. (Roman Shaposhnik via eli)
HADOOP-8013. ViewFileSystem does not honor setVerifyChecksum
(Daryn Sharp via bobby)
Release 0.23.0 - 2011-11-01 Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -17,12 +17,12 @@
<parent> <parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project-dist</artifactId> <artifactId>hadoop-project-dist</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project-dist</relativePath> <relativePath>../../hadoop-project-dist</relativePath>
</parent> </parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId> <artifactId>hadoop-common</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<description>Apache Hadoop Common</description> <description>Apache Hadoop Common</description>
<name>Apache Hadoop Common</name> <name>Apache Hadoop Common</name>
<packaging>jar</packaging> <packaging>jar</packaging>

View File

@ -43,6 +43,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
private static final byte[] CHECKSUM_VERSION = new byte[] {'c', 'r', 'c', 0}; private static final byte[] CHECKSUM_VERSION = new byte[] {'c', 'r', 'c', 0};
private int bytesPerChecksum = 512; private int bytesPerChecksum = 512;
private boolean verifyChecksum = true; private boolean verifyChecksum = true;
private boolean writeChecksum = true;
public static double getApproxChkSumLength(long size) { public static double getApproxChkSumLength(long size) {
return ChecksumFSOutputSummer.CHKSUM_AS_FRACTION * size; return ChecksumFSOutputSummer.CHKSUM_AS_FRACTION * size;
@ -67,6 +68,11 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
this.verifyChecksum = verifyChecksum; this.verifyChecksum = verifyChecksum;
} }
@Override
public void setWriteChecksum(boolean writeChecksum) {
this.writeChecksum = writeChecksum;
}
/** get the raw file system */ /** get the raw file system */
public FileSystem getRawFileSystem() { public FileSystem getRawFileSystem() {
return fs; return fs;
@ -428,9 +434,20 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
throw new IOException("Mkdirs failed to create " + parent); throw new IOException("Mkdirs failed to create " + parent);
} }
} }
final FSDataOutputStream out = new FSDataOutputStream( final FSDataOutputStream out;
if (writeChecksum) {
out = new FSDataOutputStream(
new ChecksumFSOutputSummer(this, f, overwrite, bufferSize, replication, new ChecksumFSOutputSummer(this, f, overwrite, bufferSize, replication,
blockSize, progress), null); blockSize, progress), null);
} else {
out = fs.create(f, permission, overwrite, bufferSize, replication,
blockSize, progress);
// remove the checksum file since we aren't writing one
Path checkFile = getChecksumFile(f);
if (fs.exists(checkFile)) {
fs.delete(checkFile, true);
}
}
if (permission != null) { if (permission != null) {
setPermission(f, permission); setPermission(f, permission);
} }
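Editor's sketch (not part of the commit): the hunk above makes ChecksumFileSystem.create() bypass ChecksumFSOutputSummer and delete any stale .crc side file when the new writeChecksum flag is off; the flag is exposed through FileSystem.setWriteChecksum() in the next file. A minimal illustration, assuming a local path under /tmp:

// Sketch only: effect of the new writeChecksum flag on a ChecksumFileSystem.
// The path below is an assumption for the example.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class WriteChecksumSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    LocalFileSystem lfs = FileSystem.getLocal(conf); // a ChecksumFileSystem
    Path p = new Path("/tmp/writeChecksumSketch");

    lfs.setWriteChecksum(false);            // new setter introduced here
    FSDataOutputStream out = lfs.create(p); // goes straight to the raw fs
    out.writeChars("hi");
    out.close();

    // No .crc side file is written while writeChecksum is false.
    System.out.println(lfs.exists(lfs.getChecksumFile(p))); // expected: false
  }
}

The same behavior is what the new TestFsShellCopy, added later in this commit, asserts through the shell.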

View File

@ -1936,6 +1936,15 @@ public abstract class FileSystem extends Configured implements Closeable {
//doesn't do anything //doesn't do anything
} }
/**
* Set the write checksum flag. This is only applicable if the
* corresponding FileSystem supports checksums. The default implementation
* does nothing.
* @param writeChecksum whether checksum files should be written
*/
public void setWriteChecksum(boolean writeChecksum) {
//doesn't do anything
}
/** /**
* Return a list of file status objects that corresponds to the list of paths * Return a list of file status objects that corresponds to the list of paths
* excluding those non-existent paths. * excluding those non-existent paths.

View File

@ -361,6 +361,11 @@ public class FilterFileSystem extends FileSystem {
fs.setVerifyChecksum(verifyChecksum); fs.setVerifyChecksum(verifyChecksum);
} }
@Override
public void setWriteChecksum(boolean writeChecksum) {
fs.setWriteChecksum(writeChecksum);
}
@Override @Override
public Configuration getConf() { public Configuration getConf() {
return fs.getConf(); return fs.getConf();

View File

@ -41,7 +41,9 @@ import org.apache.hadoop.io.IOUtils;
*/ */
abstract class CommandWithDestination extends FsCommand { abstract class CommandWithDestination extends FsCommand {
protected PathData dst; protected PathData dst;
protected boolean overwrite = false; private boolean overwrite = false;
private boolean verifyChecksum = true;
private boolean writeChecksum = true;
/** /**
* *
@ -53,6 +55,14 @@ abstract class CommandWithDestination extends FsCommand {
overwrite = flag; overwrite = flag;
} }
protected void setVerifyChecksum(boolean flag) {
verifyChecksum = flag;
}
protected void setWriteChecksum(boolean flag) {
writeChecksum = flag;
}
/** /**
* The last arg is expected to be a local path, if only one argument is * The last arg is expected to be a local path, if only one argument is
* given then the destination will be the current directory * given then the destination will be the current directory
@ -201,6 +211,7 @@ abstract class CommandWithDestination extends FsCommand {
* @throws IOException if copy fails * @throws IOException if copy fails
*/ */
protected void copyFileToTarget(PathData src, PathData target) throws IOException { protected void copyFileToTarget(PathData src, PathData target) throws IOException {
src.fs.setVerifyChecksum(verifyChecksum);
copyStreamToTarget(src.fs.open(src.path), target); copyStreamToTarget(src.fs.open(src.path), target);
} }
@ -217,6 +228,7 @@ abstract class CommandWithDestination extends FsCommand {
if (target.exists && (target.stat.isDirectory() || !overwrite)) { if (target.exists && (target.stat.isDirectory() || !overwrite)) {
throw new PathExistsException(target.toString()); throw new PathExistsException(target.toString());
} }
target.fs.setWriteChecksum(writeChecksum);
PathData tempFile = null; PathData tempFile = null;
try { try {
tempFile = target.createTempFile(target+"._COPYING_"); tempFile = target.createTempFile(target+"._COPYING_");

View File

@ -25,7 +25,6 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ChecksumFileSystem;
import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.FileUtil;
/** Various commands for copy files */ /** Various commands for copy files */
@ -103,43 +102,17 @@ class CopyCommands {
"to the local name. <src> is kept. When copying multiple,\n" + "to the local name. <src> is kept. When copying multiple,\n" +
"files, the destination must be a directory."; "files, the destination must be a directory.";
/**
* The prefix for the tmp file used in copyToLocal.
* It must be at least three characters long, required by
* {@link java.io.File#createTempFile(String, String, File)}.
*/
private boolean copyCrc;
private boolean verifyChecksum;
@Override @Override
protected void processOptions(LinkedList<String> args) protected void processOptions(LinkedList<String> args)
throws IOException { throws IOException {
CommandFormat cf = new CommandFormat( CommandFormat cf = new CommandFormat(
1, Integer.MAX_VALUE, "crc", "ignoreCrc"); 1, Integer.MAX_VALUE, "crc", "ignoreCrc");
cf.parse(args); cf.parse(args);
copyCrc = cf.getOpt("crc"); setWriteChecksum(cf.getOpt("crc"));
verifyChecksum = !cf.getOpt("ignoreCrc"); setVerifyChecksum(!cf.getOpt("ignoreCrc"));
setRecursive(true); setRecursive(true);
getLocalDestination(args); getLocalDestination(args);
} }
@Override
protected void copyFileToTarget(PathData src, PathData target)
throws IOException {
src.fs.setVerifyChecksum(verifyChecksum);
if (copyCrc && !(src.fs instanceof ChecksumFileSystem)) {
displayWarning(src.fs + ": Does not support checksums");
copyCrc = false;
}
super.copyFileToTarget(src, target);
if (copyCrc) {
// should we delete real file if crc copy fails?
super.copyFileToTarget(src.getChecksumFile(), target.getChecksumFile());
}
}
} }
/** /**
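Editor's sketch (not part of the commit): with the option parsing above, -get maps -crc to setWriteChecksum(true) and -ignoreCrc to setVerifyChecksum(false), and CommandWithDestination now pushes those flags onto the source and destination file systems instead of Get handling crc files itself. A hedged illustration that drives FsShell programmatically, the same way the new TestFsShellCopy does; the HDFS source path and local destinations are assumptions:

// Sketch only: exercises the -crc / -ignoreCrc handling shown above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;

public class GetWithCrcSketch {
  public static void main(String[] args) throws Exception {
    FsShell shell = new FsShell(new Configuration());

    // Default: verify the source checksum, do not write a local .crc file.
    int rc1 = shell.run(new String[] {"-get", "/user/me/file", "/tmp/file"});

    // -crc: also materialize the checksum file on the destination fs.
    int rc2 = shell.run(new String[] {"-get", "-crc", "/user/me/file", "/tmp/file.withcrc"});

    // -ignoreCrc: skip checksum verification on the source side.
    int rc3 = shell.run(new String[] {"-get", "-ignoreCrc", "/user/me/file", "/tmp/file.nocheck"});

    System.out.println(rc1 + " " + rc2 + " " + rc3); // 0 on success
  }
}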

View File

@ -27,7 +27,6 @@ import java.net.URISyntaxException;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumFileSystem;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.LocalFileSystem;
@ -169,19 +168,6 @@ public class PathData {
} }
} }
/**
* Return the corresponding crc data for a file. Avoids exposing the fs
* contortions to the caller.
* @return PathData of the crc file
* @throws IOException if anything goes wrong
*/
public PathData getChecksumFile() throws IOException {
checkIfExists(FileTypeRequirement.SHOULD_NOT_BE_DIRECTORY);
ChecksumFileSystem srcFs = (ChecksumFileSystem)fs;
Path srcPath = srcFs.getChecksumFile(path);
return new PathData(srcFs.getRawFileSystem(), srcPath.toString());
}
/** /**
* Returns a temporary file for this PathData with the given extension. * Returns a temporary file for this PathData with the given extension.
* The file will be deleted on exit. * The file will be deleted on exit.

View File

@ -470,6 +470,15 @@ public class ViewFileSystem extends FileSystem {
} }
} }
@Override
public void setWriteChecksum(final boolean writeChecksum) {
List<InodeTree.MountPoint<FileSystem>> mountPoints =
fsState.getMountPoints();
for (InodeTree.MountPoint<FileSystem> mount : mountPoints) {
mount.target.targetFileSystem.setWriteChecksum(writeChecksum);
}
}
public MountPoint[] getMountPoints() { public MountPoint[] getMountPoints() {
List<InodeTree.MountPoint<FileSystem>> mountPoints = List<InodeTree.MountPoint<FileSystem>> mountPoints =
fsState.getMountPoints(); fsState.getMountPoints();
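Editor's sketch (not part of the commit): the override above simply fans setWriteChecksum out to the file system behind every mount point. A hedged example of toggling the flag through a single-link viewfs; the mount link, target URI, and the use of ConfigUtil.addLink/FsConstants.VIEWFS_URI are assumptions drawn from the viewfs API rather than from this diff:

// Sketch only: setWriteChecksum on a ViewFileSystem reaches every mount target.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.viewfs.ConfigUtil;

public class ViewFsWriteChecksumSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical mount: viewfs:///data -> file:///tmp/viewfs-data
    ConfigUtil.addLink(conf, "/data", URI.create("file:///tmp/viewfs-data"));

    FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);

    // Forwarded to the target file system of each mount point, so writes
    // through viewfs:///data/... skip .crc side files just as direct
    // writes to the target would.
    viewFs.setWriteChecksum(false);
  }
}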

View File

@ -0,0 +1,97 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.*;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestFsShellCopy {
static Configuration conf;
static FsShell shell;
static LocalFileSystem lfs;
static Path testRootDir, srcPath, dstPath;
@BeforeClass
public static void setup() throws Exception {
conf = new Configuration();
shell = new FsShell(conf);
lfs = FileSystem.getLocal(conf);
testRootDir = new Path(
System.getProperty("test.build.data","test/build/data"), "testShellCopy");
lfs.mkdirs(testRootDir);
srcPath = new Path(testRootDir, "srcFile");
dstPath = new Path(testRootDir, "dstFile");
}
@Before
public void prepFiles() throws Exception {
lfs.setVerifyChecksum(true);
lfs.setWriteChecksum(true);
lfs.delete(srcPath, true);
lfs.delete(dstPath, true);
FSDataOutputStream out = lfs.create(srcPath);
out.writeChars("hi");
out.close();
assertTrue(lfs.exists(lfs.getChecksumFile(srcPath)));
}
@Test
public void testCopyNoCrc() throws Exception {
shellRun(0, "-get", srcPath.toString(), dstPath.toString());
checkPath(dstPath, false);
}
@Test
public void testCopyCrc() throws Exception {
shellRun(0, "-get", "-crc", srcPath.toString(), dstPath.toString());
checkPath(dstPath, true);
}
@Test
public void testCorruptedCopyCrc() throws Exception {
FSDataOutputStream out = lfs.getRawFileSystem().create(srcPath);
out.writeChars("bang");
out.close();
shellRun(1, "-get", srcPath.toString(), dstPath.toString());
}
@Test
public void testCorruptedCopyIgnoreCrc() throws Exception {
shellRun(0, "-get", "-ignoreCrc", srcPath.toString(), dstPath.toString());
checkPath(dstPath, false);
}
private void checkPath(Path p, boolean expectChecksum) throws IOException {
assertTrue(lfs.exists(p));
boolean hasChecksum = lfs.exists(lfs.getChecksumFile(p));
assertEquals(expectChecksum, hasChecksum);
}
private void shellRun(int n, String ... args) throws Exception {
assertEquals(n, shell.run(args));
}
}

View File

@ -17,12 +17,12 @@
<parent> <parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId> <artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<relativePath>../hadoop-project</relativePath> <relativePath>../hadoop-project</relativePath>
</parent> </parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common-project</artifactId> <artifactId>hadoop-common-project</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<description>Apache Hadoop Common Project</description> <description>Apache Hadoop Common Project</description>
<name>Apache Hadoop Common Project</name> <name>Apache Hadoop Common Project</name>
<packaging>pom</packaging> <packaging>pom</packaging>

View File

@ -17,12 +17,12 @@
<parent> <parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId> <artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<relativePath>../hadoop-project</relativePath> <relativePath>../hadoop-project</relativePath>
</parent> </parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-dist</artifactId> <artifactId>hadoop-dist</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<description>Apache Hadoop Distribution</description> <description>Apache Hadoop Distribution</description>
<name>Apache Hadoop Distribution</name> <name>Apache Hadoop Distribution</name>
<packaging>jar</packaging> <packaging>jar</packaging>

View File

@ -19,12 +19,12 @@
<parent> <parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId> <artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath> <relativePath>../../hadoop-project</relativePath>
</parent> </parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-httpfs</artifactId> <artifactId>hadoop-hdfs-httpfs</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<packaging>war</packaging> <packaging>war</packaging>
<name>Apache Hadoop HttpFS</name> <name>Apache Hadoop HttpFS</name>

View File

@ -119,7 +119,28 @@ Release 0.23-PB - Unreleased
HDFS-2768. BackupNode stop can not close proxy connections because HDFS-2768. BackupNode stop can not close proxy connections because
it is not a proxy instance. (Uma Maheswara Rao G via eli) it is not a proxy instance. (Uma Maheswara Rao G via eli)
Release 0.23.1 - UNRELEASED Release 0.23.2 - UNRELEASED
INCOMPATIBLE CHANGES
HDFS-2887. FSVolume, a part of the FSDatasetInterface implementation, should
not be referenced outside FSDataset. A new FSVolumeInterface is defined.
The BlockVolumeChoosingPolicy.chooseVolume(..) method signature is also
updated. (szetszwo)
NEW FEATURES
IMPROVEMENTS
OPTIMIZATIONS
BUG FIXES
HDFS-2923. Namenode IPC handler count uses the wrong configuration key
(todd)
HDFS-2764. TestBackupNode is racy. (atm)
Release 0.23.1 - 2012-02-08
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES
@ -134,6 +155,10 @@ Release 0.23.1 - UNRELEASED
HDFS-2545. Change WebHDFS to support multiple namenodes in federation. HDFS-2545. Change WebHDFS to support multiple namenodes in federation.
(szetszwo) (szetszwo)
HDFS-2178. Contributing Hoop to HDFS, replacement for HDFS proxy
with read/write capabilities. (tucu)
IMPROVEMENTS IMPROVEMENTS
HDFS-2560. Refactor BPOfferService to be a static inner class (todd) HDFS-2560. Refactor BPOfferService to be a static inner class (todd)
@ -167,9 +192,6 @@ Release 0.23.1 - UNRELEASED
HDFS-2604. Add a log message to show if WebHDFS is enabled and a HDFS-2604. Add a log message to show if WebHDFS is enabled and a
configuration section in the forrest doc. (szetszwo) configuration section in the forrest doc. (szetszwo)
HDFS-2178. Contributing Hoop to HDFS, replacement for HDFS proxy
with read/write capabilities. (tucu)
HDFS-2511. Add dev script to generate HDFS protobufs. (tucu) HDFS-2511. Add dev script to generate HDFS protobufs. (tucu)
HDFS-2654. Make BlockReaderLocal not extend RemoteBlockReader2. (eli) HDFS-2654. Make BlockReaderLocal not extend RemoteBlockReader2. (eli)
@ -180,7 +202,8 @@ Release 0.23.1 - UNRELEASED
HDFS-2335. DataNodeCluster and NNStorage always pull fresh entropy. HDFS-2335. DataNodeCluster and NNStorage always pull fresh entropy.
(Uma Maheswara Rao G via eli) (Uma Maheswara Rao G via eli)
HDFS-2574. Remove references to some deprecated properties in conf templates and defaults files. (Joe Crobak via harsh) HDFS-2574. Remove references to some deprecated properties in conf
templates and defaults files. (Joe Crobak via harsh)
HDFS-2722. HttpFs should not be using an int for block size. (harsh) HDFS-2722. HttpFs should not be using an int for block size. (harsh)
@ -190,19 +213,20 @@ Release 0.23.1 - UNRELEASED
HDFS-2349. Corruption detected during block transfers between DNs HDFS-2349. Corruption detected during block transfers between DNs
should log a WARN instead of INFO. (harsh) should log a WARN instead of INFO. (harsh)
HDFS-2572. Remove unnecessary double-check in DN#getHostName. (harsh) HDFS-2729. Update BlockManager's comments regarding the invalid block
set (harsh)
HDFS-2729. Update BlockManager's comments regarding the invalid block set (harsh) HDFS-2726. Fix a logging issue under DFSClient's createBlockOutputStream
method (harsh)
HDFS-2726. Fix a logging issue under DFSClient's createBlockOutputStream method (harsh)
HDFS-554. Use System.arraycopy in BlockInfo.ensureCapacity. (harsh) HDFS-554. Use System.arraycopy in BlockInfo.ensureCapacity. (harsh)
HDFS-1314. Make dfs.blocksize accept size-indicating prefixes (Sho Shimauchi via harsh) HDFS-1314. Make dfs.blocksize accept size-indicating prefixes.
(Sho Shimauchi via harsh)
HDFS-69. Improve the 'dfsadmin' commandline help. (harsh) HDFS-69. Improve the 'dfsadmin' commandline help. (harsh)
HDFS-2788. HdfsServerConstants#DN_KEEPALIVE_TIMEOUT is dead code (eli) HDFS-2788. HdfsServerConstants#DN_KEEPALIVE_TIMEOUT is dead code. (eli)
HDFS-362. FSEditLog should not writes long and short as UTF8, and should HDFS-362. FSEditLog should not writes long and short as UTF8, and should
not use ArrayWritable for writing non-array items. (Uma Maheswara Rao G not use ArrayWritable for writing non-array items. (Uma Maheswara Rao G
@ -215,7 +239,7 @@ Release 0.23.1 - UNRELEASED
HDFS-2818. Fix a missing space issue in HDFS webapps' title tags. (Devaraj K via harsh) HDFS-2818. Fix a missing space issue in HDFS webapps' title tags. (Devaraj K via harsh)
HDFS-2397. Undeprecate SecondaryNameNode (eli) HDFS-2397. Undeprecate SecondaryNameNode. (eli)
HDFS-2814. NamenodeMXBean does not account for svn revision in the version HDFS-2814. NamenodeMXBean does not account for svn revision in the version
information. (Hitesh Shah via jitendra) information. (Hitesh Shah via jitendra)
@ -228,6 +252,9 @@ Release 0.23.1 - UNRELEASED
HDFS-2868. Expose xceiver counts via the DataNode MXBean. (harsh) HDFS-2868. Expose xceiver counts via the DataNode MXBean. (harsh)
HDFS-2786. Fix host-based token incompatibilities in DFSUtil. (Kihwal
Lee via jitendra)
OPTIMIZATIONS OPTIMIZATIONS
HDFS-2130. Switch default checksum to CRC32C. (todd) HDFS-2130. Switch default checksum to CRC32C. (todd)
@ -319,11 +346,14 @@ Release 0.23.1 - UNRELEASED
HDFS-442. dfsthroughput in test jar throws NPE (harsh) HDFS-442. dfsthroughput in test jar throws NPE (harsh)
HDFS-2836. HttpFSServer still has 2 javadoc warnings in trunk (revans2 via tucu) HDFS-2836. HttpFSServer still has 2 javadoc warnings in trunk.
(revans2 via tucu)
HDFS-2837. mvn javadoc:javadoc not seeing LimitedPrivate class (revans2 via tucu) HDFS-2837. mvn javadoc:javadoc not seeing LimitedPrivate class.
(revans2 via tucu)
HDFS-2840. TestHostnameFilter should work with localhost or localhost.localdomain (tucu) HDFS-2840. TestHostnameFilter should work with localhost or
localhost.localdomain (tucu)
HDFS-2791. If block report races with closing of file, replica is HDFS-2791. If block report races with closing of file, replica is
incorrectly marked corrupt. (todd) incorrectly marked corrupt. (todd)

View File

@ -17,12 +17,12 @@
<parent> <parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project-dist</artifactId> <artifactId>hadoop-project-dist</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project-dist</relativePath> <relativePath>../../hadoop-project-dist</relativePath>
</parent> </parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId> <artifactId>hadoop-hdfs</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<description>Apache Hadoop HDFS</description> <description>Apache Hadoop HDFS</description>
<name>Apache Hadoop HDFS</name> <name>Apache Hadoop HDFS</name>
<packaging>jar</packaging> <packaging>jar</packaging>

View File

@ -608,19 +608,6 @@ public class DFSUtil {
return capacity <= 0 ? 0 : (remaining * 100.0f)/capacity; return capacity <= 0 ? 0 : (remaining * 100.0f)/capacity;
} }
/**
* @param address address of format host:port
* @return InetSocketAddress for the address
*/
public static InetSocketAddress getSocketAddress(String address) {
int colon = address.indexOf(":");
if (colon < 0) {
return new InetSocketAddress(address, 0);
}
return new InetSocketAddress(address.substring(0, colon),
Integer.parseInt(address.substring(colon + 1)));
}
/** /**
* Round bytes to GiB (gibibyte) * Round bytes to GiB (gibibyte)
* @param bytes number of bytes * @param bytes number of bytes

View File

@ -498,7 +498,7 @@ public class JspHelper {
String namenodeAddressInUrl = request.getParameter(NAMENODE_ADDRESS); String namenodeAddressInUrl = request.getParameter(NAMENODE_ADDRESS);
InetSocketAddress namenodeAddress = null; InetSocketAddress namenodeAddress = null;
if (namenodeAddressInUrl != null) { if (namenodeAddressInUrl != null) {
namenodeAddress = DFSUtil.getSocketAddress(namenodeAddressInUrl); namenodeAddress = NetUtils.createSocketAddr(namenodeAddressInUrl);
} else if (context != null) { } else if (context != null) {
namenodeAddress = NameNodeHttpServer.getNameNodeAddressFromContext( namenodeAddress = NameNodeHttpServer.getNameNodeAddressFromContext(
context); context);

View File

@ -46,15 +46,14 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
/** /**
* Performs two types of scanning: * Performs two types of scanning:
* <li> Gets block files from the data directories and reconciles the * <li> Gets block files from the data directories and reconciles the
* difference between the blocks on the disk and in memory in * difference between the blocks on the disk and in memory.</li>
* {@link FSDataset}</li>
* <li> Scans the data directories for block files under a block pool * <li> Scans the data directories for block files under a block pool
* and verifies that the files are not corrupt</li> * and verifies that the files are not corrupt</li>
* This keeps track of blocks and their last verification times. * This keeps track of blocks and their last verification times.
@ -78,7 +77,7 @@ class BlockPoolSliceScanner {
private long scanPeriod = DEFAULT_SCAN_PERIOD_HOURS * 3600 * 1000; private long scanPeriod = DEFAULT_SCAN_PERIOD_HOURS * 3600 * 1000;
private DataNode datanode; private DataNode datanode;
private FSDataset dataset; private final FSDatasetInterface dataset;
// sorted set // sorted set
private TreeSet<BlockScanInfo> blockInfoSet; private TreeSet<BlockScanInfo> blockInfoSet;
@ -137,8 +136,8 @@ class BlockPoolSliceScanner {
} }
} }
BlockPoolSliceScanner(DataNode datanode, FSDataset dataset, Configuration conf, BlockPoolSliceScanner(DataNode datanode, FSDatasetInterface dataset,
String bpid) { Configuration conf, String bpid) {
this.datanode = datanode; this.datanode = datanode;
this.dataset = dataset; this.dataset = dataset;
this.blockPoolId = bpid; this.blockPoolId = bpid;
@ -220,16 +219,16 @@ class BlockPoolSliceScanner {
* otherwise, pick the first directory. * otherwise, pick the first directory.
*/ */
File dir = null; File dir = null;
List<FSVolume> volumes = dataset.volumes.getVolumes(); List<FSVolumeInterface> volumes = dataset.getVolumes();
for (FSDataset.FSVolume vol : dataset.volumes.getVolumes()) { for (FSVolumeInterface vol : volumes) {
File bpDir = vol.getBlockPoolSlice(blockPoolId).getDirectory(); File bpDir = vol.getDirectory(blockPoolId);
if (LogFileHandler.isFilePresent(bpDir, verificationLogFile)) { if (LogFileHandler.isFilePresent(bpDir, verificationLogFile)) {
dir = bpDir; dir = bpDir;
break; break;
} }
} }
if (dir == null) { if (dir == null) {
dir = volumes.get(0).getBlockPoolSlice(blockPoolId).getDirectory(); dir = volumes.get(0).getDirectory(blockPoolId);
} }
try { try {
@ -577,8 +576,8 @@ class BlockPoolSliceScanner {
bytesLeft += len; bytesLeft += len;
} }
static File getCurrentFile(FSVolume vol, String bpid) throws IOException { static File getCurrentFile(FSVolumeInterface vol, String bpid) throws IOException {
return LogFileHandler.getCurrentFile(vol.getBlockPoolSlice(bpid).getDirectory(), return LogFileHandler.getCurrentFile(vol.getDirectory(bpid),
BlockPoolSliceScanner.verificationLogFile); BlockPoolSliceScanner.verificationLogFile);
} }

View File

@ -22,7 +22,7 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/************************************************** /**************************************************
* BlockVolumeChoosingPolicy allows a DataNode to * BlockVolumeChoosingPolicy allows a DataNode to
@ -46,7 +46,7 @@ public interface BlockVolumeChoosingPolicy {
* @return the chosen volume to store the block. * @return the chosen volume to store the block.
* @throws IOException when disks are unavailable or are full. * @throws IOException when disks are unavailable or are full.
*/ */
public FSVolume chooseVolume(List<FSVolume> volumes, long blockSize) public FSVolumeInterface chooseVolume(List<FSVolumeInterface> volumes, long blockSize)
throws IOException; throws IOException;
} }
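Editor's sketch (not part of the commit): chooseVolume now works against the List<FSVolumeInterface> abstraction instead of the concrete FSDataset.FSVolume. A rough first-fit policy against the updated signature; the getAvailable() accessor used below is assumed to exist on FSVolumeInterface and does not appear in this hunk:

// Sketch only: trivial first-fit implementation of the updated interface.
package org.apache.hadoop.hdfs.server.datanode;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;

public class FirstFitVolumeChoosingPolicy implements BlockVolumeChoosingPolicy {
  @Override
  public FSVolumeInterface chooseVolume(List<FSVolumeInterface> volumes,
      long blockSize) throws IOException {
    for (FSVolumeInterface vol : volumes) {
      // getAvailable() is an assumed accessor, not shown in the hunk above.
      if (vol.getAvailable() > blockSize) {
        return vol;
      }
    }
    throw new DiskOutOfSpaceException(
        "No volume has " + blockSize + " bytes available");
  }
}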

View File

@ -27,12 +27,12 @@ import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpServletResponse;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/** /**
* DataBlockScanner manages block scanning for all the block pools. For each * DataBlockScanner manages block scanning for all the block pools. For each
@ -44,7 +44,7 @@ import org.apache.commons.logging.LogFactory;
public class DataBlockScanner implements Runnable { public class DataBlockScanner implements Runnable {
public static final Log LOG = LogFactory.getLog(DataBlockScanner.class); public static final Log LOG = LogFactory.getLog(DataBlockScanner.class);
private final DataNode datanode; private final DataNode datanode;
private final FSDataset dataset; private final FSDatasetInterface dataset;
private final Configuration conf; private final Configuration conf;
/** /**
@ -55,7 +55,7 @@ public class DataBlockScanner implements Runnable {
new TreeMap<String, BlockPoolSliceScanner>(); new TreeMap<String, BlockPoolSliceScanner>();
Thread blockScannerThread = null; Thread blockScannerThread = null;
DataBlockScanner(DataNode datanode, FSDataset dataset, Configuration conf) { DataBlockScanner(DataNode datanode, FSDatasetInterface dataset, Configuration conf) {
this.datanode = datanode; this.datanode = datanode;
this.dataset = dataset; this.dataset = dataset;
this.conf = conf; this.conf = conf;
@ -135,7 +135,7 @@ public class DataBlockScanner implements Runnable {
.iterator(); .iterator();
while (bpidIterator.hasNext()) { while (bpidIterator.hasNext()) {
String bpid = bpidIterator.next(); String bpid = bpidIterator.next();
for (FSDataset.FSVolume vol : dataset.volumes.getVolumes()) { for (FSDatasetInterface.FSVolumeInterface vol : dataset.getVolumes()) {
try { try {
File currFile = BlockPoolSliceScanner.getCurrentFile(vol, bpid); File currFile = BlockPoolSliceScanner.getCurrentFile(vol, bpid);
if (currFile.exists()) { if (currFile.exists()) {

View File

@ -125,7 +125,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources; import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods; import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods;
@ -582,11 +581,11 @@ public class DataNode extends Configured
if (conf.getInt(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, if (conf.getInt(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT) < 0) { DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT) < 0) {
reason = "verification is turned off by configuration"; reason = "verification is turned off by configuration";
} else if (!(data instanceof FSDataset)) { } else if ("SimulatedFSDataset".equals(data.getClass().getSimpleName())) {
reason = "verifcation is supported only with FSDataset"; reason = "verifcation is not supported by SimulatedFSDataset";
} }
if (reason == null) { if (reason == null) {
blockScanner = new DataBlockScanner(this, (FSDataset)data, conf); blockScanner = new DataBlockScanner(this, data, conf);
blockScanner.start(); blockScanner.start();
} else { } else {
LOG.info("Periodic Block Verification scan is disabled because " + LOG.info("Periodic Block Verification scan is disabled because " +
@ -611,11 +610,11 @@ public class DataNode extends Configured
if (conf.getInt(DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, if (conf.getInt(DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT) < 0) { DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT) < 0) {
reason = "verification is turned off by configuration"; reason = "verification is turned off by configuration";
} else if (!(data instanceof FSDataset)) { } else if ("SimulatedFSDataset".equals(data.getClass().getSimpleName())) {
reason = "verification is supported only with FSDataset"; reason = "verifcation is not supported by SimulatedFSDataset";
} }
if (reason == null) { if (reason == null) {
directoryScanner = new DirectoryScanner(this, (FSDataset) data, conf); directoryScanner = new DirectoryScanner(this, data, conf);
directoryScanner.start(); directoryScanner.start();
} else { } else {
LOG.info("Periodic Directory Tree Verification scan is disabled because " + LOG.info("Periodic Directory Tree Verification scan is disabled because " +
@ -2237,16 +2236,7 @@ public class DataNode extends Configured
*/ */
@Override // DataNodeMXBean @Override // DataNodeMXBean
public String getVolumeInfo() { public String getVolumeInfo() {
final Map<String, Object> info = new HashMap<String, Object>(); return JSON.toString(data.getVolumeInfoMap());
Collection<VolumeInfo> volumes = ((FSDataset)this.data).getVolumeInfo();
for (VolumeInfo v : volumes) {
final Map<String, Object> innerInfo = new HashMap<String, Object>();
innerInfo.put("usedSpace", v.usedSpace);
innerInfo.put("freeSpace", v.freeSpace);
innerInfo.put("reservedSpace", v.reservedSpace);
info.put(v.directory, innerInfo);
}
return JSON.toString(info);
} }
@Override // DataNodeMXBean @Override // DataNodeMXBean

View File

@ -751,7 +751,7 @@ public class DataStorage extends Storage {
Matcher matcher = PRE_GENSTAMP_META_FILE_PATTERN.matcher(oldFileName); Matcher matcher = PRE_GENSTAMP_META_FILE_PATTERN.matcher(oldFileName);
if (matcher.matches()) { if (matcher.matches()) {
//return the current metadata file name //return the current metadata file name
return FSDataset.getMetaFileName(matcher.group(1), return DatanodeUtil.getMetaFileName(matcher.group(1),
GenerationStamp.GRANDFATHER_GENERATION_STAMP); GenerationStamp.GRANDFATHER_GENERATION_STAMP);
} }
return oldFileName; return oldFileName;

View File

@ -53,18 +53,30 @@ import org.apache.hadoop.util.StringUtils;
@InterfaceAudience.Private @InterfaceAudience.Private
public class DatanodeJspHelper { public class DatanodeJspHelper {
private static DFSClient getDFSClient(final UserGroupInformation user, private static DFSClient getDFSClient(final UserGroupInformation user,
final InetSocketAddress addr, final String addr,
final Configuration conf final Configuration conf
) throws IOException, ) throws IOException,
InterruptedException { InterruptedException {
return return
user.doAs(new PrivilegedExceptionAction<DFSClient>() { user.doAs(new PrivilegedExceptionAction<DFSClient>() {
public DFSClient run() throws IOException { public DFSClient run() throws IOException {
return new DFSClient(addr, conf); return new DFSClient(NetUtils.createSocketAddr(addr), conf);
} }
}); });
} }
/**
* Internal convenience method for canonicalizing host name.
* @param addr name:port or name
* @return canonicalized host name
*/
private static String canonicalize(String addr) {
// default port 1 is supplied to allow addr without port.
// the port will be ignored.
return NetUtils.createSocketAddr(addr, 1).getAddress()
.getCanonicalHostName();
}
private static final SimpleDateFormat lsDateFormat = private static final SimpleDateFormat lsDateFormat =
new SimpleDateFormat("yyyy-MM-dd HH:mm"); new SimpleDateFormat("yyyy-MM-dd HH:mm");
@ -102,8 +114,7 @@ public class DatanodeJspHelper {
return; return;
} }
InetSocketAddress namenodeAddress = DFSUtil.getSocketAddress(nnAddr); DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
DFSClient dfs = getDFSClient(ugi, namenodeAddress, conf);
String target = dir; String target = dir;
final HdfsFileStatus targetStatus = dfs.getFileInfo(target); final HdfsFileStatus targetStatus = dfs.getFileInfo(target);
if (targetStatus == null) { // not exists if (targetStatus == null) { // not exists
@ -125,8 +136,7 @@ public class DatanodeJspHelper {
out.print("Empty file"); out.print("Empty file");
} else { } else {
DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf); DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf);
String fqdn = InetAddress.getByName(chosenNode.getHost()) String fqdn = canonicalize(chosenNode.getHost());
.getCanonicalHostName();
String datanodeAddr = chosenNode.getName(); String datanodeAddr = chosenNode.getName();
int datanodePort = Integer.parseInt(datanodeAddr.substring( int datanodePort = Integer.parseInt(datanodeAddr.substring(
datanodeAddr.indexOf(':') + 1, datanodeAddr.length())); datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
@ -210,9 +220,8 @@ public class DatanodeJspHelper {
JspHelper.addTableFooter(out); JspHelper.addTableFooter(out);
} }
} }
String namenodeHost = namenodeAddress.getHostName();
out.print("<br><a href=\"http://" out.print("<br><a href=\"http://"
+ InetAddress.getByName(namenodeHost).getCanonicalHostName() + ":" + canonicalize(nnAddr) + ":"
+ namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>"); + namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>");
dfs.close(); dfs.close();
} }
@ -282,8 +291,7 @@ public class DatanodeJspHelper {
} }
long blockSize = Long.parseLong(blockSizeStr); long blockSize = Long.parseLong(blockSizeStr);
final InetSocketAddress namenodeAddress = DFSUtil.getSocketAddress(nnAddr); final DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
final DFSClient dfs = getDFSClient(ugi, namenodeAddress, conf);
List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0, List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
Long.MAX_VALUE).getLocatedBlocks(); Long.MAX_VALUE).getLocatedBlocks();
// Add the various links for looking at the file contents // Add the various links for looking at the file contents
@ -305,8 +313,7 @@ public class DatanodeJspHelper {
dfs.close(); dfs.close();
return; return;
} }
String fqdn = InetAddress.getByName(chosenNode.getHost()) String fqdn = canonicalize(chosenNode.getHost());
.getCanonicalHostName();
String tailUrl = "http://" + fqdn + ":" + chosenNode.getInfoPort() String tailUrl = "http://" + fqdn + ":" + chosenNode.getInfoPort()
+ "/tail.jsp?filename=" + URLEncoder.encode(filename, "UTF-8") + "/tail.jsp?filename=" + URLEncoder.encode(filename, "UTF-8")
+ "&namenodeInfoPort=" + namenodeInfoPort + "&namenodeInfoPort=" + namenodeInfoPort
@ -345,9 +352,7 @@ public class DatanodeJspHelper {
// generate a table and dump the info // generate a table and dump the info
out.println("\n<table>"); out.println("\n<table>");
String namenodeHost = namenodeAddress.getHostName(); String nnCanonicalName = canonicalize(nnAddr);
String namenodeHostName = InetAddress.getByName(namenodeHost).getCanonicalHostName();
for (LocatedBlock cur : blocks) { for (LocatedBlock cur : blocks) {
out.print("<tr>"); out.print("<tr>");
final String blockidstring = Long.toString(cur.getBlock().getBlockId()); final String blockidstring = Long.toString(cur.getBlock().getBlockId());
@ -358,7 +363,7 @@ public class DatanodeJspHelper {
String datanodeAddr = locs[j].getName(); String datanodeAddr = locs[j].getName();
datanodePort = Integer.parseInt(datanodeAddr.substring(datanodeAddr datanodePort = Integer.parseInt(datanodeAddr.substring(datanodeAddr
.indexOf(':') + 1, datanodeAddr.length())); .indexOf(':') + 1, datanodeAddr.length()));
fqdn = InetAddress.getByName(locs[j].getHost()).getCanonicalHostName(); fqdn = canonicalize(locs[j].getHost());
String blockUrl = "http://" + fqdn + ":" + locs[j].getInfoPort() String blockUrl = "http://" + fqdn + ":" + locs[j].getInfoPort()
+ "/browseBlock.jsp?blockId=" + blockidstring + "/browseBlock.jsp?blockId=" + blockidstring
+ "&blockSize=" + blockSize + "&blockSize=" + blockSize
@ -370,7 +375,7 @@ public class DatanodeJspHelper {
+ JspHelper.getDelegationTokenUrlParam(tokenString) + JspHelper.getDelegationTokenUrlParam(tokenString)
+ JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr); + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
String blockInfoUrl = "http://" + namenodeHostName + ":" String blockInfoUrl = "http://" + nnCanonicalName + ":"
+ namenodeInfoPort + namenodeInfoPort
+ "/block_info_xml.jsp?blockId=" + blockidstring; + "/block_info_xml.jsp?blockId=" + blockidstring;
out.print("<td>&nbsp</td><td><a href=\"" + blockUrl + "\">" out.print("<td>&nbsp</td><td><a href=\"" + blockUrl + "\">"
@ -382,7 +387,7 @@ public class DatanodeJspHelper {
out.println("</table>"); out.println("</table>");
out.print("<hr>"); out.print("<hr>");
out.print("<br><a href=\"http://" out.print("<br><a href=\"http://"
+ InetAddress.getByName(namenodeHost).getCanonicalHostName() + ":" + nnCanonicalName + ":"
+ namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>"); + namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>");
dfs.close(); dfs.close();
} }
@ -419,8 +424,7 @@ public class DatanodeJspHelper {
return; return;
} }
final DFSClient dfs = getDFSClient(ugi, final DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
DFSUtil.getSocketAddress(nnAddr), conf);
String bpid = null; String bpid = null;
Token<BlockTokenIdentifier> blockToken = BlockTokenSecretManager.DUMMY_TOKEN; Token<BlockTokenIdentifier> blockToken = BlockTokenSecretManager.DUMMY_TOKEN;
@ -518,8 +522,7 @@ public class DatanodeJspHelper {
String datanodeAddr = d.getName(); String datanodeAddr = d.getName();
nextDatanodePort = Integer.parseInt(datanodeAddr.substring( nextDatanodePort = Integer.parseInt(datanodeAddr.substring(
datanodeAddr.indexOf(':') + 1, datanodeAddr.length())); datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
nextHost = InetAddress.getByName(d.getHost()) nextHost = d.getHost();
.getCanonicalHostName();
nextPort = d.getInfoPort(); nextPort = d.getInfoPort();
} }
} }
@ -533,7 +536,7 @@ public class DatanodeJspHelper {
} }
String nextUrl = null; String nextUrl = null;
if (nextBlockIdStr != null) { if (nextBlockIdStr != null) {
nextUrl = "http://" + nextHost + ":" + nextPort nextUrl = "http://" + canonicalize(nextHost) + ":" + nextPort
+ "/browseBlock.jsp?blockId=" + nextBlockIdStr + "/browseBlock.jsp?blockId=" + nextBlockIdStr
+ "&blockSize=" + nextBlockSize + "&blockSize=" + nextBlockSize
+ "&startOffset=" + nextStartOffset + "&startOffset=" + nextStartOffset
@ -573,8 +576,7 @@ public class DatanodeJspHelper {
String datanodeAddr = d.getName(); String datanodeAddr = d.getName();
prevDatanodePort = Integer.parseInt(datanodeAddr.substring( prevDatanodePort = Integer.parseInt(datanodeAddr.substring(
datanodeAddr.indexOf(':') + 1, datanodeAddr.length())); datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
prevHost = InetAddress.getByName(d.getHost()) prevHost = d.getHost();
.getCanonicalHostName();
prevPort = d.getInfoPort(); prevPort = d.getInfoPort();
} }
} }
@ -591,7 +593,7 @@ public class DatanodeJspHelper {
String prevUrl = null; String prevUrl = null;
if (prevBlockIdStr != null) { if (prevBlockIdStr != null) {
prevUrl = "http://" + prevHost + ":" + prevPort prevUrl = "http://" + canonicalize(prevHost) + ":" + prevPort
+ "/browseBlock.jsp?blockId=" + prevBlockIdStr + "/browseBlock.jsp?blockId=" + prevBlockIdStr
+ "&blockSize=" + prevBlockSize + "&blockSize=" + prevBlockSize
+ "&startOffset=" + prevStartOffset + "&startOffset=" + prevStartOffset
@ -669,8 +671,7 @@ public class DatanodeJspHelper {
+ "\">"); + "\">");
// fetch the block from the datanode that has the last block for this file // fetch the block from the datanode that has the last block for this file
final DFSClient dfs = getDFSClient(ugi, DFSUtil.getSocketAddress(nnAddr), final DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
conf);
List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0, List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
Long.MAX_VALUE).getLocatedBlocks(); Long.MAX_VALUE).getLocatedBlocks();
if (blocks == null || blocks.size() == 0) { if (blocks == null || blocks.size() == 0) {
@ -710,6 +711,6 @@ public class DatanodeJspHelper {
final DataNode datanode, final Configuration conf, final DataNode datanode, final Configuration conf,
final UserGroupInformation ugi) throws IOException, InterruptedException { final UserGroupInformation ugi) throws IOException, InterruptedException {
final String nnAddr = request.getParameter(JspHelper.NAMENODE_ADDRESS); final String nnAddr = request.getParameter(JspHelper.NAMENODE_ADDRESS);
return getDFSClient(ugi, DFSUtil.getSocketAddress(nnAddr), conf); return getDFSClient(ugi, nnAddr, conf);
} }
} }
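Editor's sketch (not part of the commit): the new canonicalize() helper relies on NetUtils.createSocketAddr accepting an address with or without an explicit port once a default port is supplied. A small illustration; the host names are placeholders:

// Sketch only: the default port makes createSocketAddr tolerant of a missing ":port".
import java.net.InetSocketAddress;
import org.apache.hadoop.net.NetUtils;

public class CreateSocketAddrSketch {
  public static void main(String[] args) {
    InetSocketAddress withPort = NetUtils.createSocketAddr("nn.example.com:8020", 1);
    InetSocketAddress noPort   = NetUtils.createSocketAddr("nn.example.com", 1);

    System.out.println(withPort.getPort()); // 8020 - the explicit port wins
    System.out.println(noPort.getPort());   // 1    - the default, which canonicalize() ignores
  }
}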

View File

@ -18,7 +18,9 @@
package org.apache.hadoop.hdfs.server.datanode; package org.apache.hadoop.hdfs.server.datanode;
import java.io.File; import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
@ -26,6 +28,10 @@ import org.apache.hadoop.hdfs.protocol.Block;
/** Provide utility methods for Datanode. */ /** Provide utility methods for Datanode. */
@InterfaceAudience.Private @InterfaceAudience.Private
class DatanodeUtil { class DatanodeUtil {
static final String METADATA_EXTENSION = ".meta";
static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
private final static String DISK_ERROR = "Possible disk error on file creation: "; private final static String DISK_ERROR = "Possible disk error on file creation: ";
/** Get the cause of an I/O exception if caused by a possible disk error /** Get the cause of an I/O exception if caused by a possible disk error
@ -64,4 +70,37 @@ class DatanodeUtil {
} }
return f; return f;
} }
static String getMetaFileName(String blockFileName, long genStamp) {
return blockFileName + "_" + genStamp + METADATA_EXTENSION;
}
static File getMetaFile(File f, long genStamp) {
return new File(getMetaFileName(f.getAbsolutePath(), genStamp));
}
/** Find the corresponding meta data file from a given block file */
static File findMetaFile(final File blockFile) throws IOException {
final String prefix = blockFile.getName() + "_";
final File parent = blockFile.getParentFile();
File[] matches = parent.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return dir.equals(parent)
&& name.startsWith(prefix) && name.endsWith(METADATA_EXTENSION);
}
});
if (matches == null || matches.length == 0) {
throw new IOException("Meta file not found, blockFile=" + blockFile);
}
else if (matches.length > 1) {
throw new IOException("Found more than one meta files: "
+ Arrays.asList(matches));
}
return matches[0];
}
static File getUnlinkTmpFile(File f) {
return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX);
}
} }
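Editor's sketch (not part of the commit): the helpers consolidated into DatanodeUtil encode the block/meta file naming convention that findMetaFile searches for. An illustration with hypothetical values:

// Sketch only: mirrors DatanodeUtil.getMetaFileName(blockFileName, genStamp).
public class MetaFileNameSketch {
  public static void main(String[] args) {
    String blockFileName = "blk_7162739548153522810"; // hypothetical block file
    long genStamp = 1005L;                            // hypothetical generation stamp
    System.out.println(blockFileName + "_" + genStamp + ".meta");
    // prints blk_7162739548153522810_1005.meta, the side file that
    // findMetaFile(..) locates next to the block file.
  }
}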

View File

@ -43,20 +43,19 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Daemon;
/** /**
* Periodically scans the data directories for block and block metadata files. * Periodically scans the data directories for block and block metadata files.
* Reconciles the differences with block information maintained in * Reconciles the differences with block information maintained in the dataset.
* {@link FSDataset}
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class DirectoryScanner implements Runnable { public class DirectoryScanner implements Runnable {
private static final Log LOG = LogFactory.getLog(DirectoryScanner.class); private static final Log LOG = LogFactory.getLog(DirectoryScanner.class);
private final DataNode datanode; private final DataNode datanode;
private final FSDataset dataset; private final FSDatasetInterface dataset;
private final ExecutorService reportCompileThreadPool; private final ExecutorService reportCompileThreadPool;
private final ScheduledExecutorService masterThread; private final ScheduledExecutorService masterThread;
private final long scanPeriodMsecs; private final long scanPeriodMsecs;
@ -158,13 +157,13 @@ public class DirectoryScanner implements Runnable {
private final long blockId; private final long blockId;
private final File metaFile; private final File metaFile;
private final File blockFile; private final File blockFile;
private final FSVolume volume; private final FSVolumeInterface volume;
ScanInfo(long blockId) { ScanInfo(long blockId) {
this(blockId, null, null, null); this(blockId, null, null, null);
} }
ScanInfo(long blockId, File blockFile, File metaFile, FSVolume vol) { ScanInfo(long blockId, File blockFile, File metaFile, FSVolumeInterface vol) {
this.blockId = blockId; this.blockId = blockId;
this.metaFile = metaFile; this.metaFile = metaFile;
this.blockFile = blockFile; this.blockFile = blockFile;
@ -183,7 +182,7 @@ public class DirectoryScanner implements Runnable {
return blockId; return blockId;
} }
FSVolume getVolume() { FSVolumeInterface getVolume() {
return volume; return volume;
} }
@ -220,7 +219,7 @@ public class DirectoryScanner implements Runnable {
} }
} }
DirectoryScanner(DataNode dn, FSDataset dataset, Configuration conf) { DirectoryScanner(DataNode dn, FSDatasetInterface dataset, Configuration conf) {
this.datanode = dn; this.datanode = dn;
this.dataset = dataset; this.dataset = dataset;
int interval = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, int interval = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
@ -269,7 +268,7 @@ public class DirectoryScanner implements Runnable {
return; return;
} }
String[] bpids = dataset.getBPIdlist(); String[] bpids = dataset.getBlockPoolList();
for(String bpid : bpids) { for(String bpid : bpids) {
UpgradeManagerDatanode um = UpgradeManagerDatanode um =
datanode.getUpgradeManagerDatanode(bpid); datanode.getUpgradeManagerDatanode(bpid);
@ -411,17 +410,29 @@ public class DirectoryScanner implements Runnable {
diffRecord.add(new ScanInfo(blockId)); diffRecord.add(new ScanInfo(blockId));
} }
/** Is the given volume still valid in the dataset? */
private static boolean isValid(final FSDatasetInterface dataset,
final FSVolumeInterface volume) {
for (FSVolumeInterface vol : dataset.getVolumes()) {
if (vol == volume) {
return true;
}
}
return false;
}
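The check above deliberately uses reference equality against the dataset's current volume list, so a volume object dropped from the dataset (for example after a disk failure) is never treated as valid. A stand-alone sketch of the same idea (the generic type is a placeholder):

import java.util.List;

class VolumeValidity {
    // Reference equality on purpose: only the exact instances still held by
    // the dataset count as valid volumes.
    static <V> boolean isValid(List<V> currentVolumes, V volume) {
        for (V v : currentVolumes) {
            if (v == volume) {
                return true;
            }
        }
        return false;
    }
}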
/** Get lists of blocks on the disk sorted by blockId, per blockpool */ /** Get lists of blocks on the disk sorted by blockId, per blockpool */
private Map<String, ScanInfo[]> getDiskReport() { private Map<String, ScanInfo[]> getDiskReport() {
// First get list of data directories // First get list of data directories
List<FSVolume> volumes = dataset.volumes.getVolumes(); final List<FSVolumeInterface> volumes = dataset.getVolumes();
ArrayList<ScanInfoPerBlockPool> dirReports = ArrayList<ScanInfoPerBlockPool> dirReports =
new ArrayList<ScanInfoPerBlockPool>(volumes.size()); new ArrayList<ScanInfoPerBlockPool>(volumes.size());
Map<Integer, Future<ScanInfoPerBlockPool>> compilersInProgress = Map<Integer, Future<ScanInfoPerBlockPool>> compilersInProgress =
new HashMap<Integer, Future<ScanInfoPerBlockPool>>(); new HashMap<Integer, Future<ScanInfoPerBlockPool>>();
for (int i = 0; i < volumes.size(); i++) { for (int i = 0; i < volumes.size(); i++) {
if (!dataset.volumes.isValid(volumes.get(i))) { // volume is still valid if (!isValid(dataset, volumes.get(i))) {
// volume is invalid
dirReports.add(i, null); dirReports.add(i, null);
} else { } else {
ReportCompiler reportCompiler = ReportCompiler reportCompiler =
@ -446,7 +457,8 @@ public class DirectoryScanner implements Runnable {
// Compile consolidated report for all the volumes // Compile consolidated report for all the volumes
ScanInfoPerBlockPool list = new ScanInfoPerBlockPool(); ScanInfoPerBlockPool list = new ScanInfoPerBlockPool();
for (int i = 0; i < volumes.size(); i++) { for (int i = 0; i < volumes.size(); i++) {
if (dataset.volumes.isValid(volumes.get(i))) { // volume is still valid if (isValid(dataset, volumes.get(i))) {
// volume is still valid
list.addAll(dirReports.get(i)); list.addAll(dirReports.get(i));
} }
} }
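getDiskReport() above fans one report compiler out per volume and joins the futures before consolidating. A stripped-down sketch of that fan-out (volumes and reports reduced to strings, class name made up):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

class PerVolumeFanOut {
    static List<String> compileAll(List<String> volumes)
            throws InterruptedException, ExecutionException {
        List<String> consolidated = new ArrayList<String>();
        if (volumes.isEmpty()) {
            return consolidated;
        }
        ExecutorService pool = Executors.newFixedThreadPool(volumes.size());
        try {
            List<Future<String>> futures = new ArrayList<Future<String>>();
            for (final String vol : volumes) {
                futures.add(pool.submit(new Callable<String>() {
                    public String call() {
                        return "report for " + vol;   // stands in for ReportCompiler.call()
                    }
                }));
            }
            for (Future<String> f : futures) {
                consolidated.add(f.get());            // join each per-volume result
            }
        } finally {
            pool.shutdown();
        }
        return consolidated;
    }
}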
@ -461,9 +473,9 @@ public class DirectoryScanner implements Runnable {
private static class ReportCompiler private static class ReportCompiler
implements Callable<ScanInfoPerBlockPool> { implements Callable<ScanInfoPerBlockPool> {
private FSVolume volume; private FSVolumeInterface volume;
public ReportCompiler(FSVolume volume) { public ReportCompiler(FSVolumeInterface volume) {
this.volume = volume; this.volume = volume;
} }
@ -473,14 +485,14 @@ public class DirectoryScanner implements Runnable {
ScanInfoPerBlockPool result = new ScanInfoPerBlockPool(bpList.length); ScanInfoPerBlockPool result = new ScanInfoPerBlockPool(bpList.length);
for (String bpid : bpList) { for (String bpid : bpList) {
LinkedList<ScanInfo> report = new LinkedList<ScanInfo>(); LinkedList<ScanInfo> report = new LinkedList<ScanInfo>();
File bpFinalizedDir = volume.getBlockPoolSlice(bpid).getFinalizedDir(); File bpFinalizedDir = volume.getFinalizedDir(bpid);
result.put(bpid, compileReport(volume, bpFinalizedDir, report)); result.put(bpid, compileReport(volume, bpFinalizedDir, report));
} }
return result; return result;
} }
/** Compile list {@link ScanInfo} for the blocks in the directory <dir> */ /** Compile list {@link ScanInfo} for the blocks in the directory <dir> */
private LinkedList<ScanInfo> compileReport(FSVolume vol, File dir, private LinkedList<ScanInfo> compileReport(FSVolumeInterface vol, File dir,
LinkedList<ScanInfo> report) { LinkedList<ScanInfo> report) {
File[] files; File[] files;
try { try {


@ -23,7 +23,6 @@ import java.io.File;
import java.io.FileInputStream; import java.io.FileInputStream;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.FileOutputStream; import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.io.RandomAccessFile; import java.io.RandomAccessFile;
@ -81,14 +80,13 @@ class FSDataset implements FSDatasetInterface {
* A node type that can be built into a tree reflecting the * A node type that can be built into a tree reflecting the
* hierarchy of blocks on the local disk. * hierarchy of blocks on the local disk.
*/ */
class FSDir { private class FSDir {
File dir; final File dir;
int numBlocks = 0; int numBlocks = 0;
FSDir children[]; FSDir children[];
int lastChildIdx = 0; int lastChildIdx = 0;
/**
*/ private FSDir(File dir)
public FSDir(File dir)
throws IOException { throws IOException {
this.dir = dir; this.dir = dir;
this.children = null; this.children = null;
@ -113,7 +111,7 @@ class FSDataset implements FSDatasetInterface {
} }
} }
public File addBlock(Block b, File src) throws IOException { private File addBlock(Block b, File src) throws IOException {
//First try without creating subdirectories //First try without creating subdirectories
File file = addBlock(b, src, false, false); File file = addBlock(b, src, false, false);
return (file != null) ? file : addBlock(b, src, true, true); return (file != null) ? file : addBlock(b, src, true, true);
@ -161,7 +159,7 @@ class FSDataset implements FSDatasetInterface {
return children[ lastChildIdx ].addBlock(b, src, true, false); return children[ lastChildIdx ].addBlock(b, src, true, false);
} }
void getVolumeMap(String bpid, ReplicasMap volumeMap, FSVolume volume) private void getVolumeMap(String bpid, ReplicasMap volumeMap, FSVolume volume)
throws IOException { throws IOException {
if (children != null) { if (children != null) {
for (int i = 0; i < children.length; i++) { for (int i = 0; i < children.length; i++) {
@ -207,7 +205,7 @@ class FSDataset implements FSDatasetInterface {
* check if a data directory is healthy * check if a data directory is healthy
* @throws DiskErrorException * @throws DiskErrorException
*/ */
public void checkDirTree() throws DiskErrorException { private void checkDirTree() throws DiskErrorException {
DiskChecker.checkDir(dir); DiskChecker.checkDir(dir);
if (children != null) { if (children != null) {
@ -217,7 +215,7 @@ class FSDataset implements FSDatasetInterface {
} }
} }
void clearPath(File f) { private void clearPath(File f) {
String root = dir.getAbsolutePath(); String root = dir.getAbsolutePath();
String dir = f.getAbsolutePath(); String dir = f.getAbsolutePath();
if (dir.startsWith(root)) { if (dir.startsWith(root)) {
@ -271,6 +269,7 @@ class FSDataset implements FSDatasetInterface {
return false; return false;
} }
@Override
public String toString() { public String toString() {
return "FSDir{" + return "FSDir{" +
"dir=" + dir + "dir=" + dir +
@ -284,7 +283,7 @@ class FSDataset implements FSDatasetInterface {
* Taken together, all BlockPoolSlices sharing a block pool ID across a * Taken together, all BlockPoolSlices sharing a block pool ID across a
* cluster represent a single block pool. * cluster represent a single block pool.
*/ */
class BlockPoolSlice { private class BlockPoolSlice {
private final String bpid; private final String bpid;
private final FSVolume volume; // volume to which this BlockPool belongs to private final FSVolume volume; // volume to which this BlockPool belongs to
private final File currentDir; // StorageDirectory/current/bpid/current private final File currentDir; // StorageDirectory/current/bpid/current
@ -343,10 +342,6 @@ class FSDataset implements FSDatasetInterface {
return currentDir.getParentFile(); return currentDir.getParentFile();
} }
File getCurrentDir() {
return currentDir;
}
File getFinalizedDir() { File getFinalizedDir() {
return finalizedDir.dir; return finalizedDir.dir;
} }
@ -387,7 +382,7 @@ class FSDataset implements FSDatasetInterface {
File addBlock(Block b, File f) throws IOException { File addBlock(Block b, File f) throws IOException {
File blockFile = finalizedDir.addBlock(b, f); File blockFile = finalizedDir.addBlock(b, f);
File metaFile = getMetaFile(blockFile , b.getGenerationStamp()); File metaFile = DatanodeUtil.getMetaFile(blockFile, b.getGenerationStamp());
dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length()); dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
return blockFile; return blockFile;
} }
@ -455,7 +450,7 @@ class FSDataset implements FSDatasetInterface {
DataInputStream checksumIn = null; DataInputStream checksumIn = null;
InputStream blockIn = null; InputStream blockIn = null;
try { try {
File metaFile = new File(getMetaFileName(blockFile.toString(), genStamp)); final File metaFile = DatanodeUtil.getMetaFile(blockFile, genStamp);
long blockFileLen = blockFile.length(); long blockFileLen = blockFile.length();
long metaFileLen = metaFile.length(); long metaFileLen = metaFile.length();
int crcHeaderLen = DataChecksum.getChecksumHeaderSize(); int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
@ -521,7 +516,7 @@ class FSDataset implements FSDatasetInterface {
} }
} }
class FSVolume { class FSVolume implements FSVolumeInterface {
private final Map<String, BlockPoolSlice> map = new HashMap<String, BlockPoolSlice>(); private final Map<String, BlockPoolSlice> map = new HashMap<String, BlockPoolSlice>();
private final File currentDir; // <StorageDirectory>/current private final File currentDir; // <StorageDirectory>/current
private final DF usage; private final DF usage;
@ -535,11 +530,6 @@ class FSDataset implements FSDatasetInterface {
this.usage = new DF(parent, conf); this.usage = new DF(parent, conf);
} }
/** Return storage directory corresponding to the volume */
File getDir() {
return currentDir.getParentFile();
}
File getCurrentDir() { File getCurrentDir() {
return currentDir; return currentDir;
} }
@ -584,7 +574,8 @@ class FSDataset implements FSDatasetInterface {
return remaining > 0 ? remaining : 0; return remaining > 0 ? remaining : 0;
} }
long getAvailable() throws IOException { @Override
public long getAvailable() throws IOException {
long remaining = getCapacity()-getDfsUsed(); long remaining = getCapacity()-getDfsUsed();
long available = usage.getAvailable(); long available = usage.getAvailable();
if (remaining>available) { if (remaining>available) {
@ -601,7 +592,7 @@ class FSDataset implements FSDatasetInterface {
return usage.getMount(); return usage.getMount();
} }
BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException { private BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
BlockPoolSlice bp = map.get(bpid); BlockPoolSlice bp = map.get(bpid);
if (bp == null) { if (bp == null) {
throw new IOException("block pool " + bpid + " is not found"); throw new IOException("block pool " + bpid + " is not found");
@ -609,10 +600,21 @@ class FSDataset implements FSDatasetInterface {
return bp; return bp;
} }
@Override
public File getDirectory(String bpid) throws IOException {
return getBlockPoolSlice(bpid).getDirectory();
}
@Override
public File getFinalizedDir(String bpid) throws IOException {
return getBlockPoolSlice(bpid).getFinalizedDir();
}
/** /**
* Make a deep copy of the list of currently active BPIDs * Make a deep copy of the list of currently active BPIDs
*/ */
String[] getBlockPoolList() { @Override
public String[] getBlockPoolList() {
synchronized(FSDataset.this) { synchronized(FSDataset.this) {
return map.keySet().toArray(new String[map.keySet().size()]); return map.keySet().toArray(new String[map.keySet().size()]);
} }
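The copy above is a snapshot of the key set taken while holding the dataset lock, so callers can iterate the returned array without further synchronization. The idea in isolation (class name made up):

import java.util.HashMap;
import java.util.Map;

class BlockPoolSnapshot {
    private final Object lock = new Object();
    private final Map<String, Object> map = new HashMap<String, Object>();

    // Copy the key set under the lock; the returned array is safe to iterate
    // even if the map changes afterwards.
    String[] blockPoolIds() {
        synchronized (lock) {
            return map.keySet().toArray(new String[map.keySet().size()]);
        }
    }
}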
@ -682,6 +684,7 @@ class FSDataset implements FSDatasetInterface {
bp.clearPath(f); bp.clearPath(f);
} }
@Override
public String toString() { public String toString() {
return currentDir.getAbsolutePath(); return currentDir.getAbsolutePath();
} }
@ -773,21 +776,18 @@ class FSDataset implements FSDatasetInterface {
* Read access to this unmodifiable list is not synchronized. * Read access to this unmodifiable list is not synchronized.
* This list is replaced on modification holding "this" lock. * This list is replaced on modification holding "this" lock.
*/ */
private volatile List<FSVolume> volumes = null; private volatile List<FSVolumeInterface> volumes = null;
BlockVolumeChoosingPolicy blockChooser; BlockVolumeChoosingPolicy blockChooser;
int numFailedVolumes; int numFailedVolumes;
FSVolumeSet(FSVolume[] volumes, int failedVols, BlockVolumeChoosingPolicy blockChooser) { FSVolumeSet(List<FSVolumeInterface> volumes, int failedVols,
List<FSVolume> list = Arrays.asList(volumes); BlockVolumeChoosingPolicy blockChooser) {
this.volumes = Collections.unmodifiableList(list); this.volumes = Collections.unmodifiableList(volumes);
this.blockChooser = blockChooser; this.blockChooser = blockChooser;
this.numFailedVolumes = failedVols; this.numFailedVolumes = failedVols;
} }
private int numberOfVolumes() {
return volumes.size();
}
private int numberOfFailedVolumes() { private int numberOfFailedVolumes() {
return numFailedVolumes; return numFailedVolumes;
} }
@ -800,36 +800,36 @@ class FSDataset implements FSDatasetInterface {
* @return next volume to store the block in. * @return next volume to store the block in.
*/ */
synchronized FSVolume getNextVolume(long blockSize) throws IOException { synchronized FSVolume getNextVolume(long blockSize) throws IOException {
return blockChooser.chooseVolume(volumes, blockSize); return (FSVolume)blockChooser.chooseVolume(volumes, blockSize);
} }
private long getDfsUsed() throws IOException { private long getDfsUsed() throws IOException {
long dfsUsed = 0L; long dfsUsed = 0L;
for (FSVolume vol : volumes) { for (FSVolumeInterface v : volumes) {
dfsUsed += vol.getDfsUsed(); dfsUsed += ((FSVolume)v).getDfsUsed();
} }
return dfsUsed; return dfsUsed;
} }
private long getBlockPoolUsed(String bpid) throws IOException { private long getBlockPoolUsed(String bpid) throws IOException {
long dfsUsed = 0L; long dfsUsed = 0L;
for (FSVolume vol : volumes) { for (FSVolumeInterface v : volumes) {
dfsUsed += vol.getBlockPoolUsed(bpid); dfsUsed += ((FSVolume)v).getBlockPoolUsed(bpid);
} }
return dfsUsed; return dfsUsed;
} }
private long getCapacity() throws IOException { private long getCapacity() throws IOException {
long capacity = 0L; long capacity = 0L;
for (FSVolume vol : volumes) { for (FSVolumeInterface v : volumes) {
capacity += vol.getCapacity(); capacity += ((FSVolume)v).getCapacity();
} }
return capacity; return capacity;
} }
private long getRemaining() throws IOException { private long getRemaining() throws IOException {
long remaining = 0L; long remaining = 0L;
for (FSVolume vol : volumes) { for (FSVolumeInterface vol : volumes) {
remaining += vol.getAvailable(); remaining += vol.getAvailable();
} }
return remaining; return remaining;
@ -837,15 +837,15 @@ class FSDataset implements FSDatasetInterface {
private void getVolumeMap(ReplicasMap volumeMap) private void getVolumeMap(ReplicasMap volumeMap)
throws IOException { throws IOException {
for (FSVolume vol : volumes) { for (FSVolumeInterface v : volumes) {
vol.getVolumeMap(volumeMap); ((FSVolume)v).getVolumeMap(volumeMap);
} }
} }
private void getVolumeMap(String bpid, ReplicasMap volumeMap) private void getVolumeMap(String bpid, ReplicasMap volumeMap)
throws IOException { throws IOException {
for (FSVolume vol : volumes) { for (FSVolumeInterface v : volumes) {
vol.getVolumeMap(bpid, volumeMap); ((FSVolume)v).getVolumeMap(bpid, volumeMap);
} }
} }
@ -861,10 +861,10 @@ class FSDataset implements FSDatasetInterface {
ArrayList<FSVolume> removedVols = null; ArrayList<FSVolume> removedVols = null;
// Make a copy of volumes for performing modification // Make a copy of volumes for performing modification
List<FSVolume> volumeList = new ArrayList<FSVolume>(getVolumes()); final List<FSVolumeInterface> volumeList = new ArrayList<FSVolumeInterface>(volumes);
for (int idx = 0; idx < volumeList.size(); idx++) { for (int idx = 0; idx < volumeList.size(); idx++) {
FSVolume fsv = volumeList.get(idx); FSVolume fsv = (FSVolume)volumeList.get(idx);
try { try {
fsv.checkDirs(); fsv.checkDirs();
} catch (DiskErrorException e) { } catch (DiskErrorException e) {
@ -881,8 +881,8 @@ class FSDataset implements FSDatasetInterface {
// Remove null volumes from the volumes array // Remove null volumes from the volumes array
if (removedVols != null && removedVols.size() > 0) { if (removedVols != null && removedVols.size() > 0) {
List<FSVolume> newVols = new ArrayList<FSVolume>(); List<FSVolumeInterface> newVols = new ArrayList<FSVolumeInterface>();
for (FSVolume vol : volumeList) { for (FSVolumeInterface vol : volumeList) {
if (vol != null) { if (vol != null) {
newVols.add(vol); newVols.add(vol);
} }
@ -896,43 +896,29 @@ class FSDataset implements FSDatasetInterface {
return removedVols; return removedVols;
} }
@Override
public String toString() { public String toString() {
return volumes.toString(); return volumes.toString();
} }
boolean isValid(FSVolume volume) {
for (FSVolume vol : volumes) {
if (vol == volume) {
return true;
}
}
return false;
}
private void addBlockPool(String bpid, Configuration conf) private void addBlockPool(String bpid, Configuration conf)
throws IOException { throws IOException {
for (FSVolume v : volumes) { for (FSVolumeInterface v : volumes) {
v.addBlockPool(bpid, conf); ((FSVolume)v).addBlockPool(bpid, conf);
} }
} }
private void removeBlockPool(String bpid) { private void removeBlockPool(String bpid) {
for (FSVolume v : volumes) { for (FSVolumeInterface v : volumes) {
v.shutdownBlockPool(bpid); ((FSVolume)v).shutdownBlockPool(bpid);
} }
} }
/**
* @return unmodifiable list of volumes
*/
public List<FSVolume> getVolumes() {
return volumes;
}
private void shutdown() { private void shutdown() {
for (FSVolume volume : volumes) { for (FSVolumeInterface volume : volumes) {
if(volume != null) { if(volume != null) {
volume.shutdown(); ((FSVolume)volume).shutdown();
} }
} }
} }
@ -944,35 +930,20 @@ class FSDataset implements FSDatasetInterface {
// //
////////////////////////////////////////////////////// //////////////////////////////////////////////////////
//Find better place?
static final String METADATA_EXTENSION = ".meta";
static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
private static boolean isUnlinkTmpFile(File f) { private static boolean isUnlinkTmpFile(File f) {
String name = f.getName(); String name = f.getName();
return name.endsWith(UNLINK_BLOCK_SUFFIX); return name.endsWith(DatanodeUtil.UNLINK_BLOCK_SUFFIX);
}
static File getUnlinkTmpFile(File f) {
return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX);
} }
private static File getOrigFile(File unlinkTmpFile) { private static File getOrigFile(File unlinkTmpFile) {
String fileName = unlinkTmpFile.getName(); String fileName = unlinkTmpFile.getName();
return new File(unlinkTmpFile.getParentFile(), return new File(unlinkTmpFile.getParentFile(),
fileName.substring(0, fileName.length()-UNLINK_BLOCK_SUFFIX.length())); fileName.substring(0,
} fileName.length() - DatanodeUtil.UNLINK_BLOCK_SUFFIX.length()));
static String getMetaFileName(String blockFileName, long genStamp) {
return blockFileName + "_" + genStamp + METADATA_EXTENSION;
}
static File getMetaFile(File f , long genStamp) {
return new File(getMetaFileName(f.getAbsolutePath(), genStamp));
} }
protected File getMetaFile(ExtendedBlock b) throws IOException { protected File getMetaFile(ExtendedBlock b) throws IOException {
return getMetaFile(getBlockFile(b), b.getGenerationStamp()); return DatanodeUtil.getMetaFile(getBlockFile(b), b.getGenerationStamp());
} }
/** Find the metadata file for the specified block file. /** Find the metadata file for the specified block file.
@ -995,33 +966,12 @@ class FSDataset implements FSDatasetInterface {
return GenerationStamp.GRANDFATHER_GENERATION_STAMP; return GenerationStamp.GRANDFATHER_GENERATION_STAMP;
} }
/** Find the corresponding meta data file from a given block file */
private static File findMetaFile(final File blockFile) throws IOException {
final String prefix = blockFile.getName() + "_";
final File parent = blockFile.getParentFile();
File[] matches = parent.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return dir.equals(parent)
&& name.startsWith(prefix) && name.endsWith(METADATA_EXTENSION);
}
});
if (matches == null || matches.length == 0) {
throw new IOException("Meta file not found, blockFile=" + blockFile);
}
else if (matches.length > 1) {
throw new IOException("Found more than one meta files: "
+ Arrays.asList(matches));
}
return matches[0];
}
/** Find the corresponding meta data file from a given block file */ /** Find the corresponding meta data file from a given block file */
private static long parseGenerationStamp(File blockFile, File metaFile private static long parseGenerationStamp(File blockFile, File metaFile
) throws IOException { ) throws IOException {
String metaname = metaFile.getName(); String metaname = metaFile.getName();
String gs = metaname.substring(blockFile.getName().length() + 1, String gs = metaname.substring(blockFile.getName().length() + 1,
metaname.length() - METADATA_EXTENSION.length()); metaname.length() - DatanodeUtil.METADATA_EXTENSION.length());
try { try {
return Long.parseLong(gs); return Long.parseLong(gs);
} catch(NumberFormatException nfe) { } catch(NumberFormatException nfe) {
@ -1030,6 +980,11 @@ class FSDataset implements FSDatasetInterface {
} }
} }
@Override // FSDatasetInterface
public List<FSVolumeInterface> getVolumes() {
return volumes.volumes;
}
@Override // FSDatasetInterface @Override // FSDatasetInterface
public synchronized Block getStoredBlock(String bpid, long blkid) public synchronized Block getStoredBlock(String bpid, long blkid)
throws IOException { throws IOException {
@ -1037,7 +992,7 @@ class FSDataset implements FSDatasetInterface {
if (blockfile == null) { if (blockfile == null) {
return null; return null;
} }
File metafile = findMetaFile(blockfile); final File metafile = DatanodeUtil.findMetaFile(blockfile);
return new Block(blkid, blockfile.length(), return new Block(blkid, blockfile.length(),
parseGenerationStamp(blockfile, metafile)); parseGenerationStamp(blockfile, metafile));
} }
@ -1101,7 +1056,7 @@ class FSDataset implements FSDatasetInterface {
/** /**
* An FSDataset has a directory where it loads its data files. * An FSDataset has a directory where it loads its data files.
*/ */
public FSDataset(DataNode datanode, DataStorage storage, Configuration conf) FSDataset(DataNode datanode, DataStorage storage, Configuration conf)
throws IOException { throws IOException {
this.datanode = datanode; this.datanode = datanode;
this.maxBlocksPerDir = this.maxBlocksPerDir =
@ -1134,12 +1089,12 @@ class FSDataset implements FSDatasetInterface {
+ ", volume failures tolerated: " + volFailuresTolerated); + ", volume failures tolerated: " + volFailuresTolerated);
} }
FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()]; final List<FSVolumeInterface> volArray = new ArrayList<FSVolumeInterface>(
storage.getNumStorageDirs());
for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) { for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(), final File dir = storage.getStorageDir(idx).getCurrentDir();
conf); volArray.add(new FSVolume(dir, conf));
DataNode.LOG.info("FSDataset added volume - " DataNode.LOG.info("FSDataset added volume - " + dir);
+ storage.getStorageDir(idx).getCurrentDir());
} }
volumeMap = new ReplicasMap(this); volumeMap = new ReplicasMap(this);
@ -1185,7 +1140,7 @@ class FSDataset implements FSDatasetInterface {
*/ */
@Override // FSDatasetInterface @Override // FSDatasetInterface
public boolean hasEnoughResource() { public boolean hasEnoughResource() {
return volumes.numberOfVolumes() >= validVolsRequired; return getVolumes().size() >= validVolsRequired;
} }
/** /**
@ -1368,8 +1323,8 @@ class FSDataset implements FSDatasetInterface {
private static File moveBlockFiles(Block b, File srcfile, File destdir private static File moveBlockFiles(Block b, File srcfile, File destdir
) throws IOException { ) throws IOException {
final File dstfile = new File(destdir, b.getBlockName()); final File dstfile = new File(destdir, b.getBlockName());
final File srcmeta = getMetaFile(srcfile, b.getGenerationStamp()); final File srcmeta = DatanodeUtil.getMetaFile(srcfile, b.getGenerationStamp());
final File dstmeta = getMetaFile(dstfile, b.getGenerationStamp()); final File dstmeta = DatanodeUtil.getMetaFile(dstfile, b.getGenerationStamp());
if (!srcmeta.renameTo(dstmeta)) { if (!srcmeta.renameTo(dstmeta)) {
throw new IOException("Failed to move meta file for " + b throw new IOException("Failed to move meta file for " + b
+ " from " + srcmeta + " to " + dstmeta); + " from " + srcmeta + " to " + dstmeta);
@ -1487,7 +1442,7 @@ class FSDataset implements FSDatasetInterface {
// construct a RBW replica with the new GS // construct a RBW replica with the new GS
File blkfile = replicaInfo.getBlockFile(); File blkfile = replicaInfo.getBlockFile();
FSVolume v = replicaInfo.getVolume(); FSVolume v = (FSVolume)replicaInfo.getVolume();
if (v.getAvailable() < estimateBlockLen - replicaInfo.getNumBytes()) { if (v.getAvailable() < estimateBlockLen - replicaInfo.getNumBytes()) {
throw new DiskOutOfSpaceException("Insufficient space for appending to " throw new DiskOutOfSpaceException("Insufficient space for appending to "
+ replicaInfo); + replicaInfo);
@ -1744,7 +1699,7 @@ class FSDataset implements FSDatasetInterface {
+ visible + ", temp=" + temp); + visible + ", temp=" + temp);
} }
// check volume // check volume
final FSVolume v = temp.getVolume(); final FSVolume v = (FSVolume)temp.getVolume();
if (v == null) { if (v == null) {
throw new IOException("r.getVolume() = null, temp=" + temp); throw new IOException("r.getVolume() = null, temp=" + temp);
} }
@ -1805,7 +1760,7 @@ class FSDataset implements FSDatasetInterface {
if ( vol == null ) { if ( vol == null ) {
ReplicaInfo replica = volumeMap.get(bpid, blk); ReplicaInfo replica = volumeMap.get(bpid, blk);
if (replica != null) { if (replica != null) {
vol = volumeMap.get(bpid, blk).getVolume(); vol = (FSVolume)volumeMap.get(bpid, blk).getVolume();
} }
if ( vol == null ) { if ( vol == null ) {
throw new IOException("Could not find volume for block " + blk); throw new IOException("Could not find volume for block " + blk);
@ -1845,7 +1800,7 @@ class FSDataset implements FSDatasetInterface {
newReplicaInfo = (FinalizedReplica) newReplicaInfo = (FinalizedReplica)
((ReplicaUnderRecovery)replicaInfo).getOriginalReplica(); ((ReplicaUnderRecovery)replicaInfo).getOriginalReplica();
} else { } else {
FSVolume v = replicaInfo.getVolume(); FSVolume v = (FSVolume)replicaInfo.getVolume();
File f = replicaInfo.getBlockFile(); File f = replicaInfo.getBlockFile();
if (v == null) { if (v == null) {
throw new IOException("No volume for temporary file " + f + throw new IOException("No volume for temporary file " + f +
@ -1943,7 +1898,8 @@ class FSDataset implements FSDatasetInterface {
/** /**
* Get the list of finalized blocks from in-memory blockmap for a block pool. * Get the list of finalized blocks from in-memory blockmap for a block pool.
*/ */
synchronized List<Block> getFinalizedBlocks(String bpid) { @Override
public synchronized List<Block> getFinalizedBlocks(String bpid) {
ArrayList<Block> finalized = new ArrayList<Block>(volumeMap.size(bpid)); ArrayList<Block> finalized = new ArrayList<Block>(volumeMap.size(bpid));
for (ReplicaInfo b : volumeMap.replicas(bpid)) { for (ReplicaInfo b : volumeMap.replicas(bpid)) {
if(b.getState() == ReplicaState.FINALIZED) { if(b.getState() == ReplicaState.FINALIZED) {
@ -2016,7 +1972,7 @@ class FSDataset implements FSDatasetInterface {
} }
//check replica's meta file //check replica's meta file
final File metafile = getMetaFile(f, r.getGenerationStamp()); final File metafile = DatanodeUtil.getMetaFile(f, r.getGenerationStamp());
if (!metafile.exists()) { if (!metafile.exists()) {
throw new IOException("Metafile " + metafile + " does not exist, r=" + r); throw new IOException("Metafile " + metafile + " does not exist, r=" + r);
} }
@ -2047,7 +2003,7 @@ class FSDataset implements FSDatasetInterface {
error = true; error = true;
continue; continue;
} }
v = dinfo.getVolume(); v = (FSVolume)dinfo.getVolume();
if (f == null) { if (f == null) {
DataNode.LOG.warn("Unexpected error trying to delete block " DataNode.LOG.warn("Unexpected error trying to delete block "
+ invalidBlks[i] + + invalidBlks[i] +
@ -2081,7 +2037,7 @@ class FSDataset implements FSDatasetInterface {
} }
volumeMap.remove(bpid, invalidBlks[i]); volumeMap.remove(bpid, invalidBlks[i]);
} }
File metaFile = getMetaFile(f, invalidBlks[i].getGenerationStamp()); File metaFile = DatanodeUtil.getMetaFile(f, invalidBlks[i].getGenerationStamp());
// Delete the block asynchronously to make sure we can do it fast enough // Delete the block asynchronously to make sure we can do it fast enough
asyncDiskService.deleteAsync(v, f, metaFile, asyncDiskService.deleteAsync(v, f, metaFile,
@ -2238,8 +2194,9 @@ class FSDataset implements FSDatasetInterface {
* @param diskMetaFile Metadata file on the disk * @param diskMetaFile Metadata file on the disk
* @param vol Volume of the block file * @param vol Volume of the block file
*/ */
@Override
public void checkAndUpdate(String bpid, long blockId, File diskFile, public void checkAndUpdate(String bpid, long blockId, File diskFile,
File diskMetaFile, FSVolume vol) { File diskMetaFile, FSVolumeInterface vol) {
Block corruptBlock = null; Block corruptBlock = null;
ReplicaInfo memBlockInfo; ReplicaInfo memBlockInfo;
synchronized (this) { synchronized (this) {
@ -2327,7 +2284,7 @@ class FSDataset implements FSDatasetInterface {
// Compare generation stamp // Compare generation stamp
if (memBlockInfo.getGenerationStamp() != diskGS) { if (memBlockInfo.getGenerationStamp() != diskGS) {
File memMetaFile = getMetaFile(diskFile, File memMetaFile = DatanodeUtil.getMetaFile(diskFile,
memBlockInfo.getGenerationStamp()); memBlockInfo.getGenerationStamp());
if (memMetaFile.exists()) { if (memMetaFile.exists()) {
if (memMetaFile.compareTo(diskMetaFile) != 0) { if (memMetaFile.compareTo(diskMetaFile) != 0) {
@ -2562,18 +2519,15 @@ class FSDataset implements FSDatasetInterface {
volumes.removeBlockPool(bpid); volumes.removeBlockPool(bpid);
} }
/** @Override
* get list of all bpids public String[] getBlockPoolList() {
* @return list of bpids
*/
public String [] getBPIdlist() throws IOException {
return volumeMap.getBlockPoolList(); return volumeMap.getBlockPoolList();
} }
/** /**
* Class for representing the Datanode volume information * Class for representing the Datanode volume information
*/ */
static class VolumeInfo { private static class VolumeInfo {
final String directory; final String directory;
final long usedSpace; final long usedSpace;
final long freeSpace; final long freeSpace;
@ -2587,9 +2541,10 @@ class FSDataset implements FSDatasetInterface {
} }
} }
Collection<VolumeInfo> getVolumeInfo() { private Collection<VolumeInfo> getVolumeInfo() {
Collection<VolumeInfo> info = new ArrayList<VolumeInfo>(); Collection<VolumeInfo> info = new ArrayList<VolumeInfo>();
for (FSVolume volume : volumes.volumes) { for (FSVolumeInterface v : volumes.volumes) {
final FSVolume volume = (FSVolume)v;
long used = 0; long used = 0;
long free = 0; long free = 0;
try { try {
@ -2607,12 +2562,26 @@ class FSDataset implements FSDatasetInterface {
return info; return info;
} }
@Override
public Map<String, Object> getVolumeInfoMap() {
final Map<String, Object> info = new HashMap<String, Object>();
Collection<VolumeInfo> volumes = getVolumeInfo();
for (VolumeInfo v : volumes) {
final Map<String, Object> innerInfo = new HashMap<String, Object>();
innerInfo.put("usedSpace", v.usedSpace);
innerInfo.put("freeSpace", v.freeSpace);
innerInfo.put("reservedSpace", v.reservedSpace);
info.put(v.directory, innerInfo);
}
return info;
}
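The map assembled above nests one attribute map per volume directory, keyed by "usedSpace", "freeSpace" and "reservedSpace". A small illustrative consumer (class name made up):

import java.util.Map;

class VolumeInfoPrinter {
    @SuppressWarnings("unchecked")
    static void print(Map<String, Object> volumeInfoMap) {
        for (Map.Entry<String, Object> e : volumeInfoMap.entrySet()) {
            Map<String, Object> attrs = (Map<String, Object>) e.getValue();
            System.out.println(e.getKey()
                + " used=" + attrs.get("usedSpace")
                + " free=" + attrs.get("freeSpace")
                + " reserved=" + attrs.get("reservedSpace"));
        }
    }
}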
@Override //FSDatasetInterface @Override //FSDatasetInterface
public synchronized void deleteBlockPool(String bpid, boolean force) public synchronized void deleteBlockPool(String bpid, boolean force)
throws IOException { throws IOException {
if (!force) { if (!force) {
for (FSVolume volume : volumes.volumes) { for (FSVolumeInterface volume : volumes.volumes) {
if (!volume.isBPDirEmpty(bpid)) { if (!((FSVolume)volume).isBPDirEmpty(bpid)) {
DataNode.LOG.warn(bpid DataNode.LOG.warn(bpid
+ " has some block files, cannot delete unless forced"); + " has some block files, cannot delete unless forced");
throw new IOException("Cannot delete block pool, " throw new IOException("Cannot delete block pool, "
@ -2620,8 +2589,8 @@ class FSDataset implements FSDatasetInterface {
} }
} }
} }
for (FSVolume volume : volumes.volumes) { for (FSVolumeInterface volume : volumes.volumes) {
volume.deleteBPDirectories(bpid, force); ((FSVolume)volume).deleteBPDirectories(bpid, force);
} }
} }
@ -2629,7 +2598,7 @@ class FSDataset implements FSDatasetInterface {
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block) public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
throws IOException { throws IOException {
File datafile = getBlockFile(block); File datafile = getBlockFile(block);
File metafile = getMetaFile(datafile, block.getGenerationStamp()); File metafile = DatanodeUtil.getMetaFile(datafile, block.getGenerationStamp());
BlockLocalPathInfo info = new BlockLocalPathInfo(block, BlockLocalPathInfo info = new BlockLocalPathInfo(block,
datafile.getAbsolutePath(), metafile.getAbsolutePath()); datafile.getAbsolutePath(), metafile.getAbsolutePath());
return info; return info;


@ -19,10 +19,13 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.Closeable; import java.io.Closeable;
import java.io.File;
import java.io.FilterInputStream; import java.io.FilterInputStream;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.io.OutputStream; import java.io.OutputStream;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -46,7 +49,43 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException;
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public interface FSDatasetInterface extends FSDatasetMBean { public interface FSDatasetInterface extends FSDatasetMBean {
/**
* This is an interface for the underlying volume.
* @see org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume
*/
interface FSVolumeInterface {
/** @return a list of block pools. */
public String[] getBlockPoolList();
/** @return the available storage space in bytes. */
public long getAvailable() throws IOException;
/** @return the directory for the block pool. */
public File getDirectory(String bpid) throws IOException;
/** @return the directory for the finalized blocks in the block pool. */
public File getFinalizedDir(String bpid) throws IOException;
}
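The new FSVolumeInterface is small enough to stub out directly, for example in tests. A minimal sketch (the class name and the <base>/<bpid>/finalized layout are assumptions of this sketch, not part of the patch):

import java.io.File;
import java.io.IOException;
import java.util.Set;

import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;

class StubVolume implements FSVolumeInterface {
    private final File base;
    private final Set<String> blockPools;
    private final long available;

    StubVolume(File base, Set<String> blockPools, long available) {
        this.base = base;
        this.blockPools = blockPools;
        this.available = available;
    }

    @Override
    public String[] getBlockPoolList() {
        return blockPools.toArray(new String[blockPools.size()]);
    }

    @Override
    public long getAvailable() throws IOException {
        return available;
    }

    @Override
    public File getDirectory(String bpid) throws IOException {
        return new File(base, bpid);   // layout is an assumption of this sketch
    }

    @Override
    public File getFinalizedDir(String bpid) throws IOException {
        return new File(getDirectory(bpid), "finalized");
    }
}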
/** @return a list of volumes. */
public List<FSVolumeInterface> getVolumes();
/** @return a volume information map (name => info). */
public Map<String, Object> getVolumeInfoMap();
/** @return a list of block pools. */
public String[] getBlockPoolList();
/** @return a list of finalized blocks for the given block pool. */
public List<Block> getFinalizedBlocks(String bpid);
/**
* Check whether the in-memory block record matches the block on the disk,
* and, in case that they are not matched, update the record or mark it
* as corrupted.
*/
public void checkAndUpdate(String bpid, long blockId, File diskFile,
File diskMetaFile, FSVolumeInterface vol);
/** /**
* Returns the length of the metadata file of the specified block * Returns the length of the metadata file of the specified block


@ -21,7 +21,7 @@ import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/** /**
* This class describes a replica that has been finalized. * This class describes a replica that has been finalized.
@ -38,7 +38,7 @@ class FinalizedReplica extends ReplicaInfo {
* @param dir directory path where block and meta files are located * @param dir directory path where block and meta files are located
*/ */
FinalizedReplica(long blockId, long len, long genStamp, FinalizedReplica(long blockId, long len, long genStamp,
FSVolume vol, File dir) { FSVolumeInterface vol, File dir) {
super(blockId, len, genStamp, vol, dir); super(blockId, len, genStamp, vol, dir);
} }
@ -48,7 +48,7 @@ class FinalizedReplica extends ReplicaInfo {
* @param vol volume where replica is located * @param vol volume where replica is located
* @param dir directory path where block and meta files are located * @param dir directory path where block and meta files are located
*/ */
FinalizedReplica(Block block, FSVolume vol, File dir) { FinalizedReplica(Block block, FSVolumeInterface vol, File dir) {
super(block, vol, dir); super(block, vol, dir);
} }


@ -21,7 +21,7 @@ import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/** This class represents replicas being written. /** This class represents replicas being written.
* Those are the replicas that * Those are the replicas that
@ -36,7 +36,7 @@ class ReplicaBeingWritten extends ReplicaInPipeline {
* @param dir directory path where block and meta files are located * @param dir directory path where block and meta files are located
*/ */
ReplicaBeingWritten(long blockId, long genStamp, ReplicaBeingWritten(long blockId, long genStamp,
FSVolume vol, File dir) { FSVolumeInterface vol, File dir) {
super( blockId, genStamp, vol, dir); super( blockId, genStamp, vol, dir);
} }
@ -48,7 +48,7 @@ class ReplicaBeingWritten extends ReplicaInPipeline {
* @param writer a thread that is writing to this replica * @param writer a thread that is writing to this replica
*/ */
ReplicaBeingWritten(Block block, ReplicaBeingWritten(Block block,
FSVolume vol, File dir, Thread writer) { FSVolumeInterface vol, File dir, Thread writer) {
super( block, vol, dir, writer); super( block, vol, dir, writer);
} }
@ -62,7 +62,7 @@ class ReplicaBeingWritten extends ReplicaInPipeline {
* @param writer a thread that is writing to this replica * @param writer a thread that is writing to this replica
*/ */
ReplicaBeingWritten(long blockId, long len, long genStamp, ReplicaBeingWritten(long blockId, long len, long genStamp,
FSVolume vol, File dir, Thread writer ) { FSVolumeInterface vol, File dir, Thread writer ) {
super( blockId, len, genStamp, vol, dir, writer); super( blockId, len, genStamp, vol, dir, writer);
} }


@ -24,8 +24,8 @@ import java.io.RandomAccessFile;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.DataChecksum;
@ -53,7 +53,7 @@ class ReplicaInPipeline extends ReplicaInfo
* @param state replica state * @param state replica state
*/ */
ReplicaInPipeline(long blockId, long genStamp, ReplicaInPipeline(long blockId, long genStamp,
FSVolume vol, File dir) { FSVolumeInterface vol, File dir) {
this( blockId, 0L, genStamp, vol, dir, Thread.currentThread()); this( blockId, 0L, genStamp, vol, dir, Thread.currentThread());
} }
@ -65,7 +65,7 @@ class ReplicaInPipeline extends ReplicaInfo
* @param writer a thread that is writing to this replica * @param writer a thread that is writing to this replica
*/ */
ReplicaInPipeline(Block block, ReplicaInPipeline(Block block,
FSVolume vol, File dir, Thread writer) { FSVolumeInterface vol, File dir, Thread writer) {
this( block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(), this( block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
vol, dir, writer); vol, dir, writer);
} }
@ -80,7 +80,7 @@ class ReplicaInPipeline extends ReplicaInfo
* @param writer a thread that is writing to this replica * @param writer a thread that is writing to this replica
*/ */
ReplicaInPipeline(long blockId, long len, long genStamp, ReplicaInPipeline(long blockId, long len, long genStamp,
FSVolume vol, File dir, Thread writer ) { FSVolumeInterface vol, File dir, Thread writer ) {
super( blockId, len, genStamp, vol, dir); super( blockId, len, genStamp, vol, dir);
this.bytesAcked = len; this.bytesAcked = len;
this.bytesOnDisk = len; this.bytesOnDisk = len;


@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.HardLink; import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
/** /**
@ -35,8 +35,10 @@ import org.apache.hadoop.io.IOUtils;
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
abstract public class ReplicaInfo extends Block implements Replica { abstract public class ReplicaInfo extends Block implements Replica {
private FSVolume volume; // volume where the replica belongs /** volume where the replica belongs */
private File dir; // directory where block & meta files belong private FSVolumeInterface volume;
/** directory where block & meta files belong */
private File dir;
/** /**
* Constructor for a zero length replica * Constructor for a zero length replica
@ -45,7 +47,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
* @param vol volume where replica is located * @param vol volume where replica is located
* @param dir directory path where block and meta files are located * @param dir directory path where block and meta files are located
*/ */
ReplicaInfo(long blockId, long genStamp, FSVolume vol, File dir) { ReplicaInfo(long blockId, long genStamp, FSVolumeInterface vol, File dir) {
this( blockId, 0L, genStamp, vol, dir); this( blockId, 0L, genStamp, vol, dir);
} }
@ -55,7 +57,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
* @param vol volume where replica is located * @param vol volume where replica is located
* @param dir directory path where block and meta files are located * @param dir directory path where block and meta files are located
*/ */
ReplicaInfo(Block block, FSVolume vol, File dir) { ReplicaInfo(Block block, FSVolumeInterface vol, File dir) {
this(block.getBlockId(), block.getNumBytes(), this(block.getBlockId(), block.getNumBytes(),
block.getGenerationStamp(), vol, dir); block.getGenerationStamp(), vol, dir);
} }
@ -69,7 +71,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
* @param dir directory path where block and meta files are located * @param dir directory path where block and meta files are located
*/ */
ReplicaInfo(long blockId, long len, long genStamp, ReplicaInfo(long blockId, long len, long genStamp,
FSVolume vol, File dir) { FSVolumeInterface vol, File dir) {
super(blockId, len, genStamp); super(blockId, len, genStamp);
this.volume = vol; this.volume = vol;
this.dir = dir; this.dir = dir;
@ -111,14 +113,14 @@ abstract public class ReplicaInfo extends Block implements Replica {
* Get the volume where this replica is located on disk * Get the volume where this replica is located on disk
* @return the volume where this replica is located on disk * @return the volume where this replica is located on disk
*/ */
FSVolume getVolume() { FSVolumeInterface getVolume() {
return volume; return volume;
} }
/** /**
* Set the volume where this replica is located on disk * Set the volume where this replica is located on disk
*/ */
void setVolume(FSVolume vol) { void setVolume(FSVolumeInterface vol) {
this.volume = vol; this.volume = vol;
} }
@ -162,7 +164,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
* be recovered (especially on Windows) on datanode restart. * be recovered (especially on Windows) on datanode restart.
*/ */
private void unlinkFile(File file, Block b) throws IOException { private void unlinkFile(File file, Block b) throws IOException {
File tmpFile = DatanodeUtil.createTmpFile(b, FSDataset.getUnlinkTmpFile(file)); File tmpFile = DatanodeUtil.createTmpFile(b, DatanodeUtil.getUnlinkTmpFile(file));
try { try {
FileInputStream in = new FileInputStream(file); FileInputStream in = new FileInputStream(file);
try { try {


@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.File; import java.io.File;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
/** /**
@ -145,7 +145,7 @@ class ReplicaUnderRecovery extends ReplicaInfo {
} }
@Override //ReplicaInfo @Override //ReplicaInfo
void setVolume(FSVolume vol) { void setVolume(FSVolumeInterface vol) {
super.setVolume(vol); super.setVolume(vol);
original.setVolume(vol); original.setVolume(vol);
} }


@ -21,7 +21,7 @@ import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/** /**
* This class represents a replica that is waiting to be recovered. * This class represents a replica that is waiting to be recovered.
@ -44,7 +44,7 @@ class ReplicaWaitingToBeRecovered extends ReplicaInfo {
* @param dir directory path where block and meta files are located * @param dir directory path where block and meta files are located
*/ */
ReplicaWaitingToBeRecovered(long blockId, long len, long genStamp, ReplicaWaitingToBeRecovered(long blockId, long len, long genStamp,
FSVolume vol, File dir) { FSVolumeInterface vol, File dir) {
super(blockId, len, genStamp, vol, dir); super(blockId, len, genStamp, vol, dir);
} }
@ -54,7 +54,7 @@ class ReplicaWaitingToBeRecovered extends ReplicaInfo {
* @param vol volume where replica is located * @param vol volume where replica is located
* @param dir directory path where block and meta files are located * @param dir directory path where block and meta files are located
*/ */
ReplicaWaitingToBeRecovered(Block block, FSVolume vol, File dir) { ReplicaWaitingToBeRecovered(Block block, FSVolumeInterface vol, File dir) {
super(block, vol, dir); super(block, vol, dir);
} }

View File

@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException; import java.io.IOException;
import java.util.List; import java.util.List;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy { public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {
@ -28,8 +28,8 @@ public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {
private int curVolume = 0; private int curVolume = 0;
@Override @Override
public synchronized FSVolume chooseVolume(List<FSVolume> volumes, long blockSize) public synchronized FSVolumeInterface chooseVolume(
throws IOException { List<FSVolumeInterface> volumes, long blockSize) throws IOException {
if(volumes.size() < 1) { if(volumes.size() < 1) {
throw new DiskOutOfSpaceException("No more available volumes"); throw new DiskOutOfSpaceException("No more available volumes");
} }
@ -44,7 +44,7 @@ public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {
long maxAvailable = 0; long maxAvailable = 0;
while (true) { while (true) {
FSVolume volume = volumes.get(curVolume); FSVolumeInterface volume = volumes.get(curVolume);
curVolume = (curVolume + 1) % volumes.size(); curVolume = (curVolume + 1) % volumes.size();
long availableVolumeSize = volume.getAvailable(); long availableVolumeSize = volume.getAvailable();
if (availableVolumeSize > blockSize) { return volume; } if (availableVolumeSize > blockSize) { return volume; }
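The selection loop shown here, reduced to a self-contained form (volumes stand in as a list of available byte counts; the real policy's bookkeeping of the largest available volume is omitted):

import java.io.IOException;
import java.util.List;

class RoundRobinSketch {
    private int curVolume = 0;

    // availablePerVolume stands in for calling getAvailable() on each volume.
    synchronized int chooseVolume(List<Long> availablePerVolume, long blockSize)
            throws IOException {
        if (availablePerVolume.isEmpty()) {
            throw new IOException("No more available volumes");
        }
        final int start = curVolume % availablePerVolume.size();
        curVolume = start;
        while (true) {
            final int candidate = curVolume;
            curVolume = (curVolume + 1) % availablePerVolume.size();
            if (availablePerVolume.get(candidate) > blockSize) {
                return candidate;   // index of the chosen volume
            }
            if (curVolume == start) {
                throw new IOException("Insufficient space on all volumes");
            }
        }
    }
}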


@ -153,8 +153,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
this.metrics = NameNode.getNameNodeMetrics(); this.metrics = NameNode.getNameNodeMetrics();
int handlerCount = int handlerCount =
conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY, conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY,
DFS_DATANODE_HANDLER_COUNT_DEFAULT); DFS_NAMENODE_HANDLER_COUNT_DEFAULT);
InetSocketAddress socAddr = nn.getRpcServerAddress(conf); InetSocketAddress socAddr = nn.getRpcServerAddress(conf);
RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
ProtobufRpcEngine.class); ProtobufRpcEngine.class);
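The fix swaps the datanode handler-count key for the namenode one. The intended lookup in isolation, using the same DFSConfigKeys constants referenced above (class name made up):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

class HandlerCountSketch {
    static int namenodeHandlerCount(Configuration conf) {
        // Read the *namenode* handler count, not the datanode one.
        return conf.getInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY,
                           DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_DEFAULT);
    }
}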


@ -1127,7 +1127,7 @@ public class DFSAdmin extends FsShell {
private ClientDatanodeProtocol getDataNodeProxy(String datanode) private ClientDatanodeProtocol getDataNodeProxy(String datanode)
throws IOException { throws IOException {
InetSocketAddress datanodeAddr = DFSUtil.getSocketAddress(datanode); InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
// Get the current configuration // Get the current configuration
Configuration conf = getConf(); Configuration conf = getConf();
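NetUtils.createSocketAddr parses a host:port string straight into an InetSocketAddress. A tiny illustration (the address in the comment is made up):

import java.net.InetSocketAddress;

import org.apache.hadoop.net.NetUtils;

class DatanodeAddress {
    static InetSocketAddress toSocketAddr(String datanode) {
        // e.g. "dn1.example.com:50020" -> host dn1.example.com, port 50020
        return NetUtils.createSocketAddr(datanode);
    }
}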


@ -21,7 +21,7 @@
<property name="aspectversion" value="1.6.5"/> <property name="aspectversion" value="1.6.5"/>
<!-- TODO this has to be changed synchronously with build.xml version prop.--> <!-- TODO this has to be changed synchronously with build.xml version prop.-->
<!-- this works around test-patch setting its own 'version' --> <!-- this works around test-patch setting its own 'version' -->
<property name="project.version" value="0.23.0-SNAPSHOT"/> <property name="project.version" value="0.23.2-SNAPSHOT"/>
<!-- Properties common for all fault injections --> <!-- Properties common for all fault injections -->
<property name="build-fi.dir" value="${basedir}/build-fi"/> <property name="build-fi.dir" value="${basedir}/build-fi"/>


@ -17,12 +17,14 @@
*/ */
package org.apache.hadoop.hdfs.server.datanode; package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.io.OutputStream; import java.io.OutputStream;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.HashMap; import java.util.HashMap;
import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Random; import java.util.Random;
@ -38,11 +40,10 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.BlockPoolSlice;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolumeSet; import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolumeSet;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean; import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@ -988,8 +989,33 @@ public class SimulatedFSDataset implements FSDatasetInterface, Configurable{
} }
@Override @Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b) public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b) {
throws IOException { throw new UnsupportedOperationException();
throw new IOException("getBlockLocalPathInfo not supported."); }
@Override
public String[] getBlockPoolList() {
throw new UnsupportedOperationException();
}
@Override
public void checkAndUpdate(String bpid, long blockId, File diskFile,
File diskMetaFile, FSVolumeInterface vol) {
throw new UnsupportedOperationException();
}
@Override
public List<FSVolumeInterface> getVolumes() {
throw new UnsupportedOperationException();
}
@Override
public List<Block> getFinalizedBlocks(String bpid) {
throw new UnsupportedOperationException();
}
@Override
public Map<String, Object> getVolumeInfoMap() {
throw new UnsupportedOperationException();
} }
} }
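The simulated dataset satisfies the widened interface by failing fast on what it cannot simulate rather than returning fabricated state. The same pattern in isolation (interface and class names are made up):

import java.util.List;

interface MiniDataset {
    List<String> getBlockPoolList();
}

class SimulatedMiniDataset implements MiniDataset {
    @Override
    public List<String> getBlockPoolList() {
        // Fail loudly rather than hand back fake state a test might rely on.
        throw new UnsupportedOperationException();
    }
}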


@ -23,7 +23,7 @@ import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame; import static org.junit.Assert.assertNotSame;
import java.io.IOException; import java.io.IOException;
import java.util.Collection; import java.util.Map;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
@ -31,7 +31,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.junit.Assert; import org.junit.Assert;
@ -81,11 +80,11 @@ public class TestDataNodeMultipleRegistrations {
// check number of volumes in fsdataset // check number of volumes in fsdataset
DataNode dn = cluster.getDataNodes().get(0); DataNode dn = cluster.getDataNodes().get(0);
Collection<VolumeInfo> volInfos = ((FSDataset) dn.data).getVolumeInfo(); final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
assertNotNull("No volumes in the fsdataset", volInfos); Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
int i = 0; int i = 0;
for (VolumeInfo vi : volInfos) { for (Map.Entry<String, Object> e : volInfos.entrySet()) {
LOG.info("vol " + i++ + ";dir=" + vi.directory + ";fs= " + vi.freeSpace); LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
} }
// number of volumes should be 2 - [data1, data2] // number of volumes should be 2 - [data1, data2]
assertEquals("number of volumes is wrong", 2, volInfos.size()); assertEquals("number of volumes is wrong", 2, volInfos.size());
@ -143,11 +142,11 @@ public class TestDataNodeMultipleRegistrations {
// check number of volumes in fsdataset // check number of volumes in fsdataset
DataNode dn = cluster.getDataNodes().get(0); DataNode dn = cluster.getDataNodes().get(0);
Collection<VolumeInfo> volInfos = ((FSDataset) dn.data).getVolumeInfo(); final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
assertNotNull("No volumes in the fsdataset", volInfos); Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
int i = 0; int i = 0;
for (VolumeInfo vi : volInfos) { for (Map.Entry<String, Object> e : volInfos.entrySet()) {
LOG.info("vol " + i++ + ";dir=" + vi.directory + ";fs= " + vi.freeSpace); LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
} }
// number of volumes should be 2 - [data1, data2] // number of volumes should be 2 - [data1, data2]
assertEquals("number of volumes is wrong", 2, volInfos.size()); assertEquals("number of volumes is wrong", 2, volInfos.size());

View File

@ -17,6 +17,9 @@
*/ */
package org.apache.hadoop.hdfs.server.datanode; package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.io.File; import java.io.File;
import java.io.FilenameFilter; import java.io.FilenameFilter;
import java.io.IOException; import java.io.IOException;
@ -29,8 +32,8 @@ import java.util.Map;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.BlockReaderFactory; import org.apache.hadoop.hdfs.BlockReaderFactory;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
@ -43,13 +46,10 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import static org.junit.Assert.*;
/** /**
* Fine-grain testing of block files and locations after volume failure. * Fine-grain testing of block files and locations after volume failure.
@ -274,7 +274,6 @@ public class TestDataNodeVolumeFailure {
String file = BlockReaderFactory.getFileName(targetAddr, String file = BlockReaderFactory.getFileName(targetAddr,
"test-blockpoolid", "test-blockpoolid",
block.getBlockId()); block.getBlockId());
BlockReader blockReader =
BlockReaderFactory.newBlockReader(conf, s, file, block, lblock BlockReaderFactory.newBlockReader(conf, s, file, block, lblock
.getBlockToken(), 0, -1); .getBlockToken(), 0, -1);
@ -372,7 +371,7 @@ public class TestDataNodeVolumeFailure {
new FilenameFilter() { new FilenameFilter() {
public boolean accept(File dir, String name) { public boolean accept(File dir, String name) {
return name.startsWith("blk_") && return name.startsWith("blk_") &&
name.endsWith(FSDataset.METADATA_EXTENSION); name.endsWith(DatanodeUtil.METADATA_EXTENSION);
} }
} }
); );
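The filter above now keys off DatanodeUtil.METADATA_EXTENSION instead of the constant formerly exposed by FSDataset. A self-contained sketch of the same FilenameFilter follows; the ".meta" literal is an assumption standing in for the real constant, and BlockMetaFilter is not a Hadoop class.

    import java.io.File;
    import java.io.FilenameFilter;

    public class BlockMetaFilter {
      // Assumed value of DatanodeUtil.METADATA_EXTENSION; not taken from this commit.
      private static final String METADATA_EXTENSION = ".meta";

      // Accepts datanode metadata files such as blk_123_456.meta.
      static final FilenameFilter META_FILES = new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
          return name.startsWith("blk_") && name.endsWith(METADATA_EXTENSION);
        }
      };

      public static void main(String[] args) {
        File dir = new File(args.length > 0 ? args[0] : ".");
        File[] metas = dir.listFiles(META_FILES);
        System.out.println((metas == null ? 0 : metas.length) + " metadata files");
      }
    }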

View File

@ -30,17 +30,17 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.junit.Test;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Test;
/** Test if a datanode can correctly upgrade itself */ /** Test if a datanode can correctly upgrade itself */
public class TestDatanodeRestart { public class TestDatanodeRestart {
@ -98,8 +98,9 @@ public class TestDatanodeRestart {
out.write(writeBuf); out.write(writeBuf);
out.hflush(); out.hflush();
DataNode dn = cluster.getDataNodes().get(0); DataNode dn = cluster.getDataNodes().get(0);
for (FSVolume volume : ((FSDataset)dn.data).volumes.getVolumes()) { for (FSVolumeInterface v : dn.data.getVolumes()) {
File currentDir = volume.getDir().getParentFile(); FSVolume volume = (FSVolume)v;
File currentDir = volume.getCurrentDir().getParentFile().getParentFile();
File rbwDir = new File(currentDir, "rbw"); File rbwDir = new File(currentDir, "rbw");
for (File file : rbwDir.listFiles()) { for (File file : rbwDir.listFiles()) {
if (isCorrupt && Block.isBlockFilename(file)) { if (isCorrupt && Block.isBlockFilename(file)) {
@ -188,7 +189,7 @@ public class TestDatanodeRestart {
} else { } else {
src = replicaInfo.getMetaFile(); src = replicaInfo.getMetaFile();
} }
File dst = FSDataset.getUnlinkTmpFile(src); File dst = DatanodeUtil.getUnlinkTmpFile(src);
if (isRename) { if (isRename) {
src.renameTo(dst); src.renameTo(dst);
} else { } else {
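The restart test now walks dn.data.getVolumes(), casts each entry back to the concrete FSVolume, and derives the directory holding rbw from getCurrentDir().getParentFile().getParentFile(), while unlink-tmp files come from DatanodeUtil.getUnlinkTmpFile(src). A sketch of just that path arithmetic, using plain java.io.File and an assumed on-disk layout:

    import java.io.File;

    public class RbwDirLocator {
      // Mirrors the updated test: walk two parent levels up from getCurrentDir()
      // and then into the rbw subdirectory.
      static File rbwDirFor(File volumeCurrentDir) {
        File base = volumeCurrentDir.getParentFile().getParentFile();
        return new File(base, "rbw");
      }

      public static void main(String[] args) {
        // Assumed layout for illustration; real datanode storage may differ.
        File current = new File("/data1/dfs/data/current/BP-1/current");
        System.out.println(rbwDirFor(current));
      }
    }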

View File

@ -25,20 +25,20 @@ import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.Random; import java.util.Random;
import junit.framework.TestCase;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import junit.framework.TestCase;
/** /**
* Tests {@link DirectoryScanner} handling of differences * Tests {@link DirectoryScanner} handling of differences
@ -142,10 +142,10 @@ public class TestDirectoryScanner extends TestCase {
/** Create a block file in a random volume*/ /** Create a block file in a random volume*/
private long createBlockFile() throws IOException { private long createBlockFile() throws IOException {
List<FSVolume> volumes = fds.volumes.getVolumes(); List<FSVolumeInterface> volumes = fds.getVolumes();
int index = rand.nextInt(volumes.size() - 1); int index = rand.nextInt(volumes.size() - 1);
long id = getFreeBlockId(); long id = getFreeBlockId();
File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir(); File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
File file = new File(finalizedDir, getBlockFile(id)); File file = new File(finalizedDir, getBlockFile(id));
if (file.createNewFile()) { if (file.createNewFile()) {
LOG.info("Created block file " + file.getName()); LOG.info("Created block file " + file.getName());
@ -155,10 +155,10 @@ public class TestDirectoryScanner extends TestCase {
/** Create a metafile in a random volume*/ /** Create a metafile in a random volume*/
private long createMetaFile() throws IOException { private long createMetaFile() throws IOException {
List<FSVolume> volumes = fds.volumes.getVolumes(); List<FSVolumeInterface> volumes = fds.getVolumes();
int index = rand.nextInt(volumes.size() - 1); int index = rand.nextInt(volumes.size() - 1);
long id = getFreeBlockId(); long id = getFreeBlockId();
File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir(); File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
File file = new File(finalizedDir, getMetaFile(id)); File file = new File(finalizedDir, getMetaFile(id));
if (file.createNewFile()) { if (file.createNewFile()) {
LOG.info("Created metafile " + file.getName()); LOG.info("Created metafile " + file.getName());
@ -168,10 +168,10 @@ public class TestDirectoryScanner extends TestCase {
/** Create block file and corresponding metafile in a random volume */ /** Create block file and corresponding metafile in a random volume */
private long createBlockMetaFile() throws IOException { private long createBlockMetaFile() throws IOException {
List<FSVolume> volumes = fds.volumes.getVolumes(); List<FSVolumeInterface> volumes = fds.getVolumes();
int index = rand.nextInt(volumes.size() - 1); int index = rand.nextInt(volumes.size() - 1);
long id = getFreeBlockId(); long id = getFreeBlockId();
File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir(); File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
File file = new File(finalizedDir, getBlockFile(id)); File file = new File(finalizedDir, getBlockFile(id));
if (file.createNewFile()) { if (file.createNewFile()) {
LOG.info("Created block file " + file.getName()); LOG.info("Created block file " + file.getName());

View File

@ -21,10 +21,10 @@ import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import junit.framework.Assert; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Assert;
import org.junit.Test; import org.junit.Test;
import org.mockito.Mockito; import org.mockito.Mockito;
@ -33,14 +33,14 @@ public class TestRoundRobinVolumesPolicy {
// Test the Round-Robin block-volume choosing algorithm. // Test the Round-Robin block-volume choosing algorithm.
@Test @Test
public void testRR() throws Exception { public void testRR() throws Exception {
final List<FSVolume> volumes = new ArrayList<FSVolume>(); final List<FSVolumeInterface> volumes = new ArrayList<FSVolumeInterface>();
// First volume, with 100 bytes of space. // First volume, with 100 bytes of space.
volumes.add(Mockito.mock(FSVolume.class)); volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(0).getAvailable()).thenReturn(100L); Mockito.when(volumes.get(0).getAvailable()).thenReturn(100L);
// Second volume, with 200 bytes of space. // Second volume, with 200 bytes of space.
volumes.add(Mockito.mock(FSVolume.class)); volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L); Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L);
RoundRobinVolumesPolicy policy = ReflectionUtils.newInstance( RoundRobinVolumesPolicy policy = ReflectionUtils.newInstance(
@ -69,14 +69,14 @@ public class TestRoundRobinVolumesPolicy {
@Test @Test
public void testRRPolicyExceptionMessage() public void testRRPolicyExceptionMessage()
throws Exception { throws Exception {
final List<FSVolume> volumes = new ArrayList<FSVolume>(); final List<FSVolumeInterface> volumes = new ArrayList<FSVolumeInterface>();
// First volume, with 500 bytes of space. // First volume, with 500 bytes of space.
volumes.add(Mockito.mock(FSVolume.class)); volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(0).getAvailable()).thenReturn(500L); Mockito.when(volumes.get(0).getAvailable()).thenReturn(500L);
// Second volume, with 600 bytes of space. // Second volume, with 600 bytes of space.
volumes.add(Mockito.mock(FSVolume.class)); volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(600L); Mockito.when(volumes.get(1).getAvailable()).thenReturn(600L);
RoundRobinVolumesPolicy policy = new RoundRobinVolumesPolicy(); RoundRobinVolumesPolicy policy = new RoundRobinVolumesPolicy();
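The policy tests now mock the FSVolumeInterface abstraction rather than the concrete FSVolume. A minimal Mockito sketch of that setup follows, with a hypothetical Volume interface in place of FSVolumeInterface.

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    public class VolumeMockSketch {
      // Hypothetical stand-in for FSVolumeInterface.
      public interface Volume {
        long getAvailable() throws IOException;
      }

      public static void main(String[] args) throws Exception {
        final List<Volume> volumes = new ArrayList<Volume>();

        // First volume, with 100 bytes of space.
        volumes.add(mock(Volume.class));
        when(volumes.get(0).getAvailable()).thenReturn(100L);

        // Second volume, with 200 bytes of space.
        volumes.add(mock(Volume.class));
        when(volumes.get(1).getAvailable()).thenReturn(200L);

        // A round-robin style policy would now alternate between the two mocks.
        System.out.println(volumes.get(0).getAvailable() + ", " + volumes.get(1).getAvailable());
      }
    }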

View File

@ -140,7 +140,7 @@ public class TestWriteToReplica {
ReplicasMap replicasMap = dataSet.volumeMap; ReplicasMap replicasMap = dataSet.volumeMap;
FSVolume vol = dataSet.volumes.getNextVolume(0); FSVolume vol = dataSet.volumes.getNextVolume(0);
ReplicaInfo replicaInfo = new FinalizedReplica( ReplicaInfo replicaInfo = new FinalizedReplica(
blocks[FINALIZED].getLocalBlock(), vol, vol.getDir()); blocks[FINALIZED].getLocalBlock(), vol, vol.getCurrentDir().getParentFile());
replicasMap.add(bpid, replicaInfo); replicasMap.add(bpid, replicaInfo);
replicaInfo.getBlockFile().createNewFile(); replicaInfo.getBlockFile().createNewFile();
replicaInfo.getMetaFile().createNewFile(); replicaInfo.getMetaFile().createNewFile();
@ -160,15 +160,15 @@ public class TestWriteToReplica {
blocks[RWR].getLocalBlock(), vol, vol.createRbwFile(bpid, blocks[RWR].getLocalBlock(), vol, vol.createRbwFile(bpid,
blocks[RWR].getLocalBlock()).getParentFile())); blocks[RWR].getLocalBlock()).getParentFile()));
replicasMap.add(bpid, new ReplicaUnderRecovery(new FinalizedReplica(blocks[RUR] replicasMap.add(bpid, new ReplicaUnderRecovery(new FinalizedReplica(blocks[RUR]
.getLocalBlock(), vol, vol.getDir()), 2007)); .getLocalBlock(), vol, vol.getCurrentDir().getParentFile()), 2007));
return blocks; return blocks;
} }
private void testAppend(String bpid, FSDataset dataSet, ExtendedBlock[] blocks) throws IOException { private void testAppend(String bpid, FSDataset dataSet, ExtendedBlock[] blocks) throws IOException {
long newGS = blocks[FINALIZED].getGenerationStamp()+1; long newGS = blocks[FINALIZED].getGenerationStamp()+1;
FSVolume v = dataSet.volumeMap.get(bpid, blocks[FINALIZED].getLocalBlock()) final FSVolume v = (FSVolume)dataSet.volumeMap.get(
.getVolume(); bpid, blocks[FINALIZED].getLocalBlock()).getVolume();
long available = v.getCapacity()-v.getDfsUsed(); long available = v.getCapacity()-v.getDfsUsed();
long expectedLen = blocks[FINALIZED].getNumBytes(); long expectedLen = blocks[FINALIZED].getNumBytes();
try { try {

View File

@ -34,6 +34,7 @@ import java.util.Properties;
import java.util.Set; import java.util.Set;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@ -59,6 +60,8 @@ import static org.mockito.Mockito.mock;
*/ */
public abstract class FSImageTestUtil { public abstract class FSImageTestUtil {
public static final Log LOG = LogFactory.getLog(FSImageTestUtil.class);
/** /**
* The position in the fsimage header where the txid is * The position in the fsimage header where the txid is
* written. * written.
@ -369,6 +372,8 @@ public abstract class FSImageTestUtil {
List<Integer> txids) { List<Integer> txids) {
for (File nameDir : getNameNodeCurrentDirs(cluster)) { for (File nameDir : getNameNodeCurrentDirs(cluster)) {
LOG.info("examining name dir with files: " +
Joiner.on(",").join(nameDir.listFiles()));
// Should have fsimage_N for the three checkpoints // Should have fsimage_N for the three checkpoints
for (long checkpointTxId : txids) { for (long checkpointTxId : txids) {
File image = new File(nameDir, File image = new File(nameDir,

View File

@ -17,6 +17,8 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.*;
import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.util.Collections; import java.util.Collections;
@ -38,15 +40,15 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level; import org.apache.log4j.Level;
import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Supplier; import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet; import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists; import com.google.common.collect.Lists;
import junit.framework.TestCase; public class TestBackupNode {
public class TestBackupNode extends TestCase {
public static final Log LOG = LogFactory.getLog(TestBackupNode.class); public static final Log LOG = LogFactory.getLog(TestBackupNode.class);
@ -57,8 +59,8 @@ public class TestBackupNode extends TestCase {
static final String BASE_DIR = MiniDFSCluster.getBaseDirectory(); static final String BASE_DIR = MiniDFSCluster.getBaseDirectory();
protected void setUp() throws Exception { @Before
super.setUp(); public void setUp() throws Exception {
File baseDir = new File(BASE_DIR); File baseDir = new File(BASE_DIR);
if(baseDir.exists()) if(baseDir.exists())
if(!(FileUtil.fullyDelete(baseDir))) if(!(FileUtil.fullyDelete(baseDir)))
@ -89,8 +91,7 @@ public class TestBackupNode extends TestCase {
return (BackupNode)NameNode.createNameNode(new String[]{startupOpt.getName()}, c); return (BackupNode)NameNode.createNameNode(new String[]{startupOpt.getName()}, c);
} }
void waitCheckpointDone( void waitCheckpointDone(MiniDFSCluster cluster, long txid) {
MiniDFSCluster cluster, BackupNode backup, long txid) {
long thisCheckpointTxId; long thisCheckpointTxId;
do { do {
try { try {
@ -98,9 +99,8 @@ public class TestBackupNode extends TestCase {
"checkpoint txid should increase above " + txid); "checkpoint txid should increase above " + txid);
Thread.sleep(1000); Thread.sleep(1000);
} catch (Exception e) {} } catch (Exception e) {}
thisCheckpointTxId = backup.getFSImage().getStorage() thisCheckpointTxId = cluster.getNameNode().getFSImage().getStorage()
.getMostRecentCheckpointTxId(); .getMostRecentCheckpointTxId();
} while (thisCheckpointTxId < txid); } while (thisCheckpointTxId < txid);
// Check that the checkpoint got uploaded to NN successfully // Check that the checkpoint got uploaded to NN successfully
@ -108,6 +108,7 @@ public class TestBackupNode extends TestCase {
Collections.singletonList((int)thisCheckpointTxId)); Collections.singletonList((int)thisCheckpointTxId));
} }
@Test
public void testCheckpointNode() throws Exception { public void testCheckpointNode() throws Exception {
testCheckpoint(StartupOption.CHECKPOINT); testCheckpoint(StartupOption.CHECKPOINT);
} }
@ -117,6 +118,7 @@ public class TestBackupNode extends TestCase {
* and keep in sync, even while the NN rolls, checkpoints * and keep in sync, even while the NN rolls, checkpoints
* occur, etc. * occur, etc.
*/ */
@Test
public void testBackupNodeTailsEdits() throws Exception { public void testBackupNodeTailsEdits() throws Exception {
Configuration conf = new HdfsConfiguration(); Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null; MiniDFSCluster cluster = null;
@ -234,6 +236,7 @@ public class TestBackupNode extends TestCase {
FSImageTestUtil.assertParallelFilesAreIdentical(dirs, ImmutableSet.of("VERSION")); FSImageTestUtil.assertParallelFilesAreIdentical(dirs, ImmutableSet.of("VERSION"));
} }
@Test
public void testBackupNode() throws Exception { public void testBackupNode() throws Exception {
testCheckpoint(StartupOption.BACKUP); testCheckpoint(StartupOption.BACKUP);
} }
@ -270,7 +273,7 @@ public class TestBackupNode extends TestCase {
// //
long txid = cluster.getNameNodeRpc().getTransactionID(); long txid = cluster.getNameNodeRpc().getTransactionID();
backup = startBackupNode(conf, op, 1); backup = startBackupNode(conf, op, 1);
waitCheckpointDone(cluster, backup, txid); waitCheckpointDone(cluster, txid);
} catch(IOException e) { } catch(IOException e) {
LOG.error("Error in TestBackupNode:", e); LOG.error("Error in TestBackupNode:", e);
assertTrue(e.getLocalizedMessage(), false); assertTrue(e.getLocalizedMessage(), false);
@ -305,7 +308,7 @@ public class TestBackupNode extends TestCase {
// //
backup = startBackupNode(conf, op, 1); backup = startBackupNode(conf, op, 1);
long txid = cluster.getNameNodeRpc().getTransactionID(); long txid = cluster.getNameNodeRpc().getTransactionID();
waitCheckpointDone(cluster, backup, txid); waitCheckpointDone(cluster, txid);
for (int i = 0; i < 10; i++) { for (int i = 0; i < 10; i++) {
fileSys.mkdirs(new Path("file_" + i)); fileSys.mkdirs(new Path("file_" + i));
@ -313,11 +316,11 @@ public class TestBackupNode extends TestCase {
txid = cluster.getNameNodeRpc().getTransactionID(); txid = cluster.getNameNodeRpc().getTransactionID();
backup.doCheckpoint(); backup.doCheckpoint();
waitCheckpointDone(cluster, backup, txid); waitCheckpointDone(cluster, txid);
txid = cluster.getNameNodeRpc().getTransactionID(); txid = cluster.getNameNodeRpc().getTransactionID();
backup.doCheckpoint(); backup.doCheckpoint();
waitCheckpointDone(cluster, backup, txid); waitCheckpointDone(cluster, txid);
} catch(IOException e) { } catch(IOException e) {
LOG.error("Error in TestBackupNode:", e); LOG.error("Error in TestBackupNode:", e);

View File

@ -17,12 +17,12 @@
<parent> <parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId> <artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<relativePath>../hadoop-project</relativePath> <relativePath>../hadoop-project</relativePath>
</parent> </parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-project</artifactId> <artifactId>hadoop-hdfs-project</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<description>Apache Hadoop HDFS Project</description> <description>Apache Hadoop HDFS Project</description>
<name>Apache Hadoop HDFS Project</name> <name>Apache Hadoop HDFS Project</name>
<packaging>pom</packaging> <packaging>pom</packaging>

View File

@ -26,7 +26,17 @@ Release 0.23-PB - Unreleased
MAPREDUCE-3818. Fixed broken compilation in TestSubmitJob after the patch MAPREDUCE-3818. Fixed broken compilation in TestSubmitJob after the patch
for HDFS-2895. (Suresh Srinivas via vinodkv) for HDFS-2895. (Suresh Srinivas via vinodkv)
Release 0.23.1 - Unreleased Release 0.23.2 - UNRELEASED
NEW FEATURES
IMPROVEMENTS
OPTIMIZATIONS
BUG FIXES
Release 0.23.1 - 2012-02-08
NEW FEATURES NEW FEATURES
@ -48,6 +58,8 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3375. [Gridmix] Memory Emulation system tests. MAPREDUCE-3375. [Gridmix] Memory Emulation system tests.
(Vinay Thota via amarrk) (Vinay Thota via amarrk)
MAPREDUCE-3840. JobEndNotifier doesn't use the proxyToUse during connecting
(Ravi Prakash via bobby)
MAPREDUCE-2733. [Gridmix] Gridmix3 cpu emulation system tests. MAPREDUCE-2733. [Gridmix] Gridmix3 cpu emulation system tests.
(Vinay Thota via amarrk) (Vinay Thota via amarrk)
@ -80,6 +92,9 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3102. Changed NodeManager to fail fast when LinuxContainerExecutor MAPREDUCE-3102. Changed NodeManager to fail fast when LinuxContainerExecutor
has wrong configuration or permissions. (Hitesh Shah via vinodkv) has wrong configuration or permissions. (Hitesh Shah via vinodkv)
MAPREDUCE-3415. improve MiniMRYarnCluster & DistributedShell JAR
resolution. (tucu)
MAPREDUCE-3169. Create a new MiniMRCluster equivalent which only provides MAPREDUCE-3169. Create a new MiniMRCluster equivalent which only provides
client APIs cross MR1 and MR2. (Ahmed via tucu) client APIs cross MR1 and MR2. (Ahmed via tucu)
@ -221,6 +236,8 @@ Release 0.23.1 - Unreleased
acmurthy) acmurthy)
BUG FIXES BUG FIXES
MAPREDUCE-3770. Zombie.getJobConf() results into NPE. (amarrk)
MAPREDUCE-3804. yarn webapp interface vulnerable to cross scripting attacks MAPREDUCE-3804. yarn webapp interface vulnerable to cross scripting attacks
(Dave Thompson via bobby) (Dave Thompson via bobby)
@ -721,6 +738,36 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3709. TestDistributedShell is failing. (Hitesh Shah via MAPREDUCE-3709. TestDistributedShell is failing. (Hitesh Shah via
mahadev) mahadev)
MAPREDUCE-3436. JobHistory webapp address should use the host configured
in the jobhistory address. (Ahmed Radwan via sseth)
MAPREDUCE-3815. Fixed MR AM to always use hostnames and never IPs when
requesting containers so that scheduler can give off data local containers
correctly. (Siddarth Seth via vinodkv)
MAPREDUCE-3833. Fixed a bug in reinitializing of queues. (Jason Lowe via
acmurthy)
MAPREDUCE-3826. Fixed a bug in RM web-ui which broke sorting. (Jonathan
Eagles via acmurthy)
MAPREDUCE-3823. Ensure counters are calculated only once after a job
finishes. (Vinod Kumar Vavilapalli via sseth)
MAPREDUCE-3827. Changed Counters to use ConcurrentSkipListMap for
performance. (vinodkv via acmurthy)
MAPREDUCE-3822. Changed FS counter computation to use all occurrences of
the same FS scheme, instead of randomly using one. (Mahadev Konar via
sseth)
MAPREDUCE-3834. Changed MR AM to not add the same rack entry multiple times
into the container request table when multiple hosts for a split happen to
be on the same rack. (Siddarth Seth via vinodkv)
MAPREDUCE-3828. Ensure that urls in single-node mode are correct. (sseth
via acmurthy)
Release 0.23.0 - 2011-11-01 Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -32,7 +32,7 @@
<property name="Name" value="Hadoop-Mapred"/> <property name="Name" value="Hadoop-Mapred"/>
<property name="name" value="hadoop-${module}"/> <property name="name" value="hadoop-${module}"/>
<!-- Need to change aop.xml project.version prop. synchronously --> <!-- Need to change aop.xml project.version prop. synchronously -->
<property name="_version" value="0.23.1"/> <property name="_version" value="0.23.2"/>
<property name="version" value="${_version}-SNAPSHOT"/> <property name="version" value="${_version}-SNAPSHOT"/>
<property name="final.name" value="${name}-${version}"/> <property name="final.name" value="${name}-${version}"/>
<property name="test.final.name" value="${name}-test-${version}"/> <property name="test.final.name" value="${name}-test-${version}"/>

View File

@ -16,12 +16,12 @@
<parent> <parent>
<artifactId>hadoop-mapreduce-client</artifactId> <artifactId>hadoop-mapreduce-client</artifactId>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
</parent> </parent>
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-app</artifactId> <artifactId>hadoop-mapreduce-client-app</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<name>hadoop-mapreduce-client-app</name> <name>hadoop-mapreduce-client-app</name>
<properties> <properties>
@ -112,7 +112,7 @@
<target> <target>
<symlink link="${applink.base}.jar" <symlink link="${applink.base}.jar"
resource="mr-app.jar" failonerror="false"/> resource="mr-app.jar" failonerror="false"/>
<symlink link="${applink.base}-0.23.0-SNAPSHOT.jar" <symlink link="${applink.base}-0.23.2-SNAPSHOT.jar"
resource="mr-app.jar" failonerror="false"/> resource="mr-app.jar" failonerror="false"/>
</target> </target>
</configuration> </configuration>

View File

@ -119,7 +119,8 @@ public class JobEndNotifier implements Configurable {
boolean success = false; boolean success = false;
try { try {
Log.info("Job end notification trying " + urlToNotify); Log.info("Job end notification trying " + urlToNotify);
HttpURLConnection conn = (HttpURLConnection) urlToNotify.openConnection(); HttpURLConnection conn =
(HttpURLConnection) urlToNotify.openConnection(proxyToUse);
conn.setConnectTimeout(5*1000); conn.setConnectTimeout(5*1000);
conn.setReadTimeout(5*1000); conn.setReadTimeout(5*1000);
conn.setAllowUserInteraction(false); conn.setAllowUserInteraction(false);
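The fix passes the configured proxy to openConnection so the notification actually goes through it (MAPREDUCE-3840 above). A JDK-only sketch of that call follows; the URL and proxy address are placeholders, since the real notifier reads both from the job configuration.

    import java.net.HttpURLConnection;
    import java.net.InetSocketAddress;
    import java.net.Proxy;
    import java.net.URL;

    public class ProxiedNotification {
      public static void main(String[] args) throws Exception {
        // Placeholder values; the real notifier builds both from the job configuration.
        URL urlToNotify = new URL("http://example.com/notify");
        Proxy proxyToUse =
            new Proxy(Proxy.Type.HTTP, new InetSocketAddress("proxy.example.com", 8080));

        // The fix: open the connection through the proxy instead of directly.
        HttpURLConnection conn =
            (HttpURLConnection) urlToNotify.openConnection(proxyToUse);
        conn.setConnectTimeout(5 * 1000);
        conn.setReadTimeout(5 * 1000);
        conn.setAllowUserInteraction(false);
        // No network traffic happens until connect()/getResponseCode() is called.
        System.out.println("Would notify " + urlToNotify + " via " + proxyToUse);
      }
    }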

View File

@ -35,6 +35,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
@ -106,7 +107,7 @@ import org.apache.hadoop.yarn.state.StateMachineFactory;
/** Implementation of Job interface. Maintains the state machines of Job. /** Implementation of Job interface. Maintains the state machines of Job.
* The read and write calls use ReadWriteLock for concurrency. * The read and write calls use ReadWriteLock for concurrency.
*/ */
@SuppressWarnings({ "rawtypes", "deprecation", "unchecked" }) @SuppressWarnings({ "rawtypes", "unchecked" })
public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job, public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
EventHandler<JobEvent> { EventHandler<JobEvent> {
@ -153,6 +154,10 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
private boolean lazyTasksCopyNeeded = false; private boolean lazyTasksCopyNeeded = false;
volatile Map<TaskId, Task> tasks = new LinkedHashMap<TaskId, Task>(); volatile Map<TaskId, Task> tasks = new LinkedHashMap<TaskId, Task>();
private Counters jobCounters = new Counters(); private Counters jobCounters = new Counters();
private Object fullCountersLock = new Object();
private Counters fullCounters = null;
private Counters finalMapCounters = null;
private Counters finalReduceCounters = null;
// FIXME: // FIXME:
// //
// Can then replace task-level uber counters (MR-2424) with job-level ones // Can then replace task-level uber counters (MR-2424) with job-level ones
@ -473,11 +478,21 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
@Override @Override
public Counters getAllCounters() { public Counters getAllCounters() {
Counters counters = new Counters();
readLock.lock(); readLock.lock();
try { try {
JobState state = getState();
if (state == JobState.ERROR || state == JobState.FAILED
|| state == JobState.KILLED || state == JobState.SUCCEEDED) {
this.mayBeConstructFinalFullCounters();
return fullCounters;
}
Counters counters = new Counters();
counters.incrAllCounters(jobCounters); counters.incrAllCounters(jobCounters);
return incrTaskCounters(counters, tasks.values()); return incrTaskCounters(counters, tasks.values());
} finally { } finally {
readLock.unlock(); readLock.unlock();
} }
@ -525,17 +540,21 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
try { try {
JobState state = getState(); JobState state = getState();
// jobFile can be null if the job is not yet inited.
String jobFile =
remoteJobConfFile == null ? "" : remoteJobConfFile.toString();
if (getState() == JobState.NEW) { if (getState() == JobState.NEW) {
return MRBuilderUtils.newJobReport(jobId, jobName, username, state, return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
appSubmitTime, startTime, finishTime, setupProgress, 0.0f, 0.0f, appSubmitTime, startTime, finishTime, setupProgress, 0.0f, 0.0f,
cleanupProgress, remoteJobConfFile.toString(), amInfos, isUber); cleanupProgress, jobFile, amInfos, isUber);
} }
computeProgress(); computeProgress();
return MRBuilderUtils.newJobReport(jobId, jobName, username, state, return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
appSubmitTime, startTime, finishTime, setupProgress, appSubmitTime, startTime, finishTime, setupProgress,
this.mapProgress, this.reduceProgress, this.mapProgress, this.reduceProgress,
cleanupProgress, remoteJobConfFile.toString(), amInfos, isUber); cleanupProgress, jobFile, amInfos, isUber);
} finally { } finally {
readLock.unlock(); readLock.unlock();
} }
@ -1143,26 +1162,49 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
// not be generated for KilledJobs, etc. // not be generated for KilledJobs, etc.
private static JobFinishedEvent createJobFinishedEvent(JobImpl job) { private static JobFinishedEvent createJobFinishedEvent(JobImpl job) {
Counters mapCounters = new Counters(); job.mayBeConstructFinalFullCounters();
Counters reduceCounters = new Counters();
for (Task t : job.tasks.values()) {
Counters counters = t.getCounters();
switch (t.getType()) {
case MAP: mapCounters.incrAllCounters(counters); break;
case REDUCE: reduceCounters.incrAllCounters(counters); break;
}
}
JobFinishedEvent jfe = new JobFinishedEvent( JobFinishedEvent jfe = new JobFinishedEvent(
job.oldJobId, job.finishTime, job.oldJobId, job.finishTime,
job.succeededMapTaskCount, job.succeededReduceTaskCount, job.succeededMapTaskCount, job.succeededReduceTaskCount,
job.failedMapTaskCount, job.failedReduceTaskCount, job.failedMapTaskCount, job.failedReduceTaskCount,
mapCounters, job.finalMapCounters,
reduceCounters, job.finalReduceCounters,
job.getAllCounters()); job.fullCounters);
return jfe; return jfe;
} }
private void mayBeConstructFinalFullCounters() {
// Calculating full-counters. This should happen only once for the job.
synchronized (this.fullCountersLock) {
if (this.fullCounters != null) {
// Already constructed. Just return.
return;
}
this.constructFinalFullcounters();
}
}
@Private
public void constructFinalFullcounters() {
this.fullCounters = new Counters();
this.finalMapCounters = new Counters();
this.finalReduceCounters = new Counters();
this.fullCounters.incrAllCounters(jobCounters);
for (Task t : this.tasks.values()) {
Counters counters = t.getCounters();
switch (t.getType()) {
case MAP:
this.finalMapCounters.incrAllCounters(counters);
break;
case REDUCE:
this.finalReduceCounters.incrAllCounters(counters);
break;
}
this.fullCounters.incrAllCounters(counters);
}
}
// Task-start has been moved out of InitTransition, so this arc simply // Task-start has been moved out of InitTransition, so this arc simply
// hardcodes 0 for both map and reduce finished tasks. // hardcodes 0 for both map and reduce finished tasks.
private static class KillNewJobTransition private static class KillNewJobTransition
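getAllCounters() now returns the cached full counters once the job is in a terminal state, and mayBeConstructFinalFullCounters() guards the one-time aggregation with fullCountersLock. A stripped-down sketch of that build-at-most-once-under-a-lock pattern, with a plain Object standing in for the Counters aggregate:

    public class ComputeOnceSketch {
      private final Object fullCountersLock = new Object();
      private Object fullCounters;  // stands in for the real Counters aggregate

      // Mirrors mayBeConstructFinalFullCounters(): build the aggregate at most
      // once, no matter how many callers race here.
      void mayBeConstruct() {
        synchronized (fullCountersLock) {
          if (fullCounters != null) {
            return;  // already constructed, just return
          }
          fullCounters = buildExpensiveAggregate();
        }
      }

      private Object buildExpensiveAggregate() {
        // Placeholder for folding per-task counters into the job-level totals.
        return new Object();
      }

      public static void main(String[] args) {
        ComputeOnceSketch sketch = new ComputeOnceSketch();
        sketch.mayBeConstruct();
        sketch.mayBeConstruct();  // second call is a no-op
        System.out.println(sketch.fullCounters != null);
      }
    }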

View File

@ -19,19 +19,24 @@
package org.apache.hadoop.mapreduce.v2.app.job.impl; package org.apache.hadoop.mapreduce.v2.app.job.impl;
import java.io.IOException; import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import java.util.EnumSet; import java.util.EnumSet;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Map.Entry; import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
@ -142,7 +147,7 @@ public abstract class TaskAttemptImpl implements
protected final JobConf conf; protected final JobConf conf;
protected final Path jobFile; protected final Path jobFile;
protected final int partition; protected final int partition;
protected final EventHandler eventHandler; protected EventHandler eventHandler;
private final TaskAttemptId attemptId; private final TaskAttemptId attemptId;
private final Clock clock; private final Clock clock;
private final org.apache.hadoop.mapred.JobID oldJobId; private final org.apache.hadoop.mapred.JobID oldJobId;
@ -1056,7 +1061,7 @@ public abstract class TaskAttemptImpl implements
} }
} }
private static class RequestContainerTransition implements static class RequestContainerTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> { SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
private final boolean rescheduled; private final boolean rescheduled;
public RequestContainerTransition(boolean rescheduled) { public RequestContainerTransition(boolean rescheduled) {
@ -1076,19 +1081,49 @@ public abstract class TaskAttemptImpl implements
taskAttempt.attemptId, taskAttempt.attemptId,
taskAttempt.resourceCapability)); taskAttempt.resourceCapability));
} else { } else {
int i = 0; Set<String> racks = new HashSet<String>();
String[] racks = new String[taskAttempt.dataLocalHosts.length];
for (String host : taskAttempt.dataLocalHosts) { for (String host : taskAttempt.dataLocalHosts) {
racks[i++] = RackResolver.resolve(host).getNetworkLocation(); racks.add(RackResolver.resolve(host).getNetworkLocation());
} }
taskAttempt.eventHandler.handle( taskAttempt.eventHandler.handle(new ContainerRequestEvent(
new ContainerRequestEvent(taskAttempt.attemptId, taskAttempt.attemptId, taskAttempt.resourceCapability, taskAttempt
taskAttempt.resourceCapability, .resolveHosts(taskAttempt.dataLocalHosts), racks
taskAttempt.dataLocalHosts, racks)); .toArray(new String[racks.size()])));
} }
} }
} }
protected String[] resolveHosts(String[] src) {
String[] result = new String[src.length];
for (int i = 0; i < src.length; i++) {
if (isIP(src[i])) {
result[i] = resolveHost(src[i]);
} else {
result[i] = src[i];
}
}
return result;
}
protected String resolveHost(String src) {
String result = src; // Fallback in case of failure.
try {
InetAddress addr = InetAddress.getByName(src);
result = addr.getHostName();
} catch (UnknownHostException e) {
LOG.warn("Failed to resolve address: " + src
+ ". Continuing to use the same.");
}
return result;
}
private static final Pattern ipPattern = // Pattern for matching ip
Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}");
protected boolean isIP(String src) {
return ipPattern.matcher(src).matches();
}
private static class ContainerAssignedTransition implements private static class ContainerAssignedTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> { SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings({ "unchecked" }) @SuppressWarnings({ "unchecked" })

View File

@ -18,6 +18,10 @@
package org.apache.hadoop.mapreduce.v2.app; package org.apache.hadoop.mapreduce.v2.app;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.spy;
import java.util.Iterator; import java.util.Iterator;
import junit.framework.Assert; import junit.framework.Assert;
@ -35,6 +39,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
import org.junit.Test; import org.junit.Test;
/** /**
@ -175,6 +180,41 @@ public class TestMRApp {
app.waitForState(job, JobState.ERROR); app.waitForState(job, JobState.ERROR);
} }
private final class MRAppWithSpiedJob extends MRApp {
private JobImpl spiedJob;
private MRAppWithSpiedJob(int maps, int reduces, boolean autoComplete,
String testName, boolean cleanOnStart) {
super(maps, reduces, autoComplete, testName, cleanOnStart);
}
@Override
protected Job createJob(Configuration conf) {
spiedJob = spy((JobImpl) super.createJob(conf));
((AppContext) getContext()).getAllJobs().put(spiedJob.getID(), spiedJob);
return spiedJob;
}
JobImpl getSpiedJob() {
return this.spiedJob;
}
}
@Test
public void testCountersOnJobFinish() throws Exception {
MRAppWithSpiedJob app =
new MRAppWithSpiedJob(1, 1, true, this.getClass().getName(), true);
JobImpl job = (JobImpl)app.submit(new Configuration());
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
System.out.println(job.getAllCounters());
// Just call getCounters
job.getAllCounters();
job.getAllCounters();
// Should be called only once
verify(job, times(1)).constructFinalFullcounters();
}
@Test @Test
public void checkJobStateTypeConversion() { public void checkJobStateTypeConversion() {
//verify that all states can be converted without //verify that all states can be converted without
@ -200,5 +240,6 @@ public class TestMRApp {
t.testCommitPending(); t.testCommitPending();
t.testCompletedMapsForReduceSlowstart(); t.testCompletedMapsForReduceSlowstart();
t.testJobError(); t.testJobError();
t.testCountersOnJobFinish();
} }
} }
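testCountersOnJobFinish() spies on the job and verifies that constructFinalFullcounters() runs exactly once, however many times getAllCounters() is called afterwards. A minimal Mockito spy-and-verify sketch of that idea on a made-up Job class (not the real JobImpl):

    import static org.mockito.Mockito.spy;
    import static org.mockito.Mockito.times;
    import static org.mockito.Mockito.verify;

    public class SpyVerifySketch {
      // Made-up class standing in for JobImpl; not the real implementation.
      public static class Job {
        private Object cached;

        public Object getAllCounters() {
          if (cached == null) {
            cached = constructFinalFullcounters();
          }
          return cached;
        }

        public Object constructFinalFullcounters() {
          return new Object();
        }
      }

      public static void main(String[] args) {
        Job job = spy(new Job());
        job.getAllCounters();
        job.getAllCounters();
        // Should be called only once, as testCountersOnJobFinish() asserts.
        verify(job, times(1)).constructFinalFullcounters();
        System.out.println("single construction verified");
      }
    }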

View File

@ -18,48 +18,40 @@
package org.apache.hadoop.mapreduce.v2.app.job.impl; package org.apache.hadoop.mapreduce.v2.app.job.impl;
import java.io.IOException; import static org.mockito.Matchers.any;
import java.util.Map; import static org.mockito.Mockito.doNothing;
import java.util.HashMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl.JobNoTasksCompletedTransition;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
import org.apache.hadoop.mapreduce.v2.app.MRApp;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.event.EventHandler;
import org.junit.Test;
import org.junit.Assert;
import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when; import static org.mockito.Mockito.when;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.verify; import java.io.IOException;
import static org.mockito.Mockito.anyString; import java.util.HashMap;
import static org.mockito.Mockito.any; import java.util.Map;
import org.mockito.ArgumentMatcher;
import org.mockito.Mockito; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl.JobNoTasksCompletedTransition;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.event.EventHandler;
import org.junit.Assert;
import org.junit.Test;
/** /**
* Tests various functions of the JobImpl class * Tests various functions of the JobImpl class
*/ */
@SuppressWarnings({"unchecked", "rawtypes"})
public class TestJobImpl { public class TestJobImpl {
@Test @Test
@ -107,6 +99,8 @@ public class TestJobImpl {
JobImpl.checkJobCompleteSuccess(mockJob)); JobImpl.checkJobCompleteSuccess(mockJob));
Assert.assertEquals("checkJobCompleteSuccess returns incorrect state", Assert.assertEquals("checkJobCompleteSuccess returns incorrect state",
JobImpl.checkJobCompleteSuccess(mockJob), JobState.SUCCEEDED); JobImpl.checkJobCompleteSuccess(mockJob), JobState.SUCCEEDED);
} }
@Test @Test
@ -139,6 +133,7 @@ public class TestJobImpl {
t.testJobNoTasksTransition(); t.testJobNoTasksTransition();
t.testCheckJobCompleteSuccess(); t.testCheckJobCompleteSuccess();
t.testCheckJobCompleteSuccessFailed(); t.testCheckJobCompleteSuccessFailed();
t.testCheckAccess();
} }
@Test @Test

View File

@ -18,30 +18,54 @@
package org.apache.hadoop.mapreduce.v2.app.job.impl; package org.apache.hadoop.mapreduce.v2.app.job.impl;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.util.HashMap;
import java.util.Iterator; import java.util.Iterator;
import java.util.Map; import java.util.Map;
import junit.framework.Assert; import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapTaskAttemptImpl;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletion; import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletion;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState; import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState; import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.MRApp; import org.apache.hadoop.mapreduce.v2.app.MRApp;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.SystemClock;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.util.BuilderUtils;
import org.junit.Test; import org.junit.Test;
import org.mockito.ArgumentCaptor;
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
public class TestTaskAttempt{ public class TestTaskAttempt{
@ -58,6 +82,96 @@ public class TestTaskAttempt{
testMRAppHistory(app); testMRAppHistory(app);
} }
@SuppressWarnings("rawtypes")
@Test
public void testSingleRackRequest() throws Exception {
TaskAttemptImpl.RequestContainerTransition rct =
new TaskAttemptImpl.RequestContainerTransition(false);
EventHandler eventHandler = mock(EventHandler.class);
String[] hosts = new String[3];
hosts[0] = "host1";
hosts[1] = "host2";
hosts[2] = "host3";
TaskSplitMetaInfo splitInfo =
new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024l);
TaskAttemptImpl mockTaskAttempt =
createMapTaskAttemptImplForTest(eventHandler, splitInfo);
TaskAttemptEvent mockTAEvent = mock(TaskAttemptEvent.class);
rct.transition(mockTaskAttempt, mockTAEvent);
ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
verify(eventHandler, times(2)).handle(arg.capture());
if (!(arg.getAllValues().get(1) instanceof ContainerRequestEvent)) {
Assert.fail("Second Event not of type ContainerRequestEvent");
}
ContainerRequestEvent cre =
(ContainerRequestEvent) arg.getAllValues().get(1);
String[] requestedRacks = cre.getRacks();
// Only a single occurrence of /DefaultRack
assertEquals(1, requestedRacks.length);
}
@SuppressWarnings("rawtypes")
@Test
public void testHostResolveAttempt() throws Exception {
TaskAttemptImpl.RequestContainerTransition rct =
new TaskAttemptImpl.RequestContainerTransition(false);
EventHandler eventHandler = mock(EventHandler.class);
String[] hosts = new String[3];
hosts[0] = "192.168.1.1";
hosts[1] = "host2";
hosts[2] = "host3";
TaskSplitMetaInfo splitInfo =
new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024l);
TaskAttemptImpl mockTaskAttempt =
createMapTaskAttemptImplForTest(eventHandler, splitInfo);
TaskAttemptImpl spyTa = spy(mockTaskAttempt);
when(spyTa.resolveHost(hosts[0])).thenReturn("host1");
TaskAttemptEvent mockTAEvent = mock(TaskAttemptEvent.class);
rct.transition(spyTa, mockTAEvent);
verify(spyTa).resolveHost(hosts[0]);
ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
verify(eventHandler, times(2)).handle(arg.capture());
if (!(arg.getAllValues().get(1) instanceof ContainerRequestEvent)) {
Assert.fail("Second Event not of type ContainerRequestEvent");
}
Map<String, Boolean> expected = new HashMap<String, Boolean>();
expected.put("host1", true);
expected.put("host2", true);
expected.put("host3", true);
ContainerRequestEvent cre =
(ContainerRequestEvent) arg.getAllValues().get(1);
String[] requestedHosts = cre.getHosts();
for (String h : requestedHosts) {
expected.remove(h);
}
assertEquals(0, expected.size());
}
@SuppressWarnings("rawtypes")
private TaskAttemptImpl createMapTaskAttemptImplForTest(
EventHandler eventHandler, TaskSplitMetaInfo taskSplitMetaInfo) {
ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
Path jobFile = mock(Path.class);
JobConf jobConf = new JobConf();
OutputCommitter outputCommitter = mock(OutputCommitter.class);
Clock clock = new SystemClock();
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
taskSplitMetaInfo, jobConf, taListener, outputCommitter, null,
null, clock);
return taImpl;
}
private void testMRAppHistory(MRApp app) throws Exception { private void testMRAppHistory(MRApp app) throws Exception {
Configuration conf = new Configuration(); Configuration conf = new Configuration();
Job job = app.submit(conf); Job job = app.submit(conf);

View File

@ -59,7 +59,7 @@ import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@SuppressWarnings({ "rawtypes", "deprecation" }) @SuppressWarnings("rawtypes")
public class TestTaskImpl { public class TestTaskImpl {
private static final Log LOG = LogFactory.getLog(TestTaskImpl.class); private static final Log LOG = LogFactory.getLog(TestTaskImpl.class);

View File

@ -16,12 +16,12 @@
<parent> <parent>
<artifactId>hadoop-mapreduce-client</artifactId> <artifactId>hadoop-mapreduce-client</artifactId>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
</parent> </parent>
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-common</artifactId> <artifactId>hadoop-mapreduce-client-common</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<name>hadoop-mapreduce-client-common</name> <name>hadoop-mapreduce-client-common</name>
<properties> <properties>

View File

@@ -24,6 +24,7 @@ import java.net.InetAddress;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
import java.net.UnknownHostException; import java.net.UnknownHostException;
import java.util.Calendar; import java.util.Calendar;
import java.util.Iterator;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
@@ -46,6 +47,9 @@ import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationId;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
public class JobHistoryUtils { public class JobHistoryUtils {
/** /**
@@ -110,6 +114,9 @@ public class JobHistoryUtils {
public static final Pattern TIMESTAMP_DIR_PATTERN = Pattern.compile(TIMESTAMP_DIR_REGEX); public static final Pattern TIMESTAMP_DIR_PATTERN = Pattern.compile(TIMESTAMP_DIR_REGEX);
private static final String TIMESTAMP_DIR_FORMAT = "%04d" + File.separator + "%02d" + File.separator + "%02d"; private static final String TIMESTAMP_DIR_FORMAT = "%04d" + File.separator + "%02d" + File.separator + "%02d";
private static final Splitter ADDR_SPLITTER = Splitter.on(':').trimResults();
private static final Joiner JOINER = Joiner.on("");
private static final PathFilter CONF_FILTER = new PathFilter() { private static final PathFilter CONF_FILTER = new PathFilter() {
@Override @Override
public boolean accept(Path path) { public boolean accept(Path path) {
@@ -478,8 +485,16 @@ public class JobHistoryUtils {
public static String getHistoryUrl(Configuration conf, ApplicationId appId) public static String getHistoryUrl(Configuration conf, ApplicationId appId)
throws UnknownHostException { throws UnknownHostException {
//construct the history url for job //construct the history url for job
String hsAddress = conf.get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, String addr = conf.get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS); JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS);
Iterator<String> it = ADDR_SPLITTER.split(addr).iterator();
it.next(); // ignore the bind host
String port = it.next();
// Use hs address to figure out the host for webapp
addr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS);
String host = ADDR_SPLITTER.split(addr).iterator().next();
String hsAddress = JOINER.join(host, ":", port);
InetSocketAddress address = NetUtils.createSocketAddr( InetSocketAddress address = NetUtils.createSocketAddr(
hsAddress, JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_PORT, hsAddress, JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_PORT,
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS); JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS);
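A minimal, self-contained sketch of the host/port recombination that the getHistoryUrl() change above performs, using the same Guava Splitter/Joiner pattern: keep the port of the webapp address but take the host from the history server's RPC address. The class name and sample addresses below are hypothetical, not taken from the patch.

import java.util.Iterator;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;

public class HistoryUrlSketch {
  private static final Splitter ADDR_SPLITTER = Splitter.on(':').trimResults();
  private static final Joiner JOINER = Joiner.on("");

  // Keep the webapp port, but take the host from the RPC (history server) address.
  static String webappHostPort(String webappAddr, String rpcAddr) {
    Iterator<String> it = ADDR_SPLITTER.split(webappAddr).iterator();
    it.next();               // skip the bind host, often 0.0.0.0
    String port = it.next(); // the webapp port
    String host = ADDR_SPLITTER.split(rpcAddr).iterator().next();
    return JOINER.join(host, ":", port);
  }

  public static void main(String[] args) {
    // hypothetical values mirroring mapreduce.jobhistory.webapp.address / mapreduce.jobhistory.address
    System.out.println(webappHostPort("0.0.0.0:19888", "jhs.example.com:10020")); // jhs.example.com:19888
  }
}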

View File

@@ -16,12 +16,12 @@
<parent> <parent>
<artifactId>hadoop-mapreduce-client</artifactId> <artifactId>hadoop-mapreduce-client</artifactId>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
</parent> </parent>
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-core</artifactId> <artifactId>hadoop-mapreduce-client-core</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<name>hadoop-mapreduce-client-core</name> <name>hadoop-mapreduce-client-core</name>
<properties> <properties>

View File

@@ -141,7 +141,7 @@ class MapTask extends Task {
private TaskReporter reporter; private TaskReporter reporter;
private long bytesInPrev = -1; private long bytesInPrev = -1;
private long bytesInCurr = -1; private long bytesInCurr = -1;
private final Statistics fsStats; private final List<Statistics> fsStats;
TrackedRecordReader(TaskReporter reporter, JobConf job) TrackedRecordReader(TaskReporter reporter, JobConf job)
throws IOException{ throws IOException{
@@ -149,7 +149,7 @@ class MapTask extends Task {
fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ); fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
this.reporter = reporter; this.reporter = reporter;
Statistics matchedStats = null; List<Statistics> matchedStats = null;
if (this.reporter.getInputSplit() instanceof FileSplit) { if (this.reporter.getInputSplit() instanceof FileSplit) {
matchedStats = getFsStatistics(((FileSplit) this.reporter matchedStats = getFsStatistics(((FileSplit) this.reporter
.getInputSplit()).getPath(), job); .getInputSplit()).getPath(), job);
@@ -210,8 +210,13 @@ class MapTask extends Task {
return reporter; return reporter;
} }
private long getInputBytes(Statistics stats) { private long getInputBytes(List<Statistics> stats) {
return stats == null ? 0 : stats.getBytesRead(); if (stats == null) return 0;
long bytesRead = 0;
for (Statistics stat: stats) {
bytesRead = bytesRead + stat.getBytesRead();
}
return bytesRead;
} }
} }
@@ -426,7 +431,7 @@ class MapTask extends Task {
private final org.apache.hadoop.mapreduce.Counter inputRecordCounter; private final org.apache.hadoop.mapreduce.Counter inputRecordCounter;
private final org.apache.hadoop.mapreduce.Counter fileInputByteCounter; private final org.apache.hadoop.mapreduce.Counter fileInputByteCounter;
private final TaskReporter reporter; private final TaskReporter reporter;
private final Statistics fsStats; private final List<Statistics> fsStats;
NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split, NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat, org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
@@ -439,7 +444,7 @@ class MapTask extends Task {
this.fileInputByteCounter = reporter this.fileInputByteCounter = reporter
.getCounter(FileInputFormatCounter.BYTES_READ); .getCounter(FileInputFormatCounter.BYTES_READ);
Statistics matchedStats = null; List <Statistics> matchedStats = null;
if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) { if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split) matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
.getPath(), taskContext.getConfiguration()); .getPath(), taskContext.getConfiguration());
@@ -498,8 +503,13 @@ class MapTask extends Task {
return result; return result;
} }
private long getInputBytes(Statistics stats) { private long getInputBytes(List<Statistics> stats) {
return stats == null ? 0 : stats.getBytesRead(); if (stats == null) return 0;
long bytesRead = 0;
for (Statistics stat: stats) {
bytesRead = bytesRead + stat.getBytesRead();
}
return bytesRead;
} }
} }
@@ -554,7 +564,7 @@ class MapTask extends Task {
private final Counters.Counter mapOutputRecordCounter; private final Counters.Counter mapOutputRecordCounter;
private final Counters.Counter fileOutputByteCounter; private final Counters.Counter fileOutputByteCounter;
private final Statistics fsStats; private final List<Statistics> fsStats;
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
NewDirectOutputCollector(MRJobConfig jobContext, NewDirectOutputCollector(MRJobConfig jobContext,
@@ -566,7 +576,7 @@ class MapTask extends Task {
fileOutputByteCounter = reporter fileOutputByteCounter = reporter
.getCounter(FileOutputFormatCounter.BYTES_WRITTEN); .getCounter(FileOutputFormatCounter.BYTES_WRITTEN);
Statistics matchedStats = null; List<Statistics> matchedStats = null;
if (outputFormat instanceof org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) { if (outputFormat instanceof org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) {
matchedStats = getFsStatistics(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat matchedStats = getFsStatistics(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
.getOutputPath(taskContext), taskContext.getConfiguration()); .getOutputPath(taskContext), taskContext.getConfiguration());
@@ -603,8 +613,13 @@ class MapTask extends Task {
} }
} }
private long getOutputBytes(Statistics stats) { private long getOutputBytes(List<Statistics> stats) {
return stats == null ? 0 : stats.getBytesWritten(); if (stats == null) return 0;
long bytesWritten = 0;
for (Statistics stat: stats) {
bytesWritten = bytesWritten + stat.getBytesWritten();
}
return bytesWritten;
} }
} }
@@ -735,7 +750,7 @@ class MapTask extends Task {
private final Counters.Counter mapOutputRecordCounter; private final Counters.Counter mapOutputRecordCounter;
private final Counters.Counter fileOutputByteCounter; private final Counters.Counter fileOutputByteCounter;
private final Statistics fsStats; private final List<Statistics> fsStats;
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
public DirectMapOutputCollector(TaskUmbilicalProtocol umbilical, public DirectMapOutputCollector(TaskUmbilicalProtocol umbilical,
@@ -750,7 +765,7 @@ class MapTask extends Task {
fileOutputByteCounter = reporter fileOutputByteCounter = reporter
.getCounter(FileOutputFormatCounter.BYTES_WRITTEN); .getCounter(FileOutputFormatCounter.BYTES_WRITTEN);
Statistics matchedStats = null; List<Statistics> matchedStats = null;
if (outputFormat instanceof FileOutputFormat) { if (outputFormat instanceof FileOutputFormat) {
matchedStats = getFsStatistics(FileOutputFormat.getOutputPath(job), job); matchedStats = getFsStatistics(FileOutputFormat.getOutputPath(job), job);
} }
@@ -785,8 +800,13 @@ class MapTask extends Task {
mapOutputRecordCounter.increment(1); mapOutputRecordCounter.increment(1);
} }
private long getOutputBytes(Statistics stats) { private long getOutputBytes(List<Statistics> stats) {
return stats == null ? 0 : stats.getBytesWritten(); if (stats == null) return 0;
long bytesWritten = 0;
for (Statistics stat: stats) {
bytesWritten = bytesWritten + stat.getBytesWritten();
}
return bytesWritten;
} }
} }

View File

@@ -476,14 +476,14 @@ public class ReduceTask extends Task {
private final RecordWriter<K, V> real; private final RecordWriter<K, V> real;
private final org.apache.hadoop.mapred.Counters.Counter reduceOutputCounter; private final org.apache.hadoop.mapred.Counters.Counter reduceOutputCounter;
private final org.apache.hadoop.mapred.Counters.Counter fileOutputByteCounter; private final org.apache.hadoop.mapred.Counters.Counter fileOutputByteCounter;
private final Statistics fsStats; private final List<Statistics> fsStats;
@SuppressWarnings({ "deprecation", "unchecked" }) @SuppressWarnings({ "deprecation", "unchecked" })
public OldTrackingRecordWriter(ReduceTask reduce, JobConf job, public OldTrackingRecordWriter(ReduceTask reduce, JobConf job,
TaskReporter reporter, String finalName) throws IOException { TaskReporter reporter, String finalName) throws IOException {
this.reduceOutputCounter = reduce.reduceOutputCounter; this.reduceOutputCounter = reduce.reduceOutputCounter;
this.fileOutputByteCounter = reduce.fileOutputByteCounter; this.fileOutputByteCounter = reduce.fileOutputByteCounter;
Statistics matchedStats = null; List<Statistics> matchedStats = null;
if (job.getOutputFormat() instanceof FileOutputFormat) { if (job.getOutputFormat() instanceof FileOutputFormat) {
matchedStats = getFsStatistics(FileOutputFormat.getOutputPath(job), job); matchedStats = getFsStatistics(FileOutputFormat.getOutputPath(job), job);
} }
@@ -514,8 +514,13 @@ public class ReduceTask extends Task {
fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev); fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
} }
private long getOutputBytes(Statistics stats) { private long getOutputBytes(List<Statistics> stats) {
return stats == null ? 0 : stats.getBytesWritten(); if (stats == null) return 0;
long bytesWritten = 0;
for (Statistics stat: stats) {
bytesWritten = bytesWritten + stat.getBytesWritten();
}
return bytesWritten;
} }
} }
@@ -524,7 +529,7 @@ public class ReduceTask extends Task {
private final org.apache.hadoop.mapreduce.RecordWriter<K,V> real; private final org.apache.hadoop.mapreduce.RecordWriter<K,V> real;
private final org.apache.hadoop.mapreduce.Counter outputRecordCounter; private final org.apache.hadoop.mapreduce.Counter outputRecordCounter;
private final org.apache.hadoop.mapreduce.Counter fileOutputByteCounter; private final org.apache.hadoop.mapreduce.Counter fileOutputByteCounter;
private final Statistics fsStats; private final List<Statistics> fsStats;
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
NewTrackingRecordWriter(ReduceTask reduce, NewTrackingRecordWriter(ReduceTask reduce,
@@ -533,7 +538,7 @@ public class ReduceTask extends Task {
this.outputRecordCounter = reduce.reduceOutputCounter; this.outputRecordCounter = reduce.reduceOutputCounter;
this.fileOutputByteCounter = reduce.fileOutputByteCounter; this.fileOutputByteCounter = reduce.fileOutputByteCounter;
Statistics matchedStats = null; List<Statistics> matchedStats = null;
if (reduce.outputFormat instanceof org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) { if (reduce.outputFormat instanceof org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) {
matchedStats = getFsStatistics(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat matchedStats = getFsStatistics(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
.getOutputPath(taskContext), taskContext.getConfiguration()); .getOutputPath(taskContext), taskContext.getConfiguration());
@@ -566,8 +571,13 @@ public class ReduceTask extends Task {
outputRecordCounter.increment(1); outputRecordCounter.increment(1);
} }
private long getOutputBytes(Statistics stats) { private long getOutputBytes(List<Statistics> stats) {
return stats == null ? 0 : stats.getBytesWritten(); if (stats == null) return 0;
long bytesWritten = 0;
for (Statistics stat: stats) {
bytesWritten = bytesWritten + stat.getBytesWritten();
}
return bytesWritten;
} }
} }

View File

@@ -24,6 +24,7 @@ import java.io.IOException;
import java.lang.management.GarbageCollectorMXBean; import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory; import java.lang.management.ManagementFactory;
import java.text.NumberFormat; import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.HashMap; import java.util.HashMap;
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
@@ -326,14 +327,13 @@ abstract public class Task implements Writable, Configurable {
* the path. * the path.
* @return a Statistics instance, or null if none is found for the scheme. * @return a Statistics instance, or null if none is found for the scheme.
*/ */
protected static Statistics getFsStatistics(Path path, Configuration conf) throws IOException { protected static List<Statistics> getFsStatistics(Path path, Configuration conf) throws IOException {
Statistics matchedStats = null; List<Statistics> matchedStats = new ArrayList<FileSystem.Statistics>();
path = path.getFileSystem(conf).makeQualified(path); path = path.getFileSystem(conf).makeQualified(path);
String scheme = path.toUri().getScheme(); String scheme = path.toUri().getScheme();
for (Statistics stats : FileSystem.getAllStatistics()) { for (Statistics stats : FileSystem.getAllStatistics()) {
if (stats.getScheme().equals(scheme)) { if (stats.getScheme().equals(scheme)) {
matchedStats = stats; matchedStats.add(stats);
break;
} }
} }
return matchedStats; return matchedStats;
@@ -866,41 +866,53 @@ abstract public class Task implements Writable, Configurable {
* system and only creates the counters when they are needed. * system and only creates the counters when they are needed.
*/ */
class FileSystemStatisticUpdater { class FileSystemStatisticUpdater {
private FileSystem.Statistics stats; private List<FileSystem.Statistics> stats;
private Counters.Counter readBytesCounter, writeBytesCounter, private Counters.Counter readBytesCounter, writeBytesCounter,
readOpsCounter, largeReadOpsCounter, writeOpsCounter; readOpsCounter, largeReadOpsCounter, writeOpsCounter;
private String scheme;
FileSystemStatisticUpdater(FileSystem.Statistics stats) { FileSystemStatisticUpdater(List<FileSystem.Statistics> stats, String scheme) {
this.stats = stats; this.stats = stats;
this.scheme = scheme;
} }
void updateCounters() { void updateCounters() {
String scheme = stats.getScheme();
if (readBytesCounter == null) { if (readBytesCounter == null) {
readBytesCounter = counters.findCounter(scheme, readBytesCounter = counters.findCounter(scheme,
FileSystemCounter.BYTES_READ); FileSystemCounter.BYTES_READ);
} }
readBytesCounter.setValue(stats.getBytesRead());
if (writeBytesCounter == null) { if (writeBytesCounter == null) {
writeBytesCounter = counters.findCounter(scheme, writeBytesCounter = counters.findCounter(scheme,
FileSystemCounter.BYTES_WRITTEN); FileSystemCounter.BYTES_WRITTEN);
} }
writeBytesCounter.setValue(stats.getBytesWritten());
if (readOpsCounter == null) { if (readOpsCounter == null) {
readOpsCounter = counters.findCounter(scheme, readOpsCounter = counters.findCounter(scheme,
FileSystemCounter.READ_OPS); FileSystemCounter.READ_OPS);
} }
readOpsCounter.setValue(stats.getReadOps());
if (largeReadOpsCounter == null) { if (largeReadOpsCounter == null) {
largeReadOpsCounter = counters.findCounter(scheme, largeReadOpsCounter = counters.findCounter(scheme,
FileSystemCounter.LARGE_READ_OPS); FileSystemCounter.LARGE_READ_OPS);
} }
largeReadOpsCounter.setValue(stats.getLargeReadOps());
if (writeOpsCounter == null) { if (writeOpsCounter == null) {
writeOpsCounter = counters.findCounter(scheme, writeOpsCounter = counters.findCounter(scheme,
FileSystemCounter.WRITE_OPS); FileSystemCounter.WRITE_OPS);
} }
writeOpsCounter.setValue(stats.getWriteOps()); long readBytes = 0;
long writeBytes = 0;
long readOps = 0;
long largeReadOps = 0;
long writeOps = 0;
for (FileSystem.Statistics stat: stats) {
readBytes = readBytes + stat.getBytesRead();
writeBytes = writeBytes + stat.getBytesWritten();
readOps = readOps + stat.getReadOps();
largeReadOps = largeReadOps + stat.getLargeReadOps();
writeOps = writeOps + stat.getWriteOps();
}
readBytesCounter.setValue(readBytes);
writeBytesCounter.setValue(writeBytes);
readOpsCounter.setValue(readOps);
largeReadOpsCounter.setValue(largeReadOps);
writeOpsCounter.setValue(writeOps);
} }
} }
@@ -911,12 +923,24 @@ abstract public class Task implements Writable, Configurable {
new HashMap<String, FileSystemStatisticUpdater>(); new HashMap<String, FileSystemStatisticUpdater>();
private synchronized void updateCounters() { private synchronized void updateCounters() {
Map<String, List<FileSystem.Statistics>> map = new
HashMap<String, List<FileSystem.Statistics>>();
for(Statistics stat: FileSystem.getAllStatistics()) { for(Statistics stat: FileSystem.getAllStatistics()) {
String uriScheme = stat.getScheme(); String uriScheme = stat.getScheme();
FileSystemStatisticUpdater updater = statisticUpdaters.get(uriScheme); if (map.containsKey(uriScheme)) {
List<FileSystem.Statistics> list = map.get(uriScheme);
list.add(stat);
} else {
List<FileSystem.Statistics> list = new ArrayList<FileSystem.Statistics>();
list.add(stat);
map.put(uriScheme, list);
}
}
for (Map.Entry<String, List<FileSystem.Statistics>> entry: map.entrySet()) {
FileSystemStatisticUpdater updater = statisticUpdaters.get(entry.getKey());
if(updater==null) {//new FileSystem has been found in the cache if(updater==null) {//new FileSystem has been found in the cache
updater = new FileSystemStatisticUpdater(stat); updater = new FileSystemStatisticUpdater(entry.getValue(), entry.getKey());
statisticUpdaters.put(uriScheme, updater); statisticUpdaters.put(entry.getKey(), updater);
} }
updater.updateCounters(); updater.updateCounters();
} }
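The Task.java changes above follow one idea: several FileSystem instances can be registered under the same URI scheme (for example two hdfs:// file systems), so the per-scheme counters have to be summed over a list of Statistics objects instead of read from a single one. A rough standalone sketch of that grouping and summing, assuming only the public FileSystem statistics API; the class and method names are illustrative, not part of the patch.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystem.Statistics;

public class SchemeStatsSketch {
  // Group every registered Statistics object by its URI scheme.
  static Map<String, List<Statistics>> groupByScheme() {
    Map<String, List<Statistics>> map = new HashMap<String, List<Statistics>>();
    for (Statistics stat : FileSystem.getAllStatistics()) {
      List<Statistics> list = map.get(stat.getScheme());
      if (list == null) {
        list = new ArrayList<Statistics>();
        map.put(stat.getScheme(), list);
      }
      list.add(stat);
    }
    return map;
  }

  // Sum bytes read across all file systems that share one scheme.
  static long bytesReadFor(String scheme) {
    List<Statistics> list = groupByScheme().get(scheme);
    if (list == null) {
      return 0;
    }
    long bytesRead = 0;
    for (Statistics stat : list) {
      bytesRead += stat.getBytesRead();
    }
    return bytesRead;
  }
}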

View File

@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.Writable;
/** /**
@@ -73,6 +74,7 @@ public interface Counter extends Writable {
*/ */
void increment(long incr); void increment(long incr);
@Private
/** /**
* Return the underlying object if this is a facade. * Return the underlying object if this is a facade.
* @return the undelying object. * @return the undelying object.

View File

@@ -22,11 +22,8 @@ import java.io.DataInput;
import java.io.DataOutput; import java.io.DataOutput;
import java.io.IOException; import java.io.IOException;
import java.util.Iterator; import java.util.Iterator;
import java.util.Map; import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterators;
import com.google.common.collect.Maps;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
@@ -34,6 +31,8 @@ import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.util.ResourceBundles; import org.apache.hadoop.mapreduce.util.ResourceBundles;
import com.google.common.collect.Iterators;
/** /**
* An abstract class to provide common implementation of the * An abstract class to provide common implementation of the
* generic counter group in both mapred and mapreduce package. * generic counter group in both mapred and mapreduce package.
@@ -46,7 +45,8 @@ public abstract class AbstractCounterGroup<T extends Counter>
private final String name; private final String name;
private String displayName; private String displayName;
private final Map<String, T> counters = Maps.newTreeMap(); private final ConcurrentMap<String, T> counters =
new ConcurrentSkipListMap<String, T>();
private final Limits limits; private final Limits limits;
public AbstractCounterGroup(String name, String displayName, public AbstractCounterGroup(String name, String displayName,
@@ -80,7 +80,7 @@ public abstract class AbstractCounterGroup<T extends Counter>
@Override @Override
public synchronized T addCounter(String counterName, String displayName, public synchronized T addCounter(String counterName, String displayName,
long value) { long value) {
String saveName = limits.filterCounterName(counterName); String saveName = Limits.filterCounterName(counterName);
T counter = findCounterImpl(saveName, false); T counter = findCounterImpl(saveName, false);
if (counter == null) { if (counter == null) {
return addCounterImpl(saveName, displayName, value); return addCounterImpl(saveName, displayName, value);
@@ -97,7 +97,9 @@ public abstract class AbstractCounterGroup<T extends Counter>
@Override @Override
public synchronized T findCounter(String counterName, String displayName) { public synchronized T findCounter(String counterName, String displayName) {
String saveName = limits.filterCounterName(counterName); // Take lock to avoid two threads not finding a counter and trying to add
// the same counter.
String saveName = Limits.filterCounterName(counterName);
T counter = findCounterImpl(saveName, false); T counter = findCounterImpl(saveName, false);
if (counter == null) { if (counter == null) {
return addCounterImpl(saveName, displayName, 0); return addCounterImpl(saveName, displayName, 0);
@@ -106,10 +108,12 @@ public abstract class AbstractCounterGroup<T extends Counter>
} }
@Override @Override
public synchronized T findCounter(String counterName, boolean create) { public T findCounter(String counterName, boolean create) {
return findCounterImpl(limits.filterCounterName(counterName), create); return findCounterImpl(Limits.filterCounterName(counterName), create);
} }
// Lock the object. Cannot simply use concurrent constructs on the counters
// data-structure (like putIfAbsent) because of localization, limits etc.
private synchronized T findCounterImpl(String counterName, boolean create) { private synchronized T findCounterImpl(String counterName, boolean create) {
T counter = counters.get(counterName); T counter = counters.get(counterName);
if (counter == null && create) { if (counter == null && create) {
@@ -142,8 +146,8 @@ public abstract class AbstractCounterGroup<T extends Counter>
protected abstract T newCounter(); protected abstract T newCounter();
@Override @Override
public synchronized Iterator<T> iterator() { public Iterator<T> iterator() {
return ImmutableSet.copyOf(counters.values()).iterator(); return counters.values().iterator();
} }
/** /**

View File

@@ -18,19 +18,18 @@
package org.apache.hadoop.mapreduce.counters; package org.apache.hadoop.mapreduce.counters;
import static org.apache.hadoop.mapreduce.counters.CounterGroupFactory.getFrameworkGroupId;
import static org.apache.hadoop.mapreduce.counters.CounterGroupFactory.isFrameworkGroup;
import java.io.DataInput; import java.io.DataInput;
import java.io.DataOutput; import java.io.DataOutput;
import java.io.IOException; import java.io.IOException;
import java.util.Iterator; import java.util.Iterator;
import java.util.Map; import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Iterators;
import com.google.common.collect.Maps;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
@@ -40,7 +39,10 @@ import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.FileSystemCounter; import org.apache.hadoop.mapreduce.FileSystemCounter;
import org.apache.hadoop.mapreduce.JobCounter; import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.TaskCounter; import org.apache.hadoop.mapreduce.TaskCounter;
import static org.apache.hadoop.mapreduce.counters.CounterGroupFactory.*;
import com.google.common.collect.Iterables;
import com.google.common.collect.Iterators;
import com.google.common.collect.Maps;
/** /**
* An abstract class to provide common implementation for the Counters * An abstract class to provide common implementation for the Counters
@@ -61,8 +63,10 @@ public abstract class AbstractCounters<C extends Counter,
* A cache from enum values to the associated counter. * A cache from enum values to the associated counter.
*/ */
private Map<Enum<?>, C> cache = Maps.newIdentityHashMap(); private Map<Enum<?>, C> cache = Maps.newIdentityHashMap();
private Map<String, G> fgroups = Maps.newTreeMap(); // framework & fs groups //framework & fs groups
private Map<String, G> groups = Maps.newTreeMap(); // other groups private Map<String, G> fgroups = new ConcurrentSkipListMap<String, G>();
// other groups
private Map<String, G> groups = new ConcurrentSkipListMap<String, G>();
private final CounterGroupFactory<C, G> groupFactory; private final CounterGroupFactory<C, G> groupFactory;
// For framework counter serialization without strings // For framework counter serialization without strings
@@ -181,14 +185,13 @@ public abstract class AbstractCounters<C extends Counter,
* @return Set of counter names. * @return Set of counter names.
*/ */
public synchronized Iterable<String> getGroupNames() { public synchronized Iterable<String> getGroupNames() {
return Iterables.concat(ImmutableSet.copyOf(fgroups.keySet()), return Iterables.concat(fgroups.keySet(), groups.keySet());
ImmutableSet.copyOf(groups.keySet()));
} }
@Override @Override
public synchronized Iterator<G> iterator() { public Iterator<G> iterator() {
return Iterators.concat(ImmutableSet.copyOf(fgroups.values()).iterator(), return Iterators.concat(fgroups.values().iterator(),
ImmutableSet.copyOf(groups.values()).iterator()); groups.values().iterator());
} }
/** /**
@@ -216,7 +219,7 @@ public abstract class AbstractCounters<C extends Counter,
private String filterGroupName(String oldName) { private String filterGroupName(String oldName) {
String newName = legacyMap.get(oldName); String newName = legacyMap.get(oldName);
if (newName == null) { if (newName == null) {
return limits.filterGroupName(oldName); return Limits.filterGroupName(oldName);
} }
LOG.warn("Group "+ oldName +" is deprecated. Use "+ newName +" instead"); LOG.warn("Group "+ oldName +" is deprecated. Use "+ newName +" instead");
return newName; return newName;

View File

@@ -19,6 +19,7 @@
package org.apache.hadoop.mapreduce.counters; package org.apache.hadoop.mapreduce.counters;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Counter;
@@ -99,6 +100,7 @@ public interface CounterGroupBase<T extends Counter>
*/ */
void incrAllCounters(CounterGroupBase<T> rightGroup); void incrAllCounters(CounterGroupBase<T> rightGroup);
@Private
/** /**
* Exposes the underlying group type if a facade. * Exposes the underlying group type if a facade.
* @return the underlying object that this object is wrapping up. * @return the underlying object that this object is wrapping up.

View File

@@ -42,11 +42,11 @@ public class Limits {
return name.length() > maxLen ? name.substring(0, maxLen - 1) : name; return name.length() > maxLen ? name.substring(0, maxLen - 1) : name;
} }
public String filterCounterName(String name) { public static String filterCounterName(String name) {
return filterName(name, COUNTER_NAME_MAX); return filterName(name, COUNTER_NAME_MAX);
} }
public String filterGroupName(String name) { public static String filterGroupName(String name) {
return filterName(name, GROUP_NAME_MAX); return filterName(name, GROUP_NAME_MAX);
} }

View File

@@ -1262,4 +1262,18 @@
to the RM to fetch Application Status.</description> to the RM to fetch Application Status.</description>
</property> </property>
<!-- jobhistory properties -->
<property>
<name>mapreduce.jobhistory.address</name>
<value>0.0.0.0:10020</value>
<description>MapReduce JobHistory Server host:port</description>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>0.0.0.0:19888</value>
<description>MapReduce JobHistory Server Web UI host:port</description>
</property>
</configuration> </configuration>
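The two new jobhistory keys above can be read, or overridden, through an ordinary Configuration object on the client side. A small illustrative sketch, with the defaults simply repeated as fallbacks; the class name is hypothetical.

import org.apache.hadoop.conf.Configuration;

public class JobHistoryAddresses {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Fallbacks repeat the defaults added to mapred-default.xml above.
    String rpcAddr = conf.get("mapreduce.jobhistory.address", "0.0.0.0:10020");
    String webAddr = conf.get("mapreduce.jobhistory.webapp.address", "0.0.0.0:19888");
    System.out.println("JobHistory RPC address:    " + rpcAddr);
    System.out.println("JobHistory webapp address: " + webAddr);
  }
}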

View File

@@ -16,12 +16,12 @@
<parent> <parent>
<artifactId>hadoop-mapreduce-client</artifactId> <artifactId>hadoop-mapreduce-client</artifactId>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
</parent> </parent>
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-hs</artifactId> <artifactId>hadoop-mapreduce-client-hs</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<name>hadoop-mapreduce-client-hs</name> <name>hadoop-mapreduce-client-hs</name>
<properties> <properties>

View File

@@ -16,12 +16,12 @@
<parent> <parent>
<artifactId>hadoop-mapreduce-client</artifactId> <artifactId>hadoop-mapreduce-client</artifactId>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
</parent> </parent>
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-jobclient</artifactId> <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<name>hadoop-mapreduce-client-jobclient</name> <name>hadoop-mapreduce-client-jobclient</name>
<properties> <properties>

View File

@@ -16,12 +16,12 @@
<parent> <parent>
<artifactId>hadoop-mapreduce-client</artifactId> <artifactId>hadoop-mapreduce-client</artifactId>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
</parent> </parent>
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-shuffle</artifactId> <artifactId>hadoop-mapreduce-client-shuffle</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<name>hadoop-mapreduce-client-shuffle</name> <name>hadoop-mapreduce-client-shuffle</name>
<properties> <properties>

View File

@@ -17,12 +17,12 @@
<parent> <parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId> <artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath> <relativePath>../../hadoop-project</relativePath>
</parent> </parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client</artifactId> <artifactId>hadoop-mapreduce-client</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<name>hadoop-mapreduce-client</name> <name>hadoop-mapreduce-client</name>
<packaging>pom</packaging> <packaging>pom</packaging>

View File

@@ -17,12 +17,12 @@
<parent> <parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId> <artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath> <relativePath>../../hadoop-project</relativePath>
</parent> </parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-examples</artifactId> <artifactId>hadoop-mapreduce-examples</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<description>Apache Hadoop MapReduce Examples</description> <description>Apache Hadoop MapReduce Examples</description>
<name>Apache Hadoop MapReduce Examples</name> <name>Apache Hadoop MapReduce Examples</name>
<packaging>jar</packaging> <packaging>jar</packaging>

View File

@@ -16,12 +16,12 @@
<parent> <parent>
<artifactId>hadoop-yarn</artifactId> <artifactId>hadoop-yarn</artifactId>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
</parent> </parent>
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-api</artifactId> <artifactId>hadoop-yarn-api</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-api</name> <name>hadoop-yarn-api</name>
<properties> <properties>

View File

@@ -16,12 +16,12 @@
<parent> <parent>
<artifactId>hadoop-yarn-applications</artifactId> <artifactId>hadoop-yarn-applications</artifactId>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
</parent> </parent>
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-applications-distributedshell</artifactId> <artifactId>hadoop-yarn-applications-distributedshell</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-applications-distributedshell</name> <name>hadoop-yarn-applications-distributedshell</name>
<properties> <properties>

View File

@@ -16,12 +16,12 @@
<parent> <parent>
<artifactId>hadoop-yarn</artifactId> <artifactId>hadoop-yarn</artifactId>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
</parent> </parent>
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-applications</artifactId> <artifactId>hadoop-yarn-applications</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-applications</name> <name>hadoop-yarn-applications</name>
<packaging>pom</packaging> <packaging>pom</packaging>

View File

@@ -16,12 +16,12 @@
<parent> <parent>
<artifactId>hadoop-yarn</artifactId> <artifactId>hadoop-yarn</artifactId>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
</parent> </parent>
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId> <artifactId>hadoop-yarn-common</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-common</name> <name>hadoop-yarn-common</name>
<properties> <properties>

View File

@@ -20,8 +20,13 @@ package org.apache.hadoop.yarn.conf;
import com.google.common.base.Joiner; import com.google.common.base.Joiner;
import com.google.common.base.Splitter; import com.google.common.base.Splitter;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.Iterator; import java.util.Iterator;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
public class YarnConfiguration extends Configuration { public class YarnConfiguration extends Configuration {
private static final Splitter ADDR_SPLITTER = Splitter.on(':').trimResults(); private static final Splitter ADDR_SPLITTER = Splitter.on(':').trimResults();
@@ -543,7 +548,25 @@ public class YarnConfiguration extends Configuration {
// Use apps manager address to figure out the host for webapp // Use apps manager address to figure out the host for webapp
addr = conf.get(YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS); addr = conf.get(YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS);
String host = ADDR_SPLITTER.split(addr).iterator().next(); String host = ADDR_SPLITTER.split(addr).iterator().next();
return JOINER.join(host, ":", port); String rmAddress = JOINER.join(host, ":", port);
InetSocketAddress address = NetUtils.createSocketAddr(
rmAddress, DEFAULT_RM_WEBAPP_PORT, RM_WEBAPP_ADDRESS);
StringBuffer sb = new StringBuffer();
InetAddress resolved = address.getAddress();
if (resolved == null || resolved.isAnyLocalAddress() ||
resolved.isLoopbackAddress()) {
String lh = host;
try {
lh = InetAddress.getLocalHost().getCanonicalHostName();
} catch (UnknownHostException e) {
//Ignore and fallback.
}
sb.append(lh);
} else {
sb.append(address.getHostName());
}
sb.append(":").append(address.getPort());
return sb.toString();
} }
public static String getRMWebAppURL(Configuration conf) { public static String getRMWebAppURL(Configuration conf) {
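The logic added to getRMWebAppURL() above amounts to one fallback: if the configured webapp host resolves to a wildcard or loopback address, report the local canonical hostname instead. A minimal sketch of just that fallback using plain JDK classes rather than NetUtils; the class and method names are illustrative, not part of the patch.

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;

public class WebappHostFallback {
  // If the configured host is a wildcard or loopback address, substitute the
  // local canonical hostname, as the patched getRMWebAppURL() does.
  static String resolve(String host, int port) {
    InetSocketAddress address = new InetSocketAddress(host, port);
    InetAddress resolved = address.getAddress();
    StringBuilder sb = new StringBuilder();
    if (resolved == null || resolved.isAnyLocalAddress()
        || resolved.isLoopbackAddress()) {
      String lh = host;
      try {
        lh = InetAddress.getLocalHost().getCanonicalHostName();
      } catch (UnknownHostException e) {
        // ignore and fall back to the configured host
      }
      sb.append(lh);
    } else {
      sb.append(address.getHostName());
    }
    sb.append(":").append(port);
    return sb.toString();
  }

  public static void main(String[] args) {
    System.out.println(resolve("0.0.0.0", 8088)); // <local canonical hostname>:8088
  }
}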

View File

@@ -18,7 +18,6 @@
package org.apache.hadoop.yarn.conf; package org.apache.hadoop.yarn.conf;
import junit.framework.Assert; import junit.framework.Assert;
import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -32,20 +31,25 @@ public class TestYarnConfiguration {
String rmWebUrl = YarnConfiguration.getRMWebAppURL(conf); String rmWebUrl = YarnConfiguration.getRMWebAppURL(conf);
// shouldn't have a "/" on the end of the url as all the other uri routinnes // shouldn't have a "/" on the end of the url as all the other uri routinnes
// specifically add slashes and Jetty doesn't handle double slashes. // specifically add slashes and Jetty doesn't handle double slashes.
Assert.assertEquals("RM Web Url is not correct", "http://0.0.0.0:8088", Assert.assertNotSame("RM Web Url is not correct", "http://0.0.0.0:8088",
rmWebUrl); rmWebUrl);
} }
@Test @Test
public void testRMWebUrlSpecified() throws Exception { public void testRMWebUrlSpecified() throws Exception {
YarnConfiguration conf = new YarnConfiguration(); YarnConfiguration conf = new YarnConfiguration();
// seems a bit odd but right now we are forcing webapp for RM to be RM_ADDRESS // seems a bit odd but right now we are forcing webapp for RM to be
// RM_ADDRESS
// for host and use the port from the RM_WEBAPP_ADDRESS // for host and use the port from the RM_WEBAPP_ADDRESS
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "footesting:99110"); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "fortesting:24543");
conf.set(YarnConfiguration.RM_ADDRESS, "rmtesting:9999"); conf.set(YarnConfiguration.RM_ADDRESS, "rmtesting:9999");
String rmWebUrl = YarnConfiguration.getRMWebAppURL(conf); String rmWebUrl = YarnConfiguration.getRMWebAppURL(conf);
Assert.assertEquals("RM Web Url is not correct", "http://rmtesting:99110", String[] parts = rmWebUrl.split(":");
rmWebUrl); Assert.assertEquals("RM Web URL Port is incrrect", 24543,
Integer.valueOf(parts[parts.length - 1]).intValue());
Assert.assertNotSame(
"RM Web Url not resolved correctly. Should not be rmtesting",
"http://rmtesting:24543", rmWebUrl);
} }
} }

View File

@@ -16,12 +16,12 @@
<parent> <parent>
<artifactId>hadoop-yarn-server</artifactId> <artifactId>hadoop-yarn-server</artifactId>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
</parent> </parent>
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-common</artifactId> <artifactId>hadoop-yarn-server-common</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-server-common</name> <name>hadoop-yarn-server-common</name>
<properties> <properties>

View File

@@ -16,12 +16,12 @@
<parent> <parent>
<artifactId>hadoop-yarn-server</artifactId> <artifactId>hadoop-yarn-server</artifactId>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
</parent> </parent>
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-nodemanager</artifactId> <artifactId>hadoop-yarn-server-nodemanager</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-server-nodemanager</name> <name>hadoop-yarn-server-nodemanager</name>
<properties> <properties>

View File

@@ -16,12 +16,12 @@
<parent> <parent>
<artifactId>hadoop-yarn-server</artifactId> <artifactId>hadoop-yarn-server</artifactId>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
</parent> </parent>
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-resourcemanager</artifactId> <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-server-resourcemanager</name> <name>hadoop-yarn-server-resourcemanager</name>
<properties> <properties>

View File

@@ -368,6 +368,12 @@ public class ParentQueue implements CSQueue {
ParentQueue parentQueue = (ParentQueue)queue; ParentQueue parentQueue = (ParentQueue)queue;
// Set new configs
setupQueueConfigs(clusterResource,
parentQueue.capacity, parentQueue.absoluteCapacity,
parentQueue.maximumCapacity, parentQueue.absoluteMaxCapacity,
parentQueue.state, parentQueue.acls);
// Re-configure existing child queues and add new ones // Re-configure existing child queues and add new ones
// The CS has already checked to ensure all existing child queues are present! // The CS has already checked to ensure all existing child queues are present!
Map<String, CSQueue> currentChildQueues = getQueues(childQueues); Map<String, CSQueue> currentChildQueues = getQueues(childQueues);
@@ -389,12 +395,6 @@ public class ParentQueue implements CSQueue {
// Re-sort all queues // Re-sort all queues
childQueues.clear(); childQueues.clear();
childQueues.addAll(currentChildQueues.values()); childQueues.addAll(currentChildQueues.values());
// Set new configs
setupQueueConfigs(clusterResource,
parentQueue.capacity, parentQueue.absoluteCapacity,
parentQueue.maximumCapacity, parentQueue.absoluteMaxCapacity,
parentQueue.state, parentQueue.acls);
} }
Map<String, CSQueue> getQueues(Set<CSQueue> queues) { Map<String, CSQueue> getQueues(Set<CSQueue> queues) {

View File

@@ -81,7 +81,7 @@ class AppsBlock extends HtmlBlock {
td(). td().
br().$title(startTime)._()._(startTime)._(). br().$title(startTime)._()._(startTime)._().
td(). td().
br().$title(startTime)._()._(finishTime)._(). br().$title(finishTime)._()._(finishTime)._().
td(appInfo.getState()). td(appInfo.getState()).
td(appInfo.getFinalStatus()). td(appInfo.getFinalStatus()).
td(). td().

View File

@@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
import org.apache.hadoop.yarn.util.Times;
import org.apache.hadoop.yarn.webapp.Controller.RequestContext; import org.apache.hadoop.yarn.webapp.Controller.RequestContext;
import org.apache.hadoop.yarn.webapp.ToJSON; import org.apache.hadoop.yarn.webapp.ToJSON;
import org.apache.hadoop.yarn.webapp.view.JQueryUI.Render; import org.apache.hadoop.yarn.webapp.view.JQueryUI.Render;
@@ -60,7 +61,9 @@ class AppsList implements ToJSON {
&& app.getState() != RMAppState.valueOf(requiredAppState)) { && app.getState() != RMAppState.valueOf(requiredAppState)) {
continue; continue;
} }
AppInfo appInfo = new AppInfo(app, false); AppInfo appInfo = new AppInfo(app, true);
String startTime = Times.format(appInfo.getStartTime());
String finishTime = Times.format(appInfo.getFinishTime());
if (first) { if (first) {
first = false; first = false;
} else { } else {
@@ -72,15 +75,15 @@ class AppsList implements ToJSON {
appInfo.getAppId()).append(_SEP). appInfo.getAppId()).append(_SEP).
append(escapeHtml(appInfo.getUser())).append(_SEP). append(escapeHtml(appInfo.getUser())).append(_SEP).
append(escapeJavaScript(escapeHtml(appInfo.getName()))).append(_SEP). append(escapeJavaScript(escapeHtml(appInfo.getName()))).append(_SEP).
append(escapeHtml(appInfo.getQueue())).append(_SEP). append(escapeHtml(appInfo.getQueue())).append(_SEP);
appendSortable(out, startTime).append(startTime).append(_SEP);
appendSortable(out, finishTime).append(finishTime).append(_SEP).
append(appInfo.getState()).append(_SEP). append(appInfo.getState()).append(_SEP).
append(appInfo.getFinalStatus()).append(_SEP); append(appInfo.getFinalStatus()).append(_SEP);
appendProgressBar(out, appInfo.getProgress()).append(_SEP); appendProgressBar(out, appInfo.getProgress()).append(_SEP);
appendLink(out, appInfo.getTrackingUI(), rc.prefix(), appendLink(out, appInfo.getTrackingUI(), rc.prefix(),
!appInfo.isTrackingUrlReady() ? !appInfo.isTrackingUrlReady() ?
"#" : appInfo.getTrackingUrlPretty()). "#" : appInfo.getTrackingUrlPretty()).
append(_SEP).append(escapeJavaScript(escapeHtml(
appInfo.getNote()))).
append("\"]"); append("\"]");
} }
out.append(']'); out.append(']');

View File

@@ -18,11 +18,12 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import static org.junit.Assert.assertEquals;
import java.io.IOException; import java.io.IOException;
import java.util.List;
import junit.framework.Assert; import junit.framework.Assert;
import junit.framework.TestCase;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
@@ -47,6 +48,21 @@ import org.junit.Test;
public class TestCapacityScheduler { public class TestCapacityScheduler {
private static final Log LOG = LogFactory.getLog(TestCapacityScheduler.class); private static final Log LOG = LogFactory.getLog(TestCapacityScheduler.class);
private static final String A = CapacitySchedulerConfiguration.ROOT + ".a";
private static final String B = CapacitySchedulerConfiguration.ROOT + ".b";
private static final String A1 = A + ".a1";
private static final String A2 = A + ".a2";
private static final String B1 = B + ".b1";
private static final String B2 = B + ".b2";
private static final String B3 = B + ".b3";
private static int A_CAPACITY = 10;
private static int B_CAPACITY = 90;
private static int A1_CAPACITY = 30;
private static int A2_CAPACITY = 70;
private static int B1_CAPACITY = 50;
private static int B2_CAPACITY = 30;
private static int B3_CAPACITY = 20;
private ResourceManager resourceManager = null; private ResourceManager resourceManager = null;
@Before @Before
@@ -200,35 +216,102 @@ public class TestCapacityScheduler {
conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b"}); conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b"});
conf.setCapacity(CapacitySchedulerConfiguration.ROOT, 100); conf.setCapacity(CapacitySchedulerConfiguration.ROOT, 100);
final String A = CapacitySchedulerConfiguration.ROOT + ".a"; conf.setCapacity(A, A_CAPACITY);
conf.setCapacity(A, 10); conf.setCapacity(B, B_CAPACITY);
final String B = CapacitySchedulerConfiguration.ROOT + ".b";
conf.setCapacity(B, 90);
// Define 2nd-level queues // Define 2nd-level queues
final String A1 = A + ".a1";
final String A2 = A + ".a2";
conf.setQueues(A, new String[] {"a1", "a2"}); conf.setQueues(A, new String[] {"a1", "a2"});
conf.setCapacity(A1, 30); conf.setCapacity(A1, A1_CAPACITY);
conf.setUserLimitFactor(A1, 100.0f); conf.setUserLimitFactor(A1, 100.0f);
conf.setCapacity(A2, 70); conf.setCapacity(A2, A2_CAPACITY);
conf.setUserLimitFactor(A2, 100.0f); conf.setUserLimitFactor(A2, 100.0f);
final String B1 = B + ".b1";
final String B2 = B + ".b2";
final String B3 = B + ".b3";
conf.setQueues(B, new String[] {"b1", "b2", "b3"}); conf.setQueues(B, new String[] {"b1", "b2", "b3"});
conf.setCapacity(B1, 50); conf.setCapacity(B1, B1_CAPACITY);
conf.setUserLimitFactor(B1, 100.0f); conf.setUserLimitFactor(B1, 100.0f);
conf.setCapacity(B2, 30); conf.setCapacity(B2, B2_CAPACITY);
conf.setUserLimitFactor(B2, 100.0f); conf.setUserLimitFactor(B2, 100.0f);
conf.setCapacity(B3, 20); conf.setCapacity(B3, B3_CAPACITY);
conf.setUserLimitFactor(B3, 100.0f); conf.setUserLimitFactor(B3, 100.0f);
LOG.info("Setup top-level queues a and b"); LOG.info("Setup top-level queues a and b");
} }
@Test
public void testRefreshQueues() throws Exception {
CapacityScheduler cs = new CapacityScheduler();
CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
setupQueueConfiguration(conf);
cs.reinitialize(conf, null, null);
checkQueueCapacities(cs, A_CAPACITY, B_CAPACITY);
conf.setCapacity(A, 80);
conf.setCapacity(B, 20);
cs.reinitialize(conf, null,null);
checkQueueCapacities(cs, 80, 20);
}
private void checkQueueCapacities(CapacityScheduler cs,
int capacityA, int capacityB) {
CSQueue rootQueue = cs.getRootQueue();
CSQueue queueA = findQueue(rootQueue, A);
CSQueue queueB = findQueue(rootQueue, B);
CSQueue queueA1 = findQueue(queueA, A1);
CSQueue queueA2 = findQueue(queueA, A2);
CSQueue queueB1 = findQueue(queueB, B1);
CSQueue queueB2 = findQueue(queueB, B2);
CSQueue queueB3 = findQueue(queueB, B3);
float capA = capacityA / 100.0f;
float capB = capacityB / 100.0f;
checkQueueCapacity(queueA, capA, capA, 1.0f, 1.0f);
checkQueueCapacity(queueB, capB, capB, 1.0f, 1.0f);
checkQueueCapacity(queueA1, A1_CAPACITY / 100.0f,
(A1_CAPACITY/100.0f) * capA, 1.0f, 1.0f);
checkQueueCapacity(queueA2, (float)A2_CAPACITY / 100.0f,
(A2_CAPACITY/100.0f) * capA, 1.0f, 1.0f);
checkQueueCapacity(queueB1, (float)B1_CAPACITY / 100.0f,
(B1_CAPACITY/100.0f) * capB, 1.0f, 1.0f);
checkQueueCapacity(queueB2, (float)B2_CAPACITY / 100.0f,
(B2_CAPACITY/100.0f) * capB, 1.0f, 1.0f);
checkQueueCapacity(queueB3, (float)B3_CAPACITY / 100.0f,
(B3_CAPACITY/100.0f) * capB, 1.0f, 1.0f);
}
private void checkQueueCapacity(CSQueue q, float expectedCapacity,
float expectedAbsCapacity, float expectedMaxCapacity,
float expectedAbsMaxCapacity) {
final float epsilon = 1e-5f;
assertEquals("capacity", expectedCapacity, q.getCapacity(), epsilon);
assertEquals("absolute capacity", expectedAbsCapacity,
q.getAbsoluteCapacity(), epsilon);
assertEquals("maximum capacity", expectedMaxCapacity,
q.getMaximumCapacity(), epsilon);
assertEquals("absolute maximum capacity", expectedAbsMaxCapacity,
q.getAbsoluteMaximumCapacity(), epsilon);
}
private CSQueue findQueue(CSQueue root, String queuePath) {
if (root.getQueuePath().equals(queuePath)) {
return root;
}
List<CSQueue> childQueues = root.getChildQueues();
if (childQueues != null) {
for (CSQueue q : childQueues) {
if (queuePath.startsWith(q.getQueuePath())) {
CSQueue result = findQueue(q, queuePath);
if (result != null) {
return result;
}
}
}
}
return null;
}
private void checkApplicationResourceUsage(int expected, private void checkApplicationResourceUsage(int expected,
Application application) { Application application) {
Assert.assertEquals(expected, application.getUsedResources().getMemory()); Assert.assertEquals(expected, application.getUsedResources().getMemory());
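The assertions in checkQueueCapacities() above rest on simple arithmetic: a child queue's absolute capacity is its own configured fraction multiplied by its parent's absolute capacity. A tiny illustrative example for the refreshed configuration in testRefreshQueues() (a = 80, a1 = 30); the class name is hypothetical.

public class CapacityArithmetic {
  public static void main(String[] args) {
    float capA  = 80 / 100.0f;  // queue "a" after the refresh
    float capA1 = 30 / 100.0f;  // child "a1" keeps its configured 30%
    // a1's absolute capacity: roughly 0.24, within the test's 1e-5 epsilon
    System.out.println("a1 absolute capacity = " + capA1 * capA);
  }
}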

View File

@@ -16,11 +16,11 @@
<parent> <parent>
<artifactId>hadoop-yarn-server</artifactId> <artifactId>hadoop-yarn-server</artifactId>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
</parent> </parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-tests</artifactId> <artifactId>hadoop-yarn-server-tests</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-server-tests</name> <name>hadoop-yarn-server-tests</name>
<properties> <properties>

View File

@@ -16,7 +16,7 @@
<parent> <parent>
<artifactId>hadoop-yarn-server</artifactId> <artifactId>hadoop-yarn-server</artifactId>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
</parent> </parent>
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>

View File

@@ -16,12 +16,12 @@
<parent> <parent>
<artifactId>hadoop-yarn</artifactId> <artifactId>hadoop-yarn</artifactId>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
</parent> </parent>
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server</artifactId> <artifactId>hadoop-yarn-server</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-server</name> <name>hadoop-yarn-server</name>
<packaging>pom</packaging> <packaging>pom</packaging>

View File

@@ -16,12 +16,12 @@
<parent> <parent>
<artifactId>hadoop-yarn</artifactId> <artifactId>hadoop-yarn</artifactId>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
</parent> </parent>
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-site</artifactId> <artifactId>hadoop-yarn-site</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-site</name> <name>hadoop-yarn-site</name>
<properties> <properties>

View File

@@ -11,7 +11,7 @@
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file. limitations under the License. See accompanying LICENSE file.
--> -->
<project name="Apache Hadoop 0.23"> <project name="Apache Hadoop ${project.version}">
<skin> <skin>
<groupId>org.apache.maven.skins</groupId> <groupId>org.apache.maven.skins</groupId>

View File

@@ -17,12 +17,12 @@
<parent> <parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId> <artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath> <relativePath>../../hadoop-project</relativePath>
</parent> </parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn</artifactId> <artifactId>hadoop-yarn</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
<packaging>pom</packaging> <packaging>pom</packaging>
<name>hadoop-yarn</name> <name>hadoop-yarn</name>

View File

@@ -28,7 +28,7 @@
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId> <artifactId>hadoop-common</artifactId>
<version>0.23.1-SNAPSHOT</version> <version>0.23.2-SNAPSHOT</version>
</dependency> </dependency>
</dependencies> </dependencies>
</project> </project>

Some files were not shown because too many files have changed in this diff.