Merge r1241554 through r1242605 from 0.23.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23-PB@1242642 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Tsz-wo Sze 2012-02-10 03:04:05 +00:00
commit 6c0178e7e5
116 changed files with 3733 additions and 766 deletions

View File

@ -20,12 +20,12 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<relativePath>../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-assemblies</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<name>Apache Hadoop Assemblies</name>
<description>Apache Hadoop Assemblies</description>

View File

@ -18,12 +18,12 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<relativePath>../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<packaging>jar</packaging>
<description>Apache Hadoop Client</description>

View File

@ -17,12 +17,12 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-annotations</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<description>Apache Hadoop Annotations</description>
<name>Apache Hadoop Annotations</name>
<packaging>jar</packaging>

View File

@ -17,12 +17,12 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth-examples</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<packaging>war</packaging>
<name>Apache Hadoop Auth Examples</name>

View File

@ -17,12 +17,12 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<packaging>jar</packaging>
<name>Apache Hadoop Auth</name>

View File

@ -63,7 +63,29 @@ Release 0.23-PB - Unreleased
HADOOP-7931. o.a.h.ipc.WritableRpcEngine should have a way to force
initialization (atm)
Release 0.23.1 - Unreleased
Release 0.23.2 - UNRELEASED
NEW FEATURES
IMPROVEMENTS
HADOOP-8032. mvn site:stage-deploy should be able to use the scp protocol
to stage documents (Ravi Prakash via tgraves)
HADOOP-7923. Automate the updating of version numbers in the doc system.
(szetszwo)
OPTIMIZATIONS
BUG FIXES
HADOOP-8042. When copying a file out of HDFS, modifying it, and uploading
it back into HDFS, the put fails due to a CRC mismatch.
(Daryn Sharp via bobby)
HADOOP-8035. Hadoop Maven site is inefficient and runs phases redundantly.
(abayer via tucu)
Release 0.23.1 - 2012-02-08
INCOMPATIBLE CHANGES
@ -166,8 +188,6 @@ Release 0.23.1 - Unreleased
HADOOP-7761. Improve the performance of raw comparisons. (todd)
BUG FIXES
HADOOP-8013. ViewFileSystem does not honor setVerifyChecksum
(Daryn Sharp via bobby)
HADOOP-8018. Hudson auto test for HDFS has started throwing javadoc
(Jon Eagles via bobby)
@ -184,6 +204,14 @@ Release 0.23.1 - Unreleased
HADOOP-7811. TestUserGroupInformation#testGetServerSideGroups test fails in chroot.
(Jonathan Eagles via mahadev)
HADOOP-7813. Fix test-patch to use proper numerical comparison when checking
javadoc and findbugs warning counts. (Jonathan Eagles via tlipcon)
HADOOP-7841. Run tests with non-secure random. (tlipcon)
HADOOP-7851. Configuration.getClasses() never returns the default value.
(Uma Maheswara Rao G via amarrk)
HADOOP-7787. Make source tarball use conventional name.
(Bruno Mahé via tomwhite)
@ -277,6 +305,9 @@ Release 0.23.1 - Unreleased
HADOOP-8012. hadoop-daemon.sh and yarn-daemon.sh are trying to mkdir
and chown log/pid dirs which can fail. (Roman Shaposhnik via eli)
HADOOP-8013. ViewFileSystem does not honor setVerifyChecksum
(Daryn Sharp via bobby)
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES

View File

@ -17,12 +17,12 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project-dist</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project-dist</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<description>Apache Hadoop Common</description>
<name>Apache Hadoop Common</name>
<packaging>jar</packaging>

View File

@ -43,6 +43,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
private static final byte[] CHECKSUM_VERSION = new byte[] {'c', 'r', 'c', 0};
private int bytesPerChecksum = 512;
private boolean verifyChecksum = true;
private boolean writeChecksum = true;
public static double getApproxChkSumLength(long size) {
return ChecksumFSOutputSummer.CHKSUM_AS_FRACTION * size;
@ -67,6 +68,11 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
this.verifyChecksum = verifyChecksum;
}
@Override
public void setWriteChecksum(boolean writeChecksum) {
this.writeChecksum = writeChecksum;
}
/** get the raw file system */
public FileSystem getRawFileSystem() {
return fs;
@ -428,9 +434,20 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
throw new IOException("Mkdirs failed to create " + parent);
}
}
final FSDataOutputStream out = new FSDataOutputStream(
new ChecksumFSOutputSummer(this, f, overwrite, bufferSize, replication,
blockSize, progress), null);
final FSDataOutputStream out;
if (writeChecksum) {
out = new FSDataOutputStream(
new ChecksumFSOutputSummer(this, f, overwrite, bufferSize, replication,
blockSize, progress), null);
} else {
out = fs.create(f, permission, overwrite, bufferSize, replication,
blockSize, progress);
// remove the checksum file since we aren't writing one
Path checkFile = getChecksumFile(f);
if (fs.exists(checkFile)) {
fs.delete(checkFile, true);
}
}
if (permission != null) {
setPermission(f, permission);
}
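A minimal usage sketch of the new flag (illustrative only; the class name and path below are made up), assuming a LocalFileSystem, which is a ChecksumFileSystem:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class WriteChecksumSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    LocalFileSystem lfs = FileSystem.getLocal(conf);
    Path p = new Path("/tmp/no-crc-example");      // hypothetical path
    lfs.setWriteChecksum(false);                   // do not write a .crc side file
    FSDataOutputStream out = lfs.create(p);
    out.writeChars("hi");
    out.close();
    // With writeChecksum disabled, create() delegates to the raw file system
    // and removes any stale checksum file left over from an earlier write.
    System.out.println(lfs.exists(lfs.getChecksumFile(p)));   // prints false
  }
}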

View File

@ -1936,6 +1936,15 @@ public abstract class FileSystem extends Configured implements Closeable {
//doesn't do anything
}
/**
* Set the write checksum flag. This is only applicable if the
* corresponding FileSystem supports checksums. By default this does nothing.
* @param writeChecksum whether checksum files should be written on create
*/
public void setWriteChecksum(boolean writeChecksum) {
//doesn't do anything
}
/**
* Return a list of file status objects that corresponds to the list of paths
* excluding those non-existent paths.

View File

@ -360,6 +360,11 @@ public class FilterFileSystem extends FileSystem {
public void setVerifyChecksum(boolean verifyChecksum) {
fs.setVerifyChecksum(verifyChecksum);
}
@Override
public void setWriteChecksum(boolean writeChecksum) {
fs.setWriteChecksum(writeChecksum);
}
@Override
public Configuration getConf() {

View File

@ -41,7 +41,9 @@ import org.apache.hadoop.io.IOUtils;
*/
abstract class CommandWithDestination extends FsCommand {
protected PathData dst;
protected boolean overwrite = false;
private boolean overwrite = false;
private boolean verifyChecksum = true;
private boolean writeChecksum = true;
/**
*
@ -53,6 +55,14 @@ abstract class CommandWithDestination extends FsCommand {
overwrite = flag;
}
protected void setVerifyChecksum(boolean flag) {
verifyChecksum = flag;
}
protected void setWriteChecksum(boolean flag) {
writeChecksum = flag;
}
/**
* The last arg is expected to be a local path, if only one argument is
* given then the destination will be the current directory
@ -201,6 +211,7 @@ abstract class CommandWithDestination extends FsCommand {
* @throws IOException if copy fails
*/
protected void copyFileToTarget(PathData src, PathData target) throws IOException {
src.fs.setVerifyChecksum(verifyChecksum);
copyStreamToTarget(src.fs.open(src.path), target);
}
@ -217,6 +228,7 @@ abstract class CommandWithDestination extends FsCommand {
if (target.exists && (target.stat.isDirectory() || !overwrite)) {
throw new PathExistsException(target.toString());
}
target.fs.setWriteChecksum(writeChecksum);
PathData tempFile = null;
try {
tempFile = target.createTempFile(target+"._COPYING_");
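For context, a hedged sketch of how the shell exercises these setters end to end, mirroring the new TestFsShellCopy test added later in this change (paths are made up):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;

public class GetWithoutCrcSketch {
  public static void main(String[] args) throws Exception {
    FsShell shell = new FsShell(new Configuration());
    // -ignoreCrc maps to setVerifyChecksum(false) on the source file system;
    // -crc would instead map to setWriteChecksum(true) on the target file system.
    int rc = shell.run(new String[] {"-get", "-ignoreCrc", "/user/foo/file", "/tmp/file"});
    System.exit(rc);
  }
}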

View File

@ -25,7 +25,6 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ChecksumFileSystem;
import org.apache.hadoop.fs.FileUtil;
/** Various commands for copy files */
@ -103,43 +102,17 @@ class CopyCommands {
"to the local name. <src> is kept. When copying multiple,\n" +
"files, the destination must be a directory.";
/**
* The prefix for the tmp file used in copyToLocal.
* It must be at least three characters long, required by
* {@link java.io.File#createTempFile(String, String, File)}.
*/
private boolean copyCrc;
private boolean verifyChecksum;
@Override
protected void processOptions(LinkedList<String> args)
throws IOException {
CommandFormat cf = new CommandFormat(
1, Integer.MAX_VALUE, "crc", "ignoreCrc");
cf.parse(args);
copyCrc = cf.getOpt("crc");
verifyChecksum = !cf.getOpt("ignoreCrc");
setWriteChecksum(cf.getOpt("crc"));
setVerifyChecksum(!cf.getOpt("ignoreCrc"));
setRecursive(true);
getLocalDestination(args);
}
@Override
protected void copyFileToTarget(PathData src, PathData target)
throws IOException {
src.fs.setVerifyChecksum(verifyChecksum);
if (copyCrc && !(src.fs instanceof ChecksumFileSystem)) {
displayWarning(src.fs + ": Does not support checksums");
copyCrc = false;
}
super.copyFileToTarget(src, target);
if (copyCrc) {
// should we delete real file if crc copy fails?
super.copyFileToTarget(src.getChecksumFile(), target.getChecksumFile());
}
}
}
/**

View File

@ -27,7 +27,6 @@ import java.net.URISyntaxException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumFileSystem;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
@ -169,19 +168,6 @@ public class PathData {
}
}
/**
* Return the corresponding crc data for a file. Avoids exposing the fs
* contortions to the caller.
* @return PathData of the crc file
* @throws IOException if anything goes wrong
*/
public PathData getChecksumFile() throws IOException {
checkIfExists(FileTypeRequirement.SHOULD_NOT_BE_DIRECTORY);
ChecksumFileSystem srcFs = (ChecksumFileSystem)fs;
Path srcPath = srcFs.getChecksumFile(path);
return new PathData(srcFs.getRawFileSystem(), srcPath.toString());
}
/**
* Returns a temporary file for this PathData with the given extension.
* The file will be deleted on exit.

View File

@ -470,6 +470,15 @@ public class ViewFileSystem extends FileSystem {
}
}
@Override
public void setWriteChecksum(final boolean writeChecksum) {
List<InodeTree.MountPoint<FileSystem>> mountPoints =
fsState.getMountPoints();
for (InodeTree.MountPoint<FileSystem> mount : mountPoints) {
mount.target.targetFileSystem.setWriteChecksum(writeChecksum);
}
}
public MountPoint[] getMountPoints() {
List<InodeTree.MountPoint<FileSystem>> mountPoints =
fsState.getMountPoints();

View File

@ -0,0 +1,97 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.*;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestFsShellCopy {
static Configuration conf;
static FsShell shell;
static LocalFileSystem lfs;
static Path testRootDir, srcPath, dstPath;
@BeforeClass
public static void setup() throws Exception {
conf = new Configuration();
shell = new FsShell(conf);
lfs = FileSystem.getLocal(conf);
testRootDir = new Path(
System.getProperty("test.build.data","test/build/data"), "testShellCopy");
lfs.mkdirs(testRootDir);
srcPath = new Path(testRootDir, "srcFile");
dstPath = new Path(testRootDir, "dstFile");
}
@Before
public void prepFiles() throws Exception {
lfs.setVerifyChecksum(true);
lfs.setWriteChecksum(true);
lfs.delete(srcPath, true);
lfs.delete(dstPath, true);
FSDataOutputStream out = lfs.create(srcPath);
out.writeChars("hi");
out.close();
assertTrue(lfs.exists(lfs.getChecksumFile(srcPath)));
}
@Test
public void testCopyNoCrc() throws Exception {
shellRun(0, "-get", srcPath.toString(), dstPath.toString());
checkPath(dstPath, false);
}
@Test
public void testCopyCrc() throws Exception {
shellRun(0, "-get", "-crc", srcPath.toString(), dstPath.toString());
checkPath(dstPath, true);
}
@Test
public void testCorruptedCopyCrc() throws Exception {
FSDataOutputStream out = lfs.getRawFileSystem().create(srcPath);
out.writeChars("bang");
out.close();
shellRun(1, "-get", srcPath.toString(), dstPath.toString());
}
@Test
public void testCorruptedCopyIgnoreCrc() throws Exception {
shellRun(0, "-get", "-ignoreCrc", srcPath.toString(), dstPath.toString());
checkPath(dstPath, false);
}
private void checkPath(Path p, boolean expectChecksum) throws IOException {
assertTrue(lfs.exists(p));
boolean hasChecksum = lfs.exists(lfs.getChecksumFile(p));
assertEquals(expectChecksum, hasChecksum);
}
private void shellRun(int n, String ... args) throws Exception {
assertEquals(n, shell.run(args));
}
}

View File

@ -17,12 +17,12 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<relativePath>../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common-project</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<description>Apache Hadoop Common Project</description>
<name>Apache Hadoop Common Project</name>
<packaging>pom</packaging>

View File

@ -17,12 +17,12 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<relativePath>../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-dist</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<description>Apache Hadoop Distribution</description>
<name>Apache Hadoop Distribution</name>
<packaging>jar</packaging>

View File

@ -19,12 +19,12 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-httpfs</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<packaging>war</packaging>
<name>Apache Hadoop HttpFS</name>

View File

@ -119,7 +119,28 @@ Release 0.23-PB - Unreleased
HDFS-2768. BackupNode stop can not close proxy connections because
it is not a proxy instance. (Uma Maheswara Rao G via eli)
Release 0.23.1 - UNRELEASED
Release 0.23.2 - UNRELEASED
INCOMPATIBLE CHANGES
HDFS-2887. FSVolume, which is part of the FSDatasetInterface implementation,
should not be referenced outside FSDataset. A new FSVolumeInterface is defined.
The BlockVolumeChoosingPolicy.chooseVolume(..) method signature is also
updated. (szetszwo)
NEW FEATURES
IMPROVEMENTS
OPTIMIZATIONS
BUG FIXES
HDFS-2923. Namenode IPC handler count uses the wrong configuration key
(todd)
HDFS-2764. TestBackupNode is racy. (atm)
Release 0.23.1 - 2012-02-08
INCOMPATIBLE CHANGES
@ -134,6 +155,10 @@ Release 0.23.1 - UNRELEASED
HDFS-2545. Change WebHDFS to support multiple namenodes in federation.
(szetszwo)
HDFS-2178. Contributing Hoop to HDFS, replacement for HDFS proxy
with read/write capabilities. (tucu)
IMPROVEMENTS
HDFS-2560. Refactor BPOfferService to be a static inner class (todd)
@ -167,9 +192,6 @@ Release 0.23.1 - UNRELEASED
HDFS-2604. Add a log message to show if WebHDFS is enabled and a
configuration section in the forrest doc. (szetszwo)
HDFS-2178. Contributing Hoop to HDFS, replacement for HDFS proxy
with read/write capabilities. (tucu)
HDFS-2511. Add dev script to generate HDFS protobufs. (tucu)
HDFS-2654. Make BlockReaderLocal not extend RemoteBlockReader2. (eli)
@ -180,7 +202,8 @@ Release 0.23.1 - UNRELEASED
HDFS-2335. DataNodeCluster and NNStorage always pull fresh entropy.
(Uma Maheswara Rao G via eli)
HDFS-2574. Remove references to some deprecated properties in conf templates and defaults files. (Joe Crobak via harsh)
HDFS-2574. Remove references to some deprecated properties in conf
templates and defaults files. (Joe Crobak via harsh)
HDFS-2722. HttpFs should not be using an int for block size. (harsh)
@ -188,21 +211,22 @@ Release 0.23.1 - UNRELEASED
suresh)
HDFS-2349. Corruption detected during block transfers between DNs
should log a WARN instead of INFO. (harsh)
should log a WARN instead of INFO. (harsh)
HDFS-2572. Remove unnecessary double-check in DN#getHostName. (harsh)
HDFS-2729. Update BlockManager's comments regarding the invalid block set (harsh)
HDFS-2729. Update BlockManager's comments regarding the invalid block
set (harsh)
HDFS-2726. Fix a logging issue under DFSClient's createBlockOutputStream method (harsh)
HDFS-2726. Fix a logging issue under DFSClient's createBlockOutputStream
method (harsh)
HDFS-554. Use System.arraycopy in BlockInfo.ensureCapacity. (harsh)
HDFS-1314. Make dfs.blocksize accept size-indicating prefixes (Sho Shimauchi via harsh)
HDFS-1314. Make dfs.blocksize accept size-indicating prefixes.
(Sho Shimauchi via harsh)
HDFS-69. Improve the 'dfsadmin' commandline help. (harsh)
HDFS-2788. HdfsServerConstants#DN_KEEPALIVE_TIMEOUT is dead code (eli)
HDFS-2788. HdfsServerConstants#DN_KEEPALIVE_TIMEOUT is dead code. (eli)
HDFS-362. FSEditLog should not writes long and short as UTF8, and should
not use ArrayWritable for writing non-array items. (Uma Maheswara Rao G
@ -215,7 +239,7 @@ Release 0.23.1 - UNRELEASED
HDFS-2818. Fix a missing space issue in HDFS webapps' title tags. (Devaraj K via harsh)
HDFS-2397. Undeprecate SecondaryNameNode (eli)
HDFS-2397. Undeprecate SecondaryNameNode. (eli)
HDFS-2814. NamenodeMXBean does not account for svn revision in the version
information. (Hitesh Shah via jitendra)
@ -228,6 +252,9 @@ Release 0.23.1 - UNRELEASED
HDFS-2868. Expose xceiver counts via the DataNode MXBean. (harsh)
HDFS-2786. Fix host-based token incompatibilities in DFSUtil. (Kihwal
Lee via jitendra)
OPTIMIZATIONS
HDFS-2130. Switch default checksum to CRC32C. (todd)
@ -319,11 +346,14 @@ Release 0.23.1 - UNRELEASED
HDFS-442. dfsthroughput in test jar throws NPE (harsh)
HDFS-2836. HttpFSServer still has 2 javadoc warnings in trunk (revans2 via tucu)
HDFS-2836. HttpFSServer still has 2 javadoc warnings in trunk.
(revans2 via tucu)
HDFS-2837. mvn javadoc:javadoc not seeing LimitedPrivate class (revans2 via tucu)
HDFS-2837. mvn javadoc:javadoc not seeing LimitedPrivate class.
(revans2 via tucu)
HDFS-2840. TestHostnameFilter should work with localhost or localhost.localdomain (tucu)
HDFS-2840. TestHostnameFilter should work with localhost or
localhost.localdomain (tucu)
HDFS-2791. If block report races with closing of file, replica is
incorrectly marked corrupt. (todd)

View File

@ -17,12 +17,12 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project-dist</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project-dist</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<description>Apache Hadoop HDFS</description>
<name>Apache Hadoop HDFS</name>
<packaging>jar</packaging>

View File

@ -608,19 +608,6 @@ public class DFSUtil {
return capacity <= 0 ? 0 : (remaining * 100.0f)/capacity;
}
/**
* @param address address of format host:port
* @return InetSocketAddress for the address
*/
public static InetSocketAddress getSocketAddress(String address) {
int colon = address.indexOf(":");
if (colon < 0) {
return new InetSocketAddress(address, 0);
}
return new InetSocketAddress(address.substring(0, colon),
Integer.parseInt(address.substring(colon + 1)));
}
/**
* Round bytes to GiB (gibibyte)
* @param bytes number of bytes

View File

@ -498,7 +498,7 @@ public class JspHelper {
String namenodeAddressInUrl = request.getParameter(NAMENODE_ADDRESS);
InetSocketAddress namenodeAddress = null;
if (namenodeAddressInUrl != null) {
namenodeAddress = DFSUtil.getSocketAddress(namenodeAddressInUrl);
namenodeAddress = NetUtils.createSocketAddr(namenodeAddressInUrl);
} else if (context != null) {
namenodeAddress = NameNodeHttpServer.getNameNodeAddressFromContext(
context);

View File

@ -46,15 +46,14 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
/**
* Performs two types of scanning:
* <li> Gets block files from the data directories and reconciles the
* difference between the blocks on the disk and in memory in
* {@link FSDataset}</li>
* difference between the blocks on the disk and in memory.</li>
* <li> Scans the data directories for block files under a block pool
* and verifies that the files are not corrupt</li>
* This keeps track of blocks and their last verification times.
@ -78,7 +77,7 @@ class BlockPoolSliceScanner {
private long scanPeriod = DEFAULT_SCAN_PERIOD_HOURS * 3600 * 1000;
private DataNode datanode;
private FSDataset dataset;
private final FSDatasetInterface dataset;
// sorted set
private TreeSet<BlockScanInfo> blockInfoSet;
@ -137,8 +136,8 @@ class BlockPoolSliceScanner {
}
}
BlockPoolSliceScanner(DataNode datanode, FSDataset dataset, Configuration conf,
String bpid) {
BlockPoolSliceScanner(DataNode datanode, FSDatasetInterface dataset,
Configuration conf, String bpid) {
this.datanode = datanode;
this.dataset = dataset;
this.blockPoolId = bpid;
@ -220,16 +219,16 @@ class BlockPoolSliceScanner {
* otherwise, pick the first directory.
*/
File dir = null;
List<FSVolume> volumes = dataset.volumes.getVolumes();
for (FSDataset.FSVolume vol : dataset.volumes.getVolumes()) {
File bpDir = vol.getBlockPoolSlice(blockPoolId).getDirectory();
List<FSVolumeInterface> volumes = dataset.getVolumes();
for (FSVolumeInterface vol : volumes) {
File bpDir = vol.getDirectory(blockPoolId);
if (LogFileHandler.isFilePresent(bpDir, verificationLogFile)) {
dir = bpDir;
break;
}
}
if (dir == null) {
dir = volumes.get(0).getBlockPoolSlice(blockPoolId).getDirectory();
dir = volumes.get(0).getDirectory(blockPoolId);
}
try {
@ -577,8 +576,8 @@ class BlockPoolSliceScanner {
bytesLeft += len;
}
static File getCurrentFile(FSVolume vol, String bpid) throws IOException {
return LogFileHandler.getCurrentFile(vol.getBlockPoolSlice(bpid).getDirectory(),
static File getCurrentFile(FSVolumeInterface vol, String bpid) throws IOException {
return LogFileHandler.getCurrentFile(vol.getDirectory(bpid),
BlockPoolSliceScanner.verificationLogFile);
}

View File

@ -22,7 +22,7 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/**************************************************
* BlockVolumeChoosingPolicy allows a DataNode to
@ -46,7 +46,7 @@ public interface BlockVolumeChoosingPolicy {
* @return the chosen volume to store the block.
* @throws IOException when disks are unavailable or are full.
*/
public FSVolume chooseVolume(List<FSVolume> volumes, long blockSize)
public FSVolumeInterface chooseVolume(List<FSVolumeInterface> volumes, long blockSize)
throws IOException;
}
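To make the updated contract concrete, a minimal sketch of an implementation (not the policy HDFS actually ships; the class name is made up): pick the first volume with enough free space, and fail as the javadoc describes when every disk is full or unavailable.

package org.apache.hadoop.hdfs.server.datanode;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;

class FirstFitVolumeChoosingPolicy implements BlockVolumeChoosingPolicy {
  @Override
  public FSVolumeInterface chooseVolume(List<FSVolumeInterface> volumes,
      long blockSize) throws IOException {
    for (FSVolumeInterface volume : volumes) {
      if (volume.getAvailable() >= blockSize) {
        return volume;   // first volume that can hold the new block
      }
    }
    throw new IOException("No volume has " + blockSize + " bytes available");
  }
}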

View File

@ -27,12 +27,12 @@ import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* DataBlockScanner manages block scanning for all the block pools. For each
@ -44,7 +44,7 @@ import org.apache.commons.logging.LogFactory;
public class DataBlockScanner implements Runnable {
public static final Log LOG = LogFactory.getLog(DataBlockScanner.class);
private final DataNode datanode;
private final FSDataset dataset;
private final FSDatasetInterface dataset;
private final Configuration conf;
/**
@ -55,7 +55,7 @@ public class DataBlockScanner implements Runnable {
new TreeMap<String, BlockPoolSliceScanner>();
Thread blockScannerThread = null;
DataBlockScanner(DataNode datanode, FSDataset dataset, Configuration conf) {
DataBlockScanner(DataNode datanode, FSDatasetInterface dataset, Configuration conf) {
this.datanode = datanode;
this.dataset = dataset;
this.conf = conf;
@ -135,7 +135,7 @@ public class DataBlockScanner implements Runnable {
.iterator();
while (bpidIterator.hasNext()) {
String bpid = bpidIterator.next();
for (FSDataset.FSVolume vol : dataset.volumes.getVolumes()) {
for (FSDatasetInterface.FSVolumeInterface vol : dataset.getVolumes()) {
try {
File currFile = BlockPoolSliceScanner.getCurrentFile(vol, bpid);
if (currFile.exists()) {

View File

@ -125,7 +125,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods;
@ -582,11 +581,11 @@ public class DataNode extends Configured
if (conf.getInt(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT) < 0) {
reason = "verification is turned off by configuration";
} else if (!(data instanceof FSDataset)) {
reason = "verifcation is supported only with FSDataset";
} else if ("SimulatedFSDataset".equals(data.getClass().getSimpleName())) {
reason = "verifcation is not supported by SimulatedFSDataset";
}
if (reason == null) {
blockScanner = new DataBlockScanner(this, (FSDataset)data, conf);
blockScanner = new DataBlockScanner(this, data, conf);
blockScanner.start();
} else {
LOG.info("Periodic Block Verification scan is disabled because " +
@ -611,11 +610,11 @@ public class DataNode extends Configured
if (conf.getInt(DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT) < 0) {
reason = "verification is turned off by configuration";
} else if (!(data instanceof FSDataset)) {
reason = "verification is supported only with FSDataset";
} else if ("SimulatedFSDataset".equals(data.getClass().getSimpleName())) {
reason = "verifcation is not supported by SimulatedFSDataset";
}
if (reason == null) {
directoryScanner = new DirectoryScanner(this, (FSDataset) data, conf);
directoryScanner = new DirectoryScanner(this, data, conf);
directoryScanner.start();
} else {
LOG.info("Periodic Directory Tree Verification scan is disabled because " +
@ -2237,16 +2236,7 @@ public class DataNode extends Configured
*/
@Override // DataNodeMXBean
public String getVolumeInfo() {
final Map<String, Object> info = new HashMap<String, Object>();
Collection<VolumeInfo> volumes = ((FSDataset)this.data).getVolumeInfo();
for (VolumeInfo v : volumes) {
final Map<String, Object> innerInfo = new HashMap<String, Object>();
innerInfo.put("usedSpace", v.usedSpace);
innerInfo.put("freeSpace", v.freeSpace);
innerInfo.put("reservedSpace", v.reservedSpace);
info.put(v.directory, innerInfo);
}
return JSON.toString(info);
return JSON.toString(data.getVolumeInfoMap());
}
@Override // DataNodeMXBean

View File

@ -751,7 +751,7 @@ public class DataStorage extends Storage {
Matcher matcher = PRE_GENSTAMP_META_FILE_PATTERN.matcher(oldFileName);
if (matcher.matches()) {
//return the current metadata file name
return FSDataset.getMetaFileName(matcher.group(1),
return DatanodeUtil.getMetaFileName(matcher.group(1),
GenerationStamp.GRANDFATHER_GENERATION_STAMP);
}
return oldFileName;

View File

@ -53,18 +53,30 @@ import org.apache.hadoop.util.StringUtils;
@InterfaceAudience.Private
public class DatanodeJspHelper {
private static DFSClient getDFSClient(final UserGroupInformation user,
final InetSocketAddress addr,
final String addr,
final Configuration conf
) throws IOException,
InterruptedException {
return
user.doAs(new PrivilegedExceptionAction<DFSClient>() {
public DFSClient run() throws IOException {
return new DFSClient(addr, conf);
return new DFSClient(NetUtils.createSocketAddr(addr), conf);
}
});
}
/**
* Internal convenience method for canonicalizing host name.
* @param addr name:port or name
* @return canonicalized host name
*/
private static String canonicalize(String addr) {
// default port 1 is supplied to allow addr without port.
// the port will be ignored.
return NetUtils.createSocketAddr(addr, 1).getAddress()
.getCanonicalHostName();
}
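// For illustration (host name is made up): canonicalize("nn.example.com:8020")
// resolves "nn.example.com" and returns its canonical host name; the port
// part of the argument is ignored.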
private static final SimpleDateFormat lsDateFormat =
new SimpleDateFormat("yyyy-MM-dd HH:mm");
@ -102,8 +114,7 @@ public class DatanodeJspHelper {
return;
}
InetSocketAddress namenodeAddress = DFSUtil.getSocketAddress(nnAddr);
DFSClient dfs = getDFSClient(ugi, namenodeAddress, conf);
DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
String target = dir;
final HdfsFileStatus targetStatus = dfs.getFileInfo(target);
if (targetStatus == null) { // not exists
@ -125,8 +136,7 @@ public class DatanodeJspHelper {
out.print("Empty file");
} else {
DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf);
String fqdn = InetAddress.getByName(chosenNode.getHost())
.getCanonicalHostName();
String fqdn = canonicalize(chosenNode.getHost());
String datanodeAddr = chosenNode.getName();
int datanodePort = Integer.parseInt(datanodeAddr.substring(
datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
@ -210,9 +220,8 @@ public class DatanodeJspHelper {
JspHelper.addTableFooter(out);
}
}
String namenodeHost = namenodeAddress.getHostName();
out.print("<br><a href=\"http://"
+ InetAddress.getByName(namenodeHost).getCanonicalHostName() + ":"
+ canonicalize(nnAddr) + ":"
+ namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>");
dfs.close();
}
@ -282,8 +291,7 @@ public class DatanodeJspHelper {
}
long blockSize = Long.parseLong(blockSizeStr);
final InetSocketAddress namenodeAddress = DFSUtil.getSocketAddress(nnAddr);
final DFSClient dfs = getDFSClient(ugi, namenodeAddress, conf);
final DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
Long.MAX_VALUE).getLocatedBlocks();
// Add the various links for looking at the file contents
@ -305,8 +313,7 @@ public class DatanodeJspHelper {
dfs.close();
return;
}
String fqdn = InetAddress.getByName(chosenNode.getHost())
.getCanonicalHostName();
String fqdn = canonicalize(chosenNode.getHost());
String tailUrl = "http://" + fqdn + ":" + chosenNode.getInfoPort()
+ "/tail.jsp?filename=" + URLEncoder.encode(filename, "UTF-8")
+ "&namenodeInfoPort=" + namenodeInfoPort
@ -345,9 +352,7 @@ public class DatanodeJspHelper {
// generate a table and dump the info
out.println("\n<table>");
String namenodeHost = namenodeAddress.getHostName();
String namenodeHostName = InetAddress.getByName(namenodeHost).getCanonicalHostName();
String nnCanonicalName = canonicalize(nnAddr);
for (LocatedBlock cur : blocks) {
out.print("<tr>");
final String blockidstring = Long.toString(cur.getBlock().getBlockId());
@ -358,7 +363,7 @@ public class DatanodeJspHelper {
String datanodeAddr = locs[j].getName();
datanodePort = Integer.parseInt(datanodeAddr.substring(datanodeAddr
.indexOf(':') + 1, datanodeAddr.length()));
fqdn = InetAddress.getByName(locs[j].getHost()).getCanonicalHostName();
fqdn = canonicalize(locs[j].getHost());
String blockUrl = "http://" + fqdn + ":" + locs[j].getInfoPort()
+ "/browseBlock.jsp?blockId=" + blockidstring
+ "&blockSize=" + blockSize
@ -370,7 +375,7 @@ public class DatanodeJspHelper {
+ JspHelper.getDelegationTokenUrlParam(tokenString)
+ JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
String blockInfoUrl = "http://" + namenodeHostName + ":"
String blockInfoUrl = "http://" + nnCanonicalName + ":"
+ namenodeInfoPort
+ "/block_info_xml.jsp?blockId=" + blockidstring;
out.print("<td>&nbsp</td><td><a href=\"" + blockUrl + "\">"
@ -382,7 +387,7 @@ public class DatanodeJspHelper {
out.println("</table>");
out.print("<hr>");
out.print("<br><a href=\"http://"
+ InetAddress.getByName(namenodeHost).getCanonicalHostName() + ":"
+ nnCanonicalName + ":"
+ namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>");
dfs.close();
}
@ -419,8 +424,7 @@ public class DatanodeJspHelper {
return;
}
final DFSClient dfs = getDFSClient(ugi,
DFSUtil.getSocketAddress(nnAddr), conf);
final DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
String bpid = null;
Token<BlockTokenIdentifier> blockToken = BlockTokenSecretManager.DUMMY_TOKEN;
@ -518,8 +522,7 @@ public class DatanodeJspHelper {
String datanodeAddr = d.getName();
nextDatanodePort = Integer.parseInt(datanodeAddr.substring(
datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
nextHost = InetAddress.getByName(d.getHost())
.getCanonicalHostName();
nextHost = d.getHost();
nextPort = d.getInfoPort();
}
}
@ -533,7 +536,7 @@ public class DatanodeJspHelper {
}
String nextUrl = null;
if (nextBlockIdStr != null) {
nextUrl = "http://" + nextHost + ":" + nextPort
nextUrl = "http://" + canonicalize(nextHost) + ":" + nextPort
+ "/browseBlock.jsp?blockId=" + nextBlockIdStr
+ "&blockSize=" + nextBlockSize
+ "&startOffset=" + nextStartOffset
@ -573,8 +576,7 @@ public class DatanodeJspHelper {
String datanodeAddr = d.getName();
prevDatanodePort = Integer.parseInt(datanodeAddr.substring(
datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
prevHost = InetAddress.getByName(d.getHost())
.getCanonicalHostName();
prevHost = d.getHost();
prevPort = d.getInfoPort();
}
}
@ -591,7 +593,7 @@ public class DatanodeJspHelper {
String prevUrl = null;
if (prevBlockIdStr != null) {
prevUrl = "http://" + prevHost + ":" + prevPort
prevUrl = "http://" + canonicalize(prevHost) + ":" + prevPort
+ "/browseBlock.jsp?blockId=" + prevBlockIdStr
+ "&blockSize=" + prevBlockSize
+ "&startOffset=" + prevStartOffset
@ -669,8 +671,7 @@ public class DatanodeJspHelper {
+ "\">");
// fetch the block from the datanode that has the last block for this file
final DFSClient dfs = getDFSClient(ugi, DFSUtil.getSocketAddress(nnAddr),
conf);
final DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
Long.MAX_VALUE).getLocatedBlocks();
if (blocks == null || blocks.size() == 0) {
@ -710,6 +711,6 @@ public class DatanodeJspHelper {
final DataNode datanode, final Configuration conf,
final UserGroupInformation ugi) throws IOException, InterruptedException {
final String nnAddr = request.getParameter(JspHelper.NAMENODE_ADDRESS);
return getDFSClient(ugi, DFSUtil.getSocketAddress(nnAddr), conf);
return getDFSClient(ugi, nnAddr, conf);
}
}

View File

@ -18,7 +18,9 @@
package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block;
@ -26,6 +28,10 @@ import org.apache.hadoop.hdfs.protocol.Block;
/** Provide utility methods for Datanode. */
@InterfaceAudience.Private
class DatanodeUtil {
static final String METADATA_EXTENSION = ".meta";
static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
private final static String DISK_ERROR = "Possible disk error on file creation: ";
/** Get the cause of an I/O exception if caused by a possible disk error
@ -64,4 +70,37 @@ class DatanodeUtil {
}
return f;
}
static String getMetaFileName(String blockFileName, long genStamp) {
return blockFileName + "_" + genStamp + METADATA_EXTENSION;
}
static File getMetaFile(File f, long genStamp) {
return new File(getMetaFileName(f.getAbsolutePath(), genStamp));
}
/** Find the corresponding meta data file from a given block file */
static File findMetaFile(final File blockFile) throws IOException {
final String prefix = blockFile.getName() + "_";
final File parent = blockFile.getParentFile();
File[] matches = parent.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return dir.equals(parent)
&& name.startsWith(prefix) && name.endsWith(METADATA_EXTENSION);
}
});
if (matches == null || matches.length == 0) {
throw new IOException("Meta file not found, blockFile=" + blockFile);
}
else if (matches.length > 1) {
throw new IOException("Found more than one meta files: "
+ Arrays.asList(matches));
}
return matches[0];
}
static File getUnlinkTmpFile(File f) {
return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX);
}
}
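As a quick illustration of the naming helpers above (block file name and generation stamp are made-up values):

// getMetaFileName("blk_1073741825", 1005)        -> "blk_1073741825_1005.meta"
// getUnlinkTmpFile(new File("blk_1073741825"))   -> "blk_1073741825.unlinked"
// findMetaFile(new File(dir, "blk_1073741825"))  scans dir for the single
// sibling that starts with "blk_1073741825_" and ends with ".meta"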

View File

@ -43,20 +43,19 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.util.Daemon;
/**
* Periodically scans the data directories for block and block metadata files.
* Reconciles the differences with block information maintained in
* {@link FSDataset}
* Reconciles the differences with block information maintained in the dataset.
*/
@InterfaceAudience.Private
public class DirectoryScanner implements Runnable {
private static final Log LOG = LogFactory.getLog(DirectoryScanner.class);
private final DataNode datanode;
private final FSDataset dataset;
private final FSDatasetInterface dataset;
private final ExecutorService reportCompileThreadPool;
private final ScheduledExecutorService masterThread;
private final long scanPeriodMsecs;
@ -158,13 +157,13 @@ public class DirectoryScanner implements Runnable {
private final long blockId;
private final File metaFile;
private final File blockFile;
private final FSVolume volume;
private final FSVolumeInterface volume;
ScanInfo(long blockId) {
this(blockId, null, null, null);
}
ScanInfo(long blockId, File blockFile, File metaFile, FSVolume vol) {
ScanInfo(long blockId, File blockFile, File metaFile, FSVolumeInterface vol) {
this.blockId = blockId;
this.metaFile = metaFile;
this.blockFile = blockFile;
@ -183,7 +182,7 @@ public class DirectoryScanner implements Runnable {
return blockId;
}
FSVolume getVolume() {
FSVolumeInterface getVolume() {
return volume;
}
@ -220,7 +219,7 @@ public class DirectoryScanner implements Runnable {
}
}
DirectoryScanner(DataNode dn, FSDataset dataset, Configuration conf) {
DirectoryScanner(DataNode dn, FSDatasetInterface dataset, Configuration conf) {
this.datanode = dn;
this.dataset = dataset;
int interval = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
@ -269,7 +268,7 @@ public class DirectoryScanner implements Runnable {
return;
}
String[] bpids = dataset.getBPIdlist();
String[] bpids = dataset.getBlockPoolList();
for(String bpid : bpids) {
UpgradeManagerDatanode um =
datanode.getUpgradeManagerDatanode(bpid);
@ -411,17 +410,29 @@ public class DirectoryScanner implements Runnable {
diffRecord.add(new ScanInfo(blockId));
}
/** Is the given volume still valid in the dataset? */
private static boolean isValid(final FSDatasetInterface dataset,
final FSVolumeInterface volume) {
for (FSVolumeInterface vol : dataset.getVolumes()) {
if (vol == volume) {
return true;
}
}
return false;
}
/** Get lists of blocks on the disk sorted by blockId, per blockpool */
private Map<String, ScanInfo[]> getDiskReport() {
// First get list of data directories
List<FSVolume> volumes = dataset.volumes.getVolumes();
final List<FSVolumeInterface> volumes = dataset.getVolumes();
ArrayList<ScanInfoPerBlockPool> dirReports =
new ArrayList<ScanInfoPerBlockPool>(volumes.size());
Map<Integer, Future<ScanInfoPerBlockPool>> compilersInProgress =
new HashMap<Integer, Future<ScanInfoPerBlockPool>>();
for (int i = 0; i < volumes.size(); i++) {
if (!dataset.volumes.isValid(volumes.get(i))) { // volume is still valid
if (!isValid(dataset, volumes.get(i))) {
// volume is invalid
dirReports.add(i, null);
} else {
ReportCompiler reportCompiler =
@ -446,7 +457,8 @@ public class DirectoryScanner implements Runnable {
// Compile consolidated report for all the volumes
ScanInfoPerBlockPool list = new ScanInfoPerBlockPool();
for (int i = 0; i < volumes.size(); i++) {
if (dataset.volumes.isValid(volumes.get(i))) { // volume is still valid
if (isValid(dataset, volumes.get(i))) {
// volume is still valid
list.addAll(dirReports.get(i));
}
}
@ -461,9 +473,9 @@ public class DirectoryScanner implements Runnable {
private static class ReportCompiler
implements Callable<ScanInfoPerBlockPool> {
private FSVolume volume;
private FSVolumeInterface volume;
public ReportCompiler(FSVolume volume) {
public ReportCompiler(FSVolumeInterface volume) {
this.volume = volume;
}
@ -473,14 +485,14 @@ public class DirectoryScanner implements Runnable {
ScanInfoPerBlockPool result = new ScanInfoPerBlockPool(bpList.length);
for (String bpid : bpList) {
LinkedList<ScanInfo> report = new LinkedList<ScanInfo>();
File bpFinalizedDir = volume.getBlockPoolSlice(bpid).getFinalizedDir();
File bpFinalizedDir = volume.getFinalizedDir(bpid);
result.put(bpid, compileReport(volume, bpFinalizedDir, report));
}
return result;
}
/** Compile list {@link ScanInfo} for the blocks in the directory <dir> */
private LinkedList<ScanInfo> compileReport(FSVolume vol, File dir,
private LinkedList<ScanInfo> compileReport(FSVolumeInterface vol, File dir,
LinkedList<ScanInfo> report) {
File[] files;
try {

View File

@ -23,7 +23,6 @@ import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
@ -81,14 +80,13 @@ class FSDataset implements FSDatasetInterface {
* A node type that can be built into a tree reflecting the
* hierarchy of blocks on the local disk.
*/
class FSDir {
File dir;
private class FSDir {
final File dir;
int numBlocks = 0;
FSDir children[];
int lastChildIdx = 0;
/**
*/
public FSDir(File dir)
private FSDir(File dir)
throws IOException {
this.dir = dir;
this.children = null;
@ -113,7 +111,7 @@ class FSDataset implements FSDatasetInterface {
}
}
public File addBlock(Block b, File src) throws IOException {
private File addBlock(Block b, File src) throws IOException {
//First try without creating subdirectories
File file = addBlock(b, src, false, false);
return (file != null) ? file : addBlock(b, src, true, true);
@ -161,7 +159,7 @@ class FSDataset implements FSDatasetInterface {
return children[ lastChildIdx ].addBlock(b, src, true, false);
}
void getVolumeMap(String bpid, ReplicasMap volumeMap, FSVolume volume)
private void getVolumeMap(String bpid, ReplicasMap volumeMap, FSVolume volume)
throws IOException {
if (children != null) {
for (int i = 0; i < children.length; i++) {
@ -207,7 +205,7 @@ class FSDataset implements FSDatasetInterface {
* check if a data diretory is healthy
* @throws DiskErrorException
*/
public void checkDirTree() throws DiskErrorException {
private void checkDirTree() throws DiskErrorException {
DiskChecker.checkDir(dir);
if (children != null) {
@ -217,7 +215,7 @@ class FSDataset implements FSDatasetInterface {
}
}
void clearPath(File f) {
private void clearPath(File f) {
String root = dir.getAbsolutePath();
String dir = f.getAbsolutePath();
if (dir.startsWith(root)) {
@ -270,7 +268,8 @@ class FSDataset implements FSDatasetInterface {
}
return false;
}
@Override
public String toString() {
return "FSDir{" +
"dir=" + dir +
@ -284,7 +283,7 @@ class FSDataset implements FSDatasetInterface {
* Taken together, all BlockPoolSlices sharing a block pool ID across a
* cluster represent a single block pool.
*/
class BlockPoolSlice {
private class BlockPoolSlice {
private final String bpid;
private final FSVolume volume; // volume to which this BlockPool belongs to
private final File currentDir; // StorageDirectory/current/bpid/current
@ -342,11 +341,7 @@ class FSDataset implements FSDatasetInterface {
File getDirectory() {
return currentDir.getParentFile();
}
File getCurrentDir() {
return currentDir;
}
File getFinalizedDir() {
return finalizedDir.dir;
}
@ -387,7 +382,7 @@ class FSDataset implements FSDatasetInterface {
File addBlock(Block b, File f) throws IOException {
File blockFile = finalizedDir.addBlock(b, f);
File metaFile = getMetaFile(blockFile , b.getGenerationStamp());
File metaFile = DatanodeUtil.getMetaFile(blockFile, b.getGenerationStamp());
dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
return blockFile;
}
@ -455,7 +450,7 @@ class FSDataset implements FSDatasetInterface {
DataInputStream checksumIn = null;
InputStream blockIn = null;
try {
File metaFile = new File(getMetaFileName(blockFile.toString(), genStamp));
final File metaFile = DatanodeUtil.getMetaFile(blockFile, genStamp);
long blockFileLen = blockFile.length();
long metaFileLen = metaFile.length();
int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
@ -521,7 +516,7 @@ class FSDataset implements FSDatasetInterface {
}
}
class FSVolume {
class FSVolume implements FSVolumeInterface {
private final Map<String, BlockPoolSlice> map = new HashMap<String, BlockPoolSlice>();
private final File currentDir; // <StorageDirectory>/current
private final DF usage;
@ -534,11 +529,6 @@ class FSDataset implements FSDatasetInterface {
File parent = currentDir.getParentFile();
this.usage = new DF(parent, conf);
}
/** Return storage directory corresponding to the volume */
File getDir() {
return currentDir.getParentFile();
}
File getCurrentDir() {
return currentDir;
@ -583,8 +573,9 @@ class FSDataset implements FSDatasetInterface {
long remaining = usage.getCapacity() - reserved;
return remaining > 0 ? remaining : 0;
}
long getAvailable() throws IOException {
@Override
public long getAvailable() throws IOException {
long remaining = getCapacity()-getDfsUsed();
long available = usage.getAvailable();
if (remaining>available) {
@ -600,19 +591,30 @@ class FSDataset implements FSDatasetInterface {
String getMount() throws IOException {
return usage.getMount();
}
BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
private BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
BlockPoolSlice bp = map.get(bpid);
if (bp == null) {
throw new IOException("block pool " + bpid + " is not found");
}
return bp;
}
@Override
public File getDirectory(String bpid) throws IOException {
return getBlockPoolSlice(bpid).getDirectory();
}
@Override
public File getFinalizedDir(String bpid) throws IOException {
return getBlockPoolSlice(bpid).getFinalizedDir();
}
/**
* Make a deep copy of the list of currently active BPIDs
*/
String[] getBlockPoolList() {
@Override
public String[] getBlockPoolList() {
synchronized(FSDataset.this) {
return map.keySet().toArray(new String[map.keySet().size()]);
}
@ -681,7 +683,8 @@ class FSDataset implements FSDatasetInterface {
BlockPoolSlice bp = getBlockPoolSlice(bpid);
bp.clearPath(f);
}
@Override
public String toString() {
return currentDir.getAbsolutePath();
}
@ -773,21 +776,18 @@ class FSDataset implements FSDatasetInterface {
* Read access to this unmodifiable list is not synchronized.
* This list is replaced on modification holding "this" lock.
*/
private volatile List<FSVolume> volumes = null;
private volatile List<FSVolumeInterface> volumes = null;
BlockVolumeChoosingPolicy blockChooser;
int numFailedVolumes;
FSVolumeSet(FSVolume[] volumes, int failedVols, BlockVolumeChoosingPolicy blockChooser) {
List<FSVolume> list = Arrays.asList(volumes);
this.volumes = Collections.unmodifiableList(list);
FSVolumeSet(List<FSVolumeInterface> volumes, int failedVols,
BlockVolumeChoosingPolicy blockChooser) {
this.volumes = Collections.unmodifiableList(volumes);
this.blockChooser = blockChooser;
this.numFailedVolumes = failedVols;
}
private int numberOfVolumes() {
return volumes.size();
}
private int numberOfFailedVolumes() {
return numFailedVolumes;
}
@ -800,36 +800,36 @@ class FSDataset implements FSDatasetInterface {
* @return next volume to store the block in.
*/
synchronized FSVolume getNextVolume(long blockSize) throws IOException {
return blockChooser.chooseVolume(volumes, blockSize);
return (FSVolume)blockChooser.chooseVolume(volumes, blockSize);
}
private long getDfsUsed() throws IOException {
long dfsUsed = 0L;
for (FSVolume vol : volumes) {
dfsUsed += vol.getDfsUsed();
for (FSVolumeInterface v : volumes) {
dfsUsed += ((FSVolume)v).getDfsUsed();
}
return dfsUsed;
}
private long getBlockPoolUsed(String bpid) throws IOException {
long dfsUsed = 0L;
for (FSVolume vol : volumes) {
dfsUsed += vol.getBlockPoolUsed(bpid);
for (FSVolumeInterface v : volumes) {
dfsUsed += ((FSVolume)v).getBlockPoolUsed(bpid);
}
return dfsUsed;
}
private long getCapacity() throws IOException {
long capacity = 0L;
for (FSVolume vol : volumes) {
capacity += vol.getCapacity();
for (FSVolumeInterface v : volumes) {
capacity += ((FSVolume)v).getCapacity();
}
return capacity;
}
private long getRemaining() throws IOException {
long remaining = 0L;
for (FSVolume vol : volumes) {
for (FSVolumeInterface vol : volumes) {
remaining += vol.getAvailable();
}
return remaining;
@ -837,15 +837,15 @@ class FSDataset implements FSDatasetInterface {
private void getVolumeMap(ReplicasMap volumeMap)
throws IOException {
for (FSVolume vol : volumes) {
vol.getVolumeMap(volumeMap);
for (FSVolumeInterface v : volumes) {
((FSVolume)v).getVolumeMap(volumeMap);
}
}
private void getVolumeMap(String bpid, ReplicasMap volumeMap)
throws IOException {
for (FSVolume vol : volumes) {
vol.getVolumeMap(bpid, volumeMap);
for (FSVolumeInterface v : volumes) {
((FSVolume)v).getVolumeMap(bpid, volumeMap);
}
}
@ -861,10 +861,10 @@ class FSDataset implements FSDatasetInterface {
ArrayList<FSVolume> removedVols = null;
// Make a copy of volumes for performing modification
List<FSVolume> volumeList = new ArrayList<FSVolume>(getVolumes());
final List<FSVolumeInterface> volumeList = new ArrayList<FSVolumeInterface>(volumes);
for (int idx = 0; idx < volumeList.size(); idx++) {
FSVolume fsv = volumeList.get(idx);
FSVolume fsv = (FSVolume)volumeList.get(idx);
try {
fsv.checkDirs();
} catch (DiskErrorException e) {
@ -881,8 +881,8 @@ class FSDataset implements FSDatasetInterface {
// Remove null volumes from the volumes array
if (removedVols != null && removedVols.size() > 0) {
List<FSVolume> newVols = new ArrayList<FSVolume>();
for (FSVolume vol : volumeList) {
List<FSVolumeInterface> newVols = new ArrayList<FSVolumeInterface>();
for (FSVolumeInterface vol : volumeList) {
if (vol != null) {
newVols.add(vol);
}
@ -895,44 +895,30 @@ class FSDataset implements FSDatasetInterface {
return removedVols;
}
@Override
public String toString() {
return volumes.toString();
}
boolean isValid(FSVolume volume) {
for (FSVolume vol : volumes) {
if (vol == volume) {
return true;
}
}
return false;
}
private void addBlockPool(String bpid, Configuration conf)
throws IOException {
for (FSVolume v : volumes) {
v.addBlockPool(bpid, conf);
for (FSVolumeInterface v : volumes) {
((FSVolume)v).addBlockPool(bpid, conf);
}
}
private void removeBlockPool(String bpid) {
for (FSVolume v : volumes) {
v.shutdownBlockPool(bpid);
for (FSVolumeInterface v : volumes) {
((FSVolume)v).shutdownBlockPool(bpid);
}
}
/**
* @return unmodifiable list of volumes
*/
public List<FSVolume> getVolumes() {
return volumes;
}
private void shutdown() {
for (FSVolume volume : volumes) {
for (FSVolumeInterface volume : volumes) {
if(volume != null) {
volume.shutdown();
((FSVolume)volume).shutdown();
}
}
}
@ -944,35 +930,20 @@ class FSDataset implements FSDatasetInterface {
//
//////////////////////////////////////////////////////
//Find better place?
static final String METADATA_EXTENSION = ".meta";
static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
private static boolean isUnlinkTmpFile(File f) {
String name = f.getName();
return name.endsWith(UNLINK_BLOCK_SUFFIX);
}
static File getUnlinkTmpFile(File f) {
return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX);
return name.endsWith(DatanodeUtil.UNLINK_BLOCK_SUFFIX);
}
private static File getOrigFile(File unlinkTmpFile) {
String fileName = unlinkTmpFile.getName();
return new File(unlinkTmpFile.getParentFile(),
fileName.substring(0, fileName.length()-UNLINK_BLOCK_SUFFIX.length()));
}
static String getMetaFileName(String blockFileName, long genStamp) {
return blockFileName + "_" + genStamp + METADATA_EXTENSION;
}
static File getMetaFile(File f , long genStamp) {
return new File(getMetaFileName(f.getAbsolutePath(), genStamp));
fileName.substring(0,
fileName.length() - DatanodeUtil.UNLINK_BLOCK_SUFFIX.length()));
}
protected File getMetaFile(ExtendedBlock b) throws IOException {
return getMetaFile(getBlockFile(b), b.getGenerationStamp());
return DatanodeUtil.getMetaFile(getBlockFile(b), b.getGenerationStamp());
}
/** Find the metadata file for the specified block file.
@ -994,34 +965,13 @@ class FSDataset implements FSDatasetInterface {
" does not have a metafile!");
return GenerationStamp.GRANDFATHER_GENERATION_STAMP;
}
/** Find the corresponding meta data file from a given block file */
private static File findMetaFile(final File blockFile) throws IOException {
final String prefix = blockFile.getName() + "_";
final File parent = blockFile.getParentFile();
File[] matches = parent.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return dir.equals(parent)
&& name.startsWith(prefix) && name.endsWith(METADATA_EXTENSION);
}
});
if (matches == null || matches.length == 0) {
throw new IOException("Meta file not found, blockFile=" + blockFile);
}
else if (matches.length > 1) {
throw new IOException("Found more than one meta files: "
+ Arrays.asList(matches));
}
return matches[0];
}
/** Find the corresponding meta data file from a given block file */
private static long parseGenerationStamp(File blockFile, File metaFile
) throws IOException {
String metaname = metaFile.getName();
String gs = metaname.substring(blockFile.getName().length() + 1,
metaname.length() - METADATA_EXTENSION.length());
metaname.length() - DatanodeUtil.METADATA_EXTENSION.length());
try {
return Long.parseLong(gs);
} catch(NumberFormatException nfe) {
@ -1030,6 +980,11 @@ class FSDataset implements FSDatasetInterface {
}
}
@Override // FSDatasetInterface
public List<FSVolumeInterface> getVolumes() {
return volumes.volumes;
}
@Override // FSDatasetInterface
public synchronized Block getStoredBlock(String bpid, long blkid)
throws IOException {
@ -1037,7 +992,7 @@ class FSDataset implements FSDatasetInterface {
if (blockfile == null) {
return null;
}
File metafile = findMetaFile(blockfile);
final File metafile = DatanodeUtil.findMetaFile(blockfile);
return new Block(blkid, blockfile.length(),
parseGenerationStamp(blockfile, metafile));
}
@ -1101,7 +1056,7 @@ class FSDataset implements FSDatasetInterface {
/**
* An FSDataset has a directory where it loads its data files.
*/
public FSDataset(DataNode datanode, DataStorage storage, Configuration conf)
FSDataset(DataNode datanode, DataStorage storage, Configuration conf)
throws IOException {
this.datanode = datanode;
this.maxBlocksPerDir =
@ -1134,12 +1089,12 @@ class FSDataset implements FSDatasetInterface {
+ ", volume failures tolerated: " + volFailuresTolerated);
}
FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
final List<FSVolumeInterface> volArray = new ArrayList<FSVolumeInterface>(
storage.getNumStorageDirs());
for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(),
conf);
DataNode.LOG.info("FSDataset added volume - "
+ storage.getStorageDir(idx).getCurrentDir());
final File dir = storage.getStorageDir(idx).getCurrentDir();
volArray.add(new FSVolume(dir, conf));
DataNode.LOG.info("FSDataset added volume - " + dir);
}
volumeMap = new ReplicasMap(this);
@ -1185,7 +1140,7 @@ class FSDataset implements FSDatasetInterface {
*/
@Override // FSDatasetInterface
public boolean hasEnoughResource() {
return volumes.numberOfVolumes() >= validVolsRequired;
return getVolumes().size() >= validVolsRequired;
}
/**
@ -1368,8 +1323,8 @@ class FSDataset implements FSDatasetInterface {
private static File moveBlockFiles(Block b, File srcfile, File destdir
) throws IOException {
final File dstfile = new File(destdir, b.getBlockName());
final File srcmeta = getMetaFile(srcfile, b.getGenerationStamp());
final File dstmeta = getMetaFile(dstfile, b.getGenerationStamp());
final File srcmeta = DatanodeUtil.getMetaFile(srcfile, b.getGenerationStamp());
final File dstmeta = DatanodeUtil.getMetaFile(dstfile, b.getGenerationStamp());
if (!srcmeta.renameTo(dstmeta)) {
throw new IOException("Failed to move meta file for " + b
+ " from " + srcmeta + " to " + dstmeta);
@ -1487,7 +1442,7 @@ class FSDataset implements FSDatasetInterface {
// construct a RBW replica with the new GS
File blkfile = replicaInfo.getBlockFile();
FSVolume v = replicaInfo.getVolume();
FSVolume v = (FSVolume)replicaInfo.getVolume();
if (v.getAvailable() < estimateBlockLen - replicaInfo.getNumBytes()) {
throw new DiskOutOfSpaceException("Insufficient space for appending to "
+ replicaInfo);
@ -1744,7 +1699,7 @@ class FSDataset implements FSDatasetInterface {
+ visible + ", temp=" + temp);
}
// check volume
final FSVolume v = temp.getVolume();
final FSVolume v = (FSVolume)temp.getVolume();
if (v == null) {
throw new IOException("r.getVolume() = null, temp=" + temp);
}
@ -1805,7 +1760,7 @@ class FSDataset implements FSDatasetInterface {
if ( vol == null ) {
ReplicaInfo replica = volumeMap.get(bpid, blk);
if (replica != null) {
vol = volumeMap.get(bpid, blk).getVolume();
vol = (FSVolume)volumeMap.get(bpid, blk).getVolume();
}
if ( vol == null ) {
throw new IOException("Could not find volume for block " + blk);
@ -1845,7 +1800,7 @@ class FSDataset implements FSDatasetInterface {
newReplicaInfo = (FinalizedReplica)
((ReplicaUnderRecovery)replicaInfo).getOriginalReplica();
} else {
FSVolume v = replicaInfo.getVolume();
FSVolume v = (FSVolume)replicaInfo.getVolume();
File f = replicaInfo.getBlockFile();
if (v == null) {
throw new IOException("No volume for temporary file " + f +
@ -1943,7 +1898,8 @@ class FSDataset implements FSDatasetInterface {
/**
* Get the list of finalized blocks from in-memory blockmap for a block pool.
*/
synchronized List<Block> getFinalizedBlocks(String bpid) {
@Override
public synchronized List<Block> getFinalizedBlocks(String bpid) {
ArrayList<Block> finalized = new ArrayList<Block>(volumeMap.size(bpid));
for (ReplicaInfo b : volumeMap.replicas(bpid)) {
if(b.getState() == ReplicaState.FINALIZED) {
@ -2016,7 +1972,7 @@ class FSDataset implements FSDatasetInterface {
}
//check replica's meta file
final File metafile = getMetaFile(f, r.getGenerationStamp());
final File metafile = DatanodeUtil.getMetaFile(f, r.getGenerationStamp());
if (!metafile.exists()) {
throw new IOException("Metafile " + metafile + " does not exist, r=" + r);
}
@ -2047,7 +2003,7 @@ class FSDataset implements FSDatasetInterface {
error = true;
continue;
}
v = dinfo.getVolume();
v = (FSVolume)dinfo.getVolume();
if (f == null) {
DataNode.LOG.warn("Unexpected error trying to delete block "
+ invalidBlks[i] +
@ -2081,7 +2037,7 @@ class FSDataset implements FSDatasetInterface {
}
volumeMap.remove(bpid, invalidBlks[i]);
}
File metaFile = getMetaFile(f, invalidBlks[i].getGenerationStamp());
File metaFile = DatanodeUtil.getMetaFile(f, invalidBlks[i].getGenerationStamp());
// Delete the block asynchronously to make sure we can do it fast enough
asyncDiskService.deleteAsync(v, f, metaFile,
@ -2238,8 +2194,9 @@ class FSDataset implements FSDatasetInterface {
* @param diskMetaFile Metadata file from on the disk
* @param vol Volume of the block file
*/
@Override
public void checkAndUpdate(String bpid, long blockId, File diskFile,
File diskMetaFile, FSVolume vol) {
File diskMetaFile, FSVolumeInterface vol) {
Block corruptBlock = null;
ReplicaInfo memBlockInfo;
synchronized (this) {
@ -2327,7 +2284,7 @@ class FSDataset implements FSDatasetInterface {
// Compare generation stamp
if (memBlockInfo.getGenerationStamp() != diskGS) {
File memMetaFile = getMetaFile(diskFile,
File memMetaFile = DatanodeUtil.getMetaFile(diskFile,
memBlockInfo.getGenerationStamp());
if (memMetaFile.exists()) {
if (memMetaFile.compareTo(diskMetaFile) != 0) {
@ -2562,18 +2519,15 @@ class FSDataset implements FSDatasetInterface {
volumes.removeBlockPool(bpid);
}
/**
* get list of all bpids
* @return list of bpids
*/
public String [] getBPIdlist() throws IOException {
@Override
public String[] getBlockPoolList() {
return volumeMap.getBlockPoolList();
}
/**
* Class for representing the Datanode volume information
*/
static class VolumeInfo {
private static class VolumeInfo {
final String directory;
final long usedSpace;
final long freeSpace;
@ -2586,10 +2540,11 @@ class FSDataset implements FSDatasetInterface {
this.reservedSpace = reservedSpace;
}
}
Collection<VolumeInfo> getVolumeInfo() {
private Collection<VolumeInfo> getVolumeInfo() {
Collection<VolumeInfo> info = new ArrayList<VolumeInfo>();
for (FSVolume volume : volumes.volumes) {
for (FSVolumeInterface v : volumes.volumes) {
final FSVolume volume = (FSVolume)v;
long used = 0;
long free = 0;
try {
@ -2606,13 +2561,27 @@ class FSDataset implements FSDatasetInterface {
}
return info;
}
@Override
public Map<String, Object> getVolumeInfoMap() {
final Map<String, Object> info = new HashMap<String, Object>();
Collection<VolumeInfo> volumes = getVolumeInfo();
for (VolumeInfo v : volumes) {
final Map<String, Object> innerInfo = new HashMap<String, Object>();
innerInfo.put("usedSpace", v.usedSpace);
innerInfo.put("freeSpace", v.freeSpace);
innerInfo.put("reservedSpace", v.reservedSpace);
info.put(v.directory, innerInfo);
}
return info;
}
@Override //FSDatasetInterface
public synchronized void deleteBlockPool(String bpid, boolean force)
throws IOException {
if (!force) {
for (FSVolume volume : volumes.volumes) {
if (!volume.isBPDirEmpty(bpid)) {
for (FSVolumeInterface volume : volumes.volumes) {
if (!((FSVolume)volume).isBPDirEmpty(bpid)) {
DataNode.LOG.warn(bpid
+ " has some block files, cannot delete unless forced");
throw new IOException("Cannot delete block pool, "
@ -2620,8 +2589,8 @@ class FSDataset implements FSDatasetInterface {
}
}
}
for (FSVolume volume : volumes.volumes) {
volume.deleteBPDirectories(bpid, force);
for (FSVolumeInterface volume : volumes.volumes) {
((FSVolume)volume).deleteBPDirectories(bpid, force);
}
}
@ -2629,7 +2598,7 @@ class FSDataset implements FSDatasetInterface {
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
throws IOException {
File datafile = getBlockFile(block);
File metafile = getMetaFile(datafile, block.getGenerationStamp());
File metafile = DatanodeUtil.getMetaFile(datafile, block.getGenerationStamp());
BlockLocalPathInfo info = new BlockLocalPathInfo(block,
datafile.getAbsolutePath(), metafile.getAbsolutePath());
return info;

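As a reading aid for the getVolumeInfoMap() addition above, here is a minimal sketch of the kind of caller the new map shape (directory name mapped to used/free/reserved space) is meant to serve; the VolumeInfoPrinter class and its sample data are illustrative only, not part of this patch.

import java.util.HashMap;
import java.util.Map;

// Hypothetical consumer of an FSDatasetInterface#getVolumeInfoMap()-style map.
public class VolumeInfoPrinter {
  public static void print(Map<String, Object> volumeInfoMap) {
    for (Map.Entry<String, Object> e : volumeInfoMap.entrySet()) {
      // Each value is itself a map keyed by usedSpace/freeSpace/reservedSpace.
      @SuppressWarnings("unchecked")
      Map<String, Object> inner = (Map<String, Object>) e.getValue();
      System.out.println(e.getKey()
          + ": used=" + inner.get("usedSpace")
          + ", free=" + inner.get("freeSpace")
          + ", reserved=" + inner.get("reservedSpace"));
    }
  }

  public static void main(String[] args) {
    // Sample data shaped like the map built in FSDataset#getVolumeInfoMap().
    Map<String, Object> inner = new HashMap<String, Object>();
    inner.put("usedSpace", 1024L);
    inner.put("freeSpace", 4096L);
    inner.put("reservedSpace", 512L);
    Map<String, Object> info = new HashMap<String, Object>();
    info.put("/data/dfs/dn/current", inner);
    print(info);
  }
}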
View File

@ -19,10 +19,13 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.Closeable;
import java.io.File;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@ -46,8 +49,44 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException;
*/
@InterfaceAudience.Private
public interface FSDatasetInterface extends FSDatasetMBean {
/**
* This is an interface for the underlying volume.
* @see org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume
*/
interface FSVolumeInterface {
/** @return a list of block pools. */
public String[] getBlockPoolList();
/** @return the available storage space in bytes. */
public long getAvailable() throws IOException;
/** @return the directory for the block pool. */
public File getDirectory(String bpid) throws IOException;
/** @return the directory for the finalized blocks in the block pool. */
public File getFinalizedDir(String bpid) throws IOException;
}
/** @return a list of volumes. */
public List<FSVolumeInterface> getVolumes();
/** @return a volume information map (name => info). */
public Map<String, Object> getVolumeInfoMap();
/** @return a list of block pools. */
public String[] getBlockPoolList();
/** @return a list of finalized blocks for the given block pool. */
public List<Block> getFinalizedBlocks(String bpid);
/**
* Check whether the in-memory block record matches the block on the disk,
* and, in case that they are not matched, update the record or mark it
* as corrupted.
*/
public void checkAndUpdate(String bpid, long blockId, File diskFile,
File diskMetaFile, FSVolumeInterface vol);
/**
* Returns the length of the metadata file of the specified block
* @param b - the block for which the metadata length is desired

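To show why the volume accessors above were lifted into FSVolumeInterface, here is a hedged, self-contained sketch of a caller that works purely against an interface (no cast to a concrete volume class); the Volume stand-in below only mirrors the interface methods used and is not the real FSVolumeInterface.

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

public class VolumeSpaceSummary {
  // Minimal stand-in mirroring the FSVolumeInterface methods used here.
  interface Volume {
    long getAvailable() throws IOException;
    String[] getBlockPoolList();
  }

  // Works purely against the interface; no (FSVolume) cast is needed.
  static long totalAvailable(List<? extends Volume> volumes) throws IOException {
    long total = 0;
    for (Volume v : volumes) {
      total += v.getAvailable();
    }
    return total;
  }

  public static void main(String[] args) throws IOException {
    Volume fake = new Volume() {
      @Override public long getAvailable() { return 1L << 30; }  // 1 GB free
      @Override public String[] getBlockPoolList() { return new String[] {"BP-1"}; }
    };
    System.out.println("available bytes: " + totalAvailable(Arrays.asList(fake, fake)));
  }
}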
View File

@ -21,7 +21,7 @@ import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/**
* This class describes a replica that has been finalized.
@ -38,7 +38,7 @@ class FinalizedReplica extends ReplicaInfo {
* @param dir directory path where block and meta files are located
*/
FinalizedReplica(long blockId, long len, long genStamp,
FSVolume vol, File dir) {
FSVolumeInterface vol, File dir) {
super(blockId, len, genStamp, vol, dir);
}
@ -48,7 +48,7 @@ class FinalizedReplica extends ReplicaInfo {
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
FinalizedReplica(Block block, FSVolume vol, File dir) {
FinalizedReplica(Block block, FSVolumeInterface vol, File dir) {
super(block, vol, dir);
}

View File

@ -21,7 +21,7 @@ import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/** This class represents replicas being written.
* Those are the replicas that
@ -36,7 +36,7 @@ class ReplicaBeingWritten extends ReplicaInPipeline {
* @param dir directory path where block and meta files are located
*/
ReplicaBeingWritten(long blockId, long genStamp,
FSVolume vol, File dir) {
FSVolumeInterface vol, File dir) {
super( blockId, genStamp, vol, dir);
}
@ -48,7 +48,7 @@ class ReplicaBeingWritten extends ReplicaInPipeline {
* @param writer a thread that is writing to this replica
*/
ReplicaBeingWritten(Block block,
FSVolume vol, File dir, Thread writer) {
FSVolumeInterface vol, File dir, Thread writer) {
super( block, vol, dir, writer);
}
@ -62,7 +62,7 @@ class ReplicaBeingWritten extends ReplicaInPipeline {
* @param writer a thread that is writing to this replica
*/
ReplicaBeingWritten(long blockId, long len, long genStamp,
FSVolume vol, File dir, Thread writer ) {
FSVolumeInterface vol, File dir, Thread writer ) {
super( blockId, len, genStamp, vol, dir, writer);
}

View File

@ -24,8 +24,8 @@ import java.io.RandomAccessFile;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DataChecksum;
@ -53,7 +53,7 @@ class ReplicaInPipeline extends ReplicaInfo
* @param state replica state
*/
ReplicaInPipeline(long blockId, long genStamp,
FSVolume vol, File dir) {
FSVolumeInterface vol, File dir) {
this( blockId, 0L, genStamp, vol, dir, Thread.currentThread());
}
@ -65,7 +65,7 @@ class ReplicaInPipeline extends ReplicaInfo
* @param writer a thread that is writing to this replica
*/
ReplicaInPipeline(Block block,
FSVolume vol, File dir, Thread writer) {
FSVolumeInterface vol, File dir, Thread writer) {
this( block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
vol, dir, writer);
}
@ -80,7 +80,7 @@ class ReplicaInPipeline extends ReplicaInfo
* @param writer a thread that is writing to this replica
*/
ReplicaInPipeline(long blockId, long len, long genStamp,
FSVolume vol, File dir, Thread writer ) {
FSVolumeInterface vol, File dir, Thread writer ) {
super( blockId, len, genStamp, vol, dir);
this.bytesAcked = len;
this.bytesOnDisk = len;

View File

@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.io.IOUtils;
/**
@ -35,8 +35,10 @@ import org.apache.hadoop.io.IOUtils;
*/
@InterfaceAudience.Private
abstract public class ReplicaInfo extends Block implements Replica {
private FSVolume volume; // volume where the replica belongs
private File dir; // directory where block & meta files belong
/** volume where the replica belongs */
private FSVolumeInterface volume;
/** directory where block & meta files belong */
private File dir;
/**
* Constructor for a zero length replica
@ -45,7 +47,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
ReplicaInfo(long blockId, long genStamp, FSVolume vol, File dir) {
ReplicaInfo(long blockId, long genStamp, FSVolumeInterface vol, File dir) {
this( blockId, 0L, genStamp, vol, dir);
}
@ -55,7 +57,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
ReplicaInfo(Block block, FSVolume vol, File dir) {
ReplicaInfo(Block block, FSVolumeInterface vol, File dir) {
this(block.getBlockId(), block.getNumBytes(),
block.getGenerationStamp(), vol, dir);
}
@ -69,7 +71,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
* @param dir directory path where block and meta files are located
*/
ReplicaInfo(long blockId, long len, long genStamp,
FSVolume vol, File dir) {
FSVolumeInterface vol, File dir) {
super(blockId, len, genStamp);
this.volume = vol;
this.dir = dir;
@ -111,14 +113,14 @@ abstract public class ReplicaInfo extends Block implements Replica {
* Get the volume where this replica is located on disk
* @return the volume where this replica is located on disk
*/
FSVolume getVolume() {
FSVolumeInterface getVolume() {
return volume;
}
/**
* Set the volume where this replica is located on disk
*/
void setVolume(FSVolume vol) {
void setVolume(FSVolumeInterface vol) {
this.volume = vol;
}
@ -162,7 +164,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
* be recovered (especially on Windows) on datanode restart.
*/
private void unlinkFile(File file, Block b) throws IOException {
File tmpFile = DatanodeUtil.createTmpFile(b, FSDataset.getUnlinkTmpFile(file));
File tmpFile = DatanodeUtil.createTmpFile(b, DatanodeUtil.getUnlinkTmpFile(file));
try {
FileInputStream in = new FileInputStream(file);
try {

View File

@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
/**
@ -145,7 +145,7 @@ class ReplicaUnderRecovery extends ReplicaInfo {
}
@Override //ReplicaInfo
void setVolume(FSVolume vol) {
void setVolume(FSVolumeInterface vol) {
super.setVolume(vol);
original.setVolume(vol);
}

View File

@ -21,7 +21,7 @@ import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/**
* This class represents a replica that is waiting to be recovered.
@ -44,7 +44,7 @@ class ReplicaWaitingToBeRecovered extends ReplicaInfo {
* @param dir directory path where block and meta files are located
*/
ReplicaWaitingToBeRecovered(long blockId, long len, long genStamp,
FSVolume vol, File dir) {
FSVolumeInterface vol, File dir) {
super(blockId, len, genStamp, vol, dir);
}
@ -54,7 +54,7 @@ class ReplicaWaitingToBeRecovered extends ReplicaInfo {
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
ReplicaWaitingToBeRecovered(Block block, FSVolume vol, File dir) {
ReplicaWaitingToBeRecovered(Block block, FSVolumeInterface vol, File dir) {
super(block, vol, dir);
}

View File

@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {
@ -28,8 +28,8 @@ public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {
private int curVolume = 0;
@Override
public synchronized FSVolume chooseVolume(List<FSVolume> volumes, long blockSize)
throws IOException {
public synchronized FSVolumeInterface chooseVolume(
List<FSVolumeInterface> volumes, long blockSize) throws IOException {
if(volumes.size() < 1) {
throw new DiskOutOfSpaceException("No more available volumes");
}
@ -44,7 +44,7 @@ public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {
long maxAvailable = 0;
while (true) {
FSVolume volume = volumes.get(curVolume);
FSVolumeInterface volume = volumes.get(curVolume);
curVolume = (curVolume + 1) % volumes.size();
long availableVolumeSize = volume.getAvailable();
if (availableVolumeSize > blockSize) { return volume; }

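For readers following the signature change above, a standalone sketch of the same round-robin selection idea, written against plain free-space numbers rather than the real FSVolumeInterface (class and method names below are illustrative only):

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

// Illustrative round-robin chooser: walk the volumes in order, starting where the
// previous call left off, and return the first one with enough space for the block.
public class RoundRobinSketch {
  private int curVolume = 0;

  synchronized int choose(List<Long> availableBytes, long blockSize) throws IOException {
    int startVolume = curVolume;
    while (true) {
      int candidate = curVolume;
      curVolume = (curVolume + 1) % availableBytes.size();
      if (availableBytes.get(candidate) > blockSize) {
        return candidate;
      }
      if (curVolume == startVolume) {
        throw new IOException("Out of space: no volume can hold " + blockSize + " bytes");
      }
    }
  }

  public static void main(String[] args) throws IOException {
    RoundRobinSketch policy = new RoundRobinSketch();
    List<Long> volumes = Arrays.asList(100L, 200L);
    System.out.println(policy.choose(volumes, 150));  // picks index 1 (200 bytes free)
    System.out.println(policy.choose(volumes, 50));   // continues round-robin at index 0
  }
}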
View File

@ -153,8 +153,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
this.metrics = NameNode.getNameNodeMetrics();
int handlerCount =
conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY,
DFS_DATANODE_HANDLER_COUNT_DEFAULT);
conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY,
DFS_NAMENODE_HANDLER_COUNT_DEFAULT);
InetSocketAddress socAddr = nn.getRpcServerAddress(conf);
RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
ProtobufRpcEngine.class);

View File

@ -1127,7 +1127,7 @@ public class DFSAdmin extends FsShell {
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
throws IOException {
InetSocketAddress datanodeAddr = DFSUtil.getSocketAddress(datanode);
InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
// Get the current configuration
Configuration conf = getConf();

View File

@ -21,7 +21,7 @@
<property name="aspectversion" value="1.6.5"/>
<!-- TODO this has to be changed synchronously with build.xml version prop.-->
<!-- this workarounds of test-patch setting its own 'version' -->
<property name="project.version" value="0.23.0-SNAPSHOT"/>
<property name="project.version" value="0.23.2-SNAPSHOT"/>
<!-- Properties common for all fault injections -->
<property name="build-fi.dir" value="${basedir}/build-fi"/>

View File

@ -17,12 +17,14 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
@ -38,11 +40,10 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.BlockPoolSlice;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolumeSet;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@ -988,8 +989,33 @@ public class SimulatedFSDataset implements FSDatasetInterface, Configurable{
}
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b)
throws IOException {
throw new IOException("getBlockLocalPathInfo not supported.");
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b) {
throw new UnsupportedOperationException();
}
@Override
public String[] getBlockPoolList() {
throw new UnsupportedOperationException();
}
@Override
public void checkAndUpdate(String bpid, long blockId, File diskFile,
File diskMetaFile, FSVolumeInterface vol) {
throw new UnsupportedOperationException();
}
@Override
public List<FSVolumeInterface> getVolumes() {
throw new UnsupportedOperationException();
}
@Override
public List<Block> getFinalizedBlocks(String bpid) {
throw new UnsupportedOperationException();
}
@Override
public Map<String, Object> getVolumeInfoMap() {
throw new UnsupportedOperationException();
}
}

View File

@ -23,7 +23,7 @@ import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -31,7 +31,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.junit.Assert;
@ -81,11 +80,11 @@ public class TestDataNodeMultipleRegistrations {
// check number of volumes in fsdataset
DataNode dn = cluster.getDataNodes().get(0);
Collection<VolumeInfo> volInfos = ((FSDataset) dn.data).getVolumeInfo();
assertNotNull("No volumes in the fsdataset", volInfos);
final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
int i = 0;
for (VolumeInfo vi : volInfos) {
LOG.info("vol " + i++ + ";dir=" + vi.directory + ";fs= " + vi.freeSpace);
for (Map.Entry<String, Object> e : volInfos.entrySet()) {
LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
}
// number of volumes should be 2 - [data1, data2]
assertEquals("number of volumes is wrong", 2, volInfos.size());
@ -143,11 +142,11 @@ public class TestDataNodeMultipleRegistrations {
// check number of volumes in fsdataset
DataNode dn = cluster.getDataNodes().get(0);
Collection<VolumeInfo> volInfos = ((FSDataset) dn.data).getVolumeInfo();
assertNotNull("No volumes in the fsdataset", volInfos);
final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
int i = 0;
for (VolumeInfo vi : volInfos) {
LOG.info("vol " + i++ + ";dir=" + vi.directory + ";fs= " + vi.freeSpace);
for (Map.Entry<String, Object> e : volInfos.entrySet()) {
LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
}
// number of volumes should be 2 - [data1, data2]
assertEquals("number of volumes is wrong", 2, volInfos.size());

View File

@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
@ -29,8 +32,8 @@ import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.BlockReaderFactory;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@ -43,13 +46,10 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.net.NetUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* Fine-grain testing of block files and locations after volume failure.
@ -274,8 +274,7 @@ public class TestDataNodeVolumeFailure {
String file = BlockReaderFactory.getFileName(targetAddr,
"test-blockpoolid",
block.getBlockId());
BlockReader blockReader =
BlockReaderFactory.newBlockReader(conf, s, file, block, lblock
BlockReaderFactory.newBlockReader(conf, s, file, block, lblock
.getBlockToken(), 0, -1);
// nothing - if it fails - it will throw an exception
@ -372,7 +371,7 @@ public class TestDataNodeVolumeFailure {
new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.startsWith("blk_") &&
name.endsWith(FSDataset.METADATA_EXTENSION);
name.endsWith(DatanodeUtil.METADATA_EXTENSION);
}
}
);

View File

@ -30,17 +30,17 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.junit.Test;
import org.junit.Assert;
import org.junit.Test;
/** Test if a datanode can correctly upgrade itself */
public class TestDatanodeRestart {
@ -98,8 +98,9 @@ public class TestDatanodeRestart {
out.write(writeBuf);
out.hflush();
DataNode dn = cluster.getDataNodes().get(0);
for (FSVolume volume : ((FSDataset)dn.data).volumes.getVolumes()) {
File currentDir = volume.getDir().getParentFile();
for (FSVolumeInterface v : dn.data.getVolumes()) {
FSVolume volume = (FSVolume)v;
File currentDir = volume.getCurrentDir().getParentFile().getParentFile();
File rbwDir = new File(currentDir, "rbw");
for (File file : rbwDir.listFiles()) {
if (isCorrupt && Block.isBlockFilename(file)) {
@ -188,7 +189,7 @@ public class TestDatanodeRestart {
} else {
src = replicaInfo.getMetaFile();
}
File dst = FSDataset.getUnlinkTmpFile(src);
File dst = DatanodeUtil.getUnlinkTmpFile(src);
if (isRename) {
src.renameTo(dst);
} else {

View File

@ -25,20 +25,20 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import junit.framework.TestCase;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/**
* Tests {@link DirectoryScanner} handling of differences
@ -142,10 +142,10 @@ public class TestDirectoryScanner extends TestCase {
/** Create a block file in a random volume*/
private long createBlockFile() throws IOException {
List<FSVolume> volumes = fds.volumes.getVolumes();
List<FSVolumeInterface> volumes = fds.getVolumes();
int index = rand.nextInt(volumes.size() - 1);
long id = getFreeBlockId();
File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir();
File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
File file = new File(finalizedDir, getBlockFile(id));
if (file.createNewFile()) {
LOG.info("Created block file " + file.getName());
@ -155,10 +155,10 @@ public class TestDirectoryScanner extends TestCase {
/** Create a metafile in a random volume*/
private long createMetaFile() throws IOException {
List<FSVolume> volumes = fds.volumes.getVolumes();
List<FSVolumeInterface> volumes = fds.getVolumes();
int index = rand.nextInt(volumes.size() - 1);
long id = getFreeBlockId();
File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir();
File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
File file = new File(finalizedDir, getMetaFile(id));
if (file.createNewFile()) {
LOG.info("Created metafile " + file.getName());
@ -168,10 +168,10 @@ public class TestDirectoryScanner extends TestCase {
/** Create block file and corresponding metafile in a random volume */
private long createBlockMetaFile() throws IOException {
List<FSVolume> volumes = fds.volumes.getVolumes();
List<FSVolumeInterface> volumes = fds.getVolumes();
int index = rand.nextInt(volumes.size() - 1);
long id = getFreeBlockId();
File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir();
File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
File file = new File(finalizedDir, getBlockFile(id));
if (file.createNewFile()) {
LOG.info("Created block file " + file.getName());

View File

@ -21,10 +21,10 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import junit.framework.Assert;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
@ -33,14 +33,14 @@ public class TestRoundRobinVolumesPolicy {
// Test the Round-Robin block-volume choosing algorithm.
@Test
public void testRR() throws Exception {
final List<FSVolume> volumes = new ArrayList<FSVolume>();
final List<FSVolumeInterface> volumes = new ArrayList<FSVolumeInterface>();
// First volume, with 100 bytes of space.
volumes.add(Mockito.mock(FSVolume.class));
volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(0).getAvailable()).thenReturn(100L);
// Second volume, with 200 bytes of space.
volumes.add(Mockito.mock(FSVolume.class));
volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L);
RoundRobinVolumesPolicy policy = ReflectionUtils.newInstance(
@ -69,14 +69,14 @@ public class TestRoundRobinVolumesPolicy {
@Test
public void testRRPolicyExceptionMessage()
throws Exception {
final List<FSVolume> volumes = new ArrayList<FSVolume>();
final List<FSVolumeInterface> volumes = new ArrayList<FSVolumeInterface>();
// First volume, with 500 bytes of space.
volumes.add(Mockito.mock(FSVolume.class));
volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(0).getAvailable()).thenReturn(500L);
// Second volume, with 600 bytes of space.
volumes.add(Mockito.mock(FSVolume.class));
volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(600L);
RoundRobinVolumesPolicy policy = new RoundRobinVolumesPolicy();

View File

@ -140,7 +140,7 @@ public class TestWriteToReplica {
ReplicasMap replicasMap = dataSet.volumeMap;
FSVolume vol = dataSet.volumes.getNextVolume(0);
ReplicaInfo replicaInfo = new FinalizedReplica(
blocks[FINALIZED].getLocalBlock(), vol, vol.getDir());
blocks[FINALIZED].getLocalBlock(), vol, vol.getCurrentDir().getParentFile());
replicasMap.add(bpid, replicaInfo);
replicaInfo.getBlockFile().createNewFile();
replicaInfo.getMetaFile().createNewFile();
@ -160,15 +160,15 @@ public class TestWriteToReplica {
blocks[RWR].getLocalBlock(), vol, vol.createRbwFile(bpid,
blocks[RWR].getLocalBlock()).getParentFile()));
replicasMap.add(bpid, new ReplicaUnderRecovery(new FinalizedReplica(blocks[RUR]
.getLocalBlock(), vol, vol.getDir()), 2007));
.getLocalBlock(), vol, vol.getCurrentDir().getParentFile()), 2007));
return blocks;
}
private void testAppend(String bpid, FSDataset dataSet, ExtendedBlock[] blocks) throws IOException {
long newGS = blocks[FINALIZED].getGenerationStamp()+1;
FSVolume v = dataSet.volumeMap.get(bpid, blocks[FINALIZED].getLocalBlock())
.getVolume();
final FSVolume v = (FSVolume)dataSet.volumeMap.get(
bpid, blocks[FINALIZED].getLocalBlock()).getVolume();
long available = v.getCapacity()-v.getDfsUsed();
long expectedLen = blocks[FINALIZED].getNumBytes();
try {

View File

@ -34,6 +34,7 @@ import java.util.Properties;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@ -59,6 +60,8 @@ import static org.mockito.Mockito.mock;
*/
public abstract class FSImageTestUtil {
public static final Log LOG = LogFactory.getLog(FSImageTestUtil.class);
/**
* The position in the fsimage header where the txid is
* written.
@ -369,6 +372,8 @@ public abstract class FSImageTestUtil {
List<Integer> txids) {
for (File nameDir : getNameNodeCurrentDirs(cluster)) {
LOG.info("examining name dir with files: " +
Joiner.on(",").join(nameDir.listFiles()));
// Should have fsimage_N for the three checkpoints
for (long checkpointTxId : txids) {
File image = new File(nameDir,

View File

@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.*;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
@ -38,15 +40,15 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import junit.framework.TestCase;
public class TestBackupNode extends TestCase {
public class TestBackupNode {
public static final Log LOG = LogFactory.getLog(TestBackupNode.class);
@ -57,8 +59,8 @@ public class TestBackupNode extends TestCase {
static final String BASE_DIR = MiniDFSCluster.getBaseDirectory();
protected void setUp() throws Exception {
super.setUp();
@Before
public void setUp() throws Exception {
File baseDir = new File(BASE_DIR);
if(baseDir.exists())
if(!(FileUtil.fullyDelete(baseDir)))
@ -89,8 +91,7 @@ public class TestBackupNode extends TestCase {
return (BackupNode)NameNode.createNameNode(new String[]{startupOpt.getName()}, c);
}
void waitCheckpointDone(
MiniDFSCluster cluster, BackupNode backup, long txid) {
void waitCheckpointDone(MiniDFSCluster cluster, long txid) {
long thisCheckpointTxId;
do {
try {
@ -98,9 +99,8 @@ public class TestBackupNode extends TestCase {
"checkpoint txid should increase above " + txid);
Thread.sleep(1000);
} catch (Exception e) {}
thisCheckpointTxId = backup.getFSImage().getStorage()
thisCheckpointTxId = cluster.getNameNode().getFSImage().getStorage()
.getMostRecentCheckpointTxId();
} while (thisCheckpointTxId < txid);
// Check that the checkpoint got uploaded to NN successfully
@ -108,6 +108,7 @@ public class TestBackupNode extends TestCase {
Collections.singletonList((int)thisCheckpointTxId));
}
@Test
public void testCheckpointNode() throws Exception {
testCheckpoint(StartupOption.CHECKPOINT);
}
@ -117,6 +118,7 @@ public class TestBackupNode extends TestCase {
* and keep in sync, even while the NN rolls, checkpoints
* occur, etc.
*/
@Test
public void testBackupNodeTailsEdits() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
@ -234,6 +236,7 @@ public class TestBackupNode extends TestCase {
FSImageTestUtil.assertParallelFilesAreIdentical(dirs, ImmutableSet.of("VERSION"));
}
@Test
public void testBackupNode() throws Exception {
testCheckpoint(StartupOption.BACKUP);
}
@ -270,7 +273,7 @@ public class TestBackupNode extends TestCase {
//
long txid = cluster.getNameNodeRpc().getTransactionID();
backup = startBackupNode(conf, op, 1);
waitCheckpointDone(cluster, backup, txid);
waitCheckpointDone(cluster, txid);
} catch(IOException e) {
LOG.error("Error in TestBackupNode:", e);
assertTrue(e.getLocalizedMessage(), false);
@ -305,7 +308,7 @@ public class TestBackupNode extends TestCase {
//
backup = startBackupNode(conf, op, 1);
long txid = cluster.getNameNodeRpc().getTransactionID();
waitCheckpointDone(cluster, backup, txid);
waitCheckpointDone(cluster, txid);
for (int i = 0; i < 10; i++) {
fileSys.mkdirs(new Path("file_" + i));
@ -313,11 +316,11 @@ public class TestBackupNode extends TestCase {
txid = cluster.getNameNodeRpc().getTransactionID();
backup.doCheckpoint();
waitCheckpointDone(cluster, backup, txid);
waitCheckpointDone(cluster, txid);
txid = cluster.getNameNodeRpc().getTransactionID();
backup.doCheckpoint();
waitCheckpointDone(cluster, backup, txid);
waitCheckpointDone(cluster, txid);
} catch(IOException e) {
LOG.error("Error in TestBackupNode:", e);

View File

@ -17,12 +17,12 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<relativePath>../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-project</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<description>Apache Hadoop HDFS Project</description>
<name>Apache Hadoop HDFS Project</name>
<packaging>pom</packaging>

View File

@ -26,7 +26,17 @@ Release 0.23-PB - Unreleased
MAPREDUCE-3818. Fixed broken compilation in TestSubmitJob after the patch
for HDFS-2895. (Suresh Srinivas via vinodkv)
Release 0.23.1 - Unreleased
Release 0.23.2 - UNRELEASED
NEW FEATURES
IMPROVEMENTS
OPTIMIZATIONS
BUG FIXES
Release 0.23.1 - 2012-02-08
NEW FEATURES
@ -48,6 +58,8 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3375. [Gridmix] Memory Emulation system tests.
(Vinay Thota via amarrk)
MAPREDUCE-3840. JobEndNotifier doesn't use the proxyToUse during connecting
(Ravi Prakash via bobby)
MAPREDUCE-2733. [Gridmix] Gridmix3 cpu emulation system tests.
(Vinay Thota via amarrk)
@ -80,6 +92,9 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3102. Changed NodeManager to fail fast when LinuxContainerExecutor
has wrong configuration or permissions. (Hitesh Shah via vinodkv)
MAPREDUCE-3415. improve MiniMRYarnCluster & DistributedShell JAR
resolution. (tucu)
MAPREDUCE-3169. Create a new MiniMRCluster equivalent which only provides
client APIs cross MR1 and MR2. (Ahmed via tucu)
@ -221,6 +236,8 @@ Release 0.23.1 - Unreleased
acmurthy)
BUG FIXES
MAPREDUCE-3770. Zombie.getJobConf() results into NPE. (amarrk)
MAPREDUCE-3804. yarn webapp interface vulnerable to cross scripting attacks
(Dave Thompson via bobby)
@ -720,7 +737,37 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3709. TestDistributedShell is failing. (Hitesh Shah via
mahadev)
MAPREDUCE-3436. JobHistory webapp address should use the host configured
in the jobhistory address. (Ahmed Radwan via sseth)
MAPREDUCE-3815. Fixed MR AM to always use hostnames and never IPs when
requesting containers so that the scheduler can hand out data-local containers
correctly. (Siddarth Seth via vinodkv)
MAPREDUCE-3833. Fixed a bug in reinitializing queues. (Jason Lowe via
acmurthy)
MAPREDUCE-3826. Fixed a bug in RM web-ui which broke sorting. (Jonathan
Eagles via acmurthy)
MAPREDUCE-3823. Ensure counters are calculated only once after a job
finishes. (Vinod Kumar Vavilapalli via sseth)
MAPREDUCE-3827. Changed Counters to use ConcurrentSkipListMap for
performance. (vinodkv via acmurthy)
MAPREDUCE-3822. Changed FS counter computation to use all occurences of
the same FS scheme, instead of randomly using one. (Mahadev Konar via
sseth)
MAPREDUCE-3834. Changed MR AM to not add the same rack entry multiple times
into the container request table when multiple hosts for a split happen to
be on the same rack. (Siddarth Seth via vinodkv)
MAPREDUCE-3828. Ensure that urls in single-node mode are correct. (sseth
via acmurthy)
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES

View File

@ -32,7 +32,7 @@
<property name="Name" value="Hadoop-Mapred"/>
<property name="name" value="hadoop-${module}"/>
<!-- Need to change aop.xml project.version prop. synchronously -->
<property name="_version" value="0.23.1"/>
<property name="_version" value="0.23.2"/>
<property name="version" value="${_version}-SNAPSHOT"/>
<property name="final.name" value="${name}-${version}"/>
<property name="test.final.name" value="${name}-test-${version}"/>

View File

@ -16,12 +16,12 @@
<parent>
<artifactId>hadoop-mapreduce-client</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-app</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<name>hadoop-mapreduce-client-app</name>
<properties>
@ -112,7 +112,7 @@
<target>
<symlink link="${applink.base}.jar"
resource="mr-app.jar" failonerror="false"/>
<symlink link="${applink.base}-0.23.0-SNAPSHOT.jar"
<symlink link="${applink.base}-0.23.2-SNAPSHOT.jar"
resource="mr-app.jar" failonerror="false"/>
</target>
</configuration>

View File

@ -119,7 +119,8 @@ public class JobEndNotifier implements Configurable {
boolean success = false;
try {
Log.info("Job end notification trying " + urlToNotify);
HttpURLConnection conn = (HttpURLConnection) urlToNotify.openConnection();
HttpURLConnection conn =
(HttpURLConnection) urlToNotify.openConnection(proxyToUse);
conn.setConnectTimeout(5*1000);
conn.setReadTimeout(5*1000);
conn.setAllowUserInteraction(false);

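The one-line fix above routes the end-of-job notification through the configured proxy. A hedged, self-contained sketch of the same java.net pattern (the URL, proxy host/port and timeouts below are made-up values, not the job configuration):

import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.Proxy;
import java.net.URL;

public class ProxiedNotification {
  public static void main(String[] args) throws Exception {
    URL urlToNotify = new URL("http://example.com/job-end");           // placeholder URL
    Proxy proxyToUse = new Proxy(Proxy.Type.HTTP,
        new InetSocketAddress("proxy.example.com", 8080));             // placeholder proxy
    // Passing the Proxy to openConnection is what the fix above adds;
    // openConnection() with no argument ignores any proxy chosen earlier.
    HttpURLConnection conn =
        (HttpURLConnection) urlToNotify.openConnection(proxyToUse);
    conn.setConnectTimeout(5 * 1000);
    conn.setReadTimeout(5 * 1000);
    conn.setAllowUserInteraction(false);
    System.out.println("HTTP " + conn.getResponseCode());
  }
}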
View File

@ -35,6 +35,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -106,7 +107,7 @@ import org.apache.hadoop.yarn.state.StateMachineFactory;
/** Implementation of Job interface. Maintains the state machines of Job.
* The read and write calls use ReadWriteLock for concurrency.
*/
@SuppressWarnings({ "rawtypes", "deprecation", "unchecked" })
@SuppressWarnings({ "rawtypes", "unchecked" })
public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
EventHandler<JobEvent> {
@ -153,6 +154,10 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
private boolean lazyTasksCopyNeeded = false;
volatile Map<TaskId, Task> tasks = new LinkedHashMap<TaskId, Task>();
private Counters jobCounters = new Counters();
private Object fullCountersLock = new Object();
private Counters fullCounters = null;
private Counters finalMapCounters = null;
private Counters finalReduceCounters = null;
// FIXME:
//
// Can then replace task-level uber counters (MR-2424) with job-level ones
@ -473,11 +478,21 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
@Override
public Counters getAllCounters() {
Counters counters = new Counters();
readLock.lock();
try {
JobState state = getState();
if (state == JobState.ERROR || state == JobState.FAILED
|| state == JobState.KILLED || state == JobState.SUCCEEDED) {
this.mayBeConstructFinalFullCounters();
return fullCounters;
}
Counters counters = new Counters();
counters.incrAllCounters(jobCounters);
return incrTaskCounters(counters, tasks.values());
} finally {
readLock.unlock();
}
@ -525,17 +540,21 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
try {
JobState state = getState();
// jobFile can be null if the job is not yet inited.
String jobFile =
remoteJobConfFile == null ? "" : remoteJobConfFile.toString();
if (getState() == JobState.NEW) {
return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
appSubmitTime, startTime, finishTime, setupProgress, 0.0f, 0.0f,
cleanupProgress, remoteJobConfFile.toString(), amInfos, isUber);
cleanupProgress, jobFile, amInfos, isUber);
}
computeProgress();
return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
appSubmitTime, startTime, finishTime, setupProgress,
this.mapProgress, this.reduceProgress,
cleanupProgress, remoteJobConfFile.toString(), amInfos, isUber);
cleanupProgress, jobFile, amInfos, isUber);
} finally {
readLock.unlock();
}
@ -1143,26 +1162,49 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
// not be generated for KilledJobs, etc.
private static JobFinishedEvent createJobFinishedEvent(JobImpl job) {
Counters mapCounters = new Counters();
Counters reduceCounters = new Counters();
for (Task t : job.tasks.values()) {
Counters counters = t.getCounters();
switch (t.getType()) {
case MAP: mapCounters.incrAllCounters(counters); break;
case REDUCE: reduceCounters.incrAllCounters(counters); break;
}
}
job.mayBeConstructFinalFullCounters();
JobFinishedEvent jfe = new JobFinishedEvent(
job.oldJobId, job.finishTime,
job.succeededMapTaskCount, job.succeededReduceTaskCount,
job.failedMapTaskCount, job.failedReduceTaskCount,
mapCounters,
reduceCounters,
job.getAllCounters());
job.finalMapCounters,
job.finalReduceCounters,
job.fullCounters);
return jfe;
}
private void mayBeConstructFinalFullCounters() {
// Calculating full-counters. This should happen only once for the job.
synchronized (this.fullCountersLock) {
if (this.fullCounters != null) {
// Already constructed. Just return.
return;
}
this.constructFinalFullcounters();
}
}
@Private
public void constructFinalFullcounters() {
this.fullCounters = new Counters();
this.finalMapCounters = new Counters();
this.finalReduceCounters = new Counters();
this.fullCounters.incrAllCounters(jobCounters);
for (Task t : this.tasks.values()) {
Counters counters = t.getCounters();
switch (t.getType()) {
case MAP:
this.finalMapCounters.incrAllCounters(counters);
break;
case REDUCE:
this.finalReduceCounters.incrAllCounters(counters);
break;
}
this.fullCounters.incrAllCounters(counters);
}
}
// Task-start has been moved out of InitTransition, so this arc simply
// hardcodes 0 for both map and reduce finished tasks.
private static class KillNewJobTransition

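The mayBeConstructFinalFullCounters() change above is a compute-once cache guarded by a dedicated lock object. A minimal sketch of that idea in isolation (the ExpensiveResult type and compute() body are stand-ins, not the real counter aggregation):

public class ComputeOnce {
  // Stand-in for the aggregated counters that are expensive to build.
  static class ExpensiveResult {
    final long value;
    ExpensiveResult(long value) { this.value = value; }
  }

  private final Object lock = new Object();
  private volatile ExpensiveResult cached = null;

  ExpensiveResult get() {
    // Fast path: already constructed, just return it.
    ExpensiveResult r = cached;
    if (r != null) {
      return r;
    }
    synchronized (lock) {
      // Re-check under the lock so only one thread does the work.
      if (cached == null) {
        cached = compute();
      }
      return cached;
    }
  }

  private ExpensiveResult compute() {
    long sum = 0;
    for (int i = 0; i < 1000; i++) {  // placeholder for summing per-task counters
      sum += i;
    }
    return new ExpensiveResult(sum);
  }

  public static void main(String[] args) {
    ComputeOnce c = new ComputeOnce();
    System.out.println(c.get().value);  // 499500, computed once
    System.out.println(c.get().value);  // served from the cache
  }
}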
View File

@ -19,19 +19,24 @@
package org.apache.hadoop.mapreduce.v2.app.job.impl;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -142,7 +147,7 @@ public abstract class TaskAttemptImpl implements
protected final JobConf conf;
protected final Path jobFile;
protected final int partition;
protected final EventHandler eventHandler;
protected EventHandler eventHandler;
private final TaskAttemptId attemptId;
private final Clock clock;
private final org.apache.hadoop.mapred.JobID oldJobId;
@ -1056,7 +1061,7 @@ public abstract class TaskAttemptImpl implements
}
}
private static class RequestContainerTransition implements
static class RequestContainerTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
private final boolean rescheduled;
public RequestContainerTransition(boolean rescheduled) {
@ -1076,19 +1081,49 @@ public abstract class TaskAttemptImpl implements
taskAttempt.attemptId,
taskAttempt.resourceCapability));
} else {
int i = 0;
String[] racks = new String[taskAttempt.dataLocalHosts.length];
Set<String> racks = new HashSet<String>();
for (String host : taskAttempt.dataLocalHosts) {
racks[i++] = RackResolver.resolve(host).getNetworkLocation();
racks.add(RackResolver.resolve(host).getNetworkLocation());
}
taskAttempt.eventHandler.handle(
new ContainerRequestEvent(taskAttempt.attemptId,
taskAttempt.resourceCapability,
taskAttempt.dataLocalHosts, racks));
taskAttempt.eventHandler.handle(new ContainerRequestEvent(
taskAttempt.attemptId, taskAttempt.resourceCapability, taskAttempt
.resolveHosts(taskAttempt.dataLocalHosts), racks
.toArray(new String[racks.size()])));
}
}
}
protected String[] resolveHosts(String[] src) {
String[] result = new String[src.length];
for (int i = 0; i < src.length; i++) {
if (isIP(src[i])) {
result[i] = resolveHost(src[i]);
} else {
result[i] = src[i];
}
}
return result;
}
protected String resolveHost(String src) {
String result = src; // Fallback in case of failure.
try {
InetAddress addr = InetAddress.getByName(src);
result = addr.getHostName();
} catch (UnknownHostException e) {
LOG.warn("Failed to resolve address: " + src
+ ". Continuing to use the same.");
}
return result;
}
private static final Pattern ipPattern = // Pattern for matching ip
Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}");
protected boolean isIP(String src) {
return ipPattern.matcher(src).matches();
}
private static class ContainerAssignedTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings({ "unchecked" })

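To make the host-resolution helpers added above easier to follow, a hedged standalone version of the same IP check and reverse lookup (class name and sample inputs are illustrative):

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.regex.Pattern;

public class HostResolverSketch {
  // Dotted-quad pattern used to decide whether a string looks like an IPv4 address.
  private static final Pattern IP_PATTERN =
      Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}");

  static boolean isIP(String src) {
    return IP_PATTERN.matcher(src).matches();
  }

  static String resolveHost(String src) {
    String result = src;  // fall back to the original string on failure
    try {
      result = InetAddress.getByName(src).getHostName();
    } catch (UnknownHostException e) {
      System.err.println("Failed to resolve address: " + src);
    }
    return result;
  }

  public static void main(String[] args) {
    // "127.0.0.1" matches the IP pattern, so it gets a reverse lookup;
    // "node1.example.com" does not, so it is passed through unchanged.
    for (String s : new String[] {"127.0.0.1", "node1.example.com"}) {
      System.out.println(s + " -> " + (isIP(s) ? resolveHost(s) : s));
    }
  }
}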
View File

@ -18,6 +18,10 @@
package org.apache.hadoop.mapreduce.v2.app;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.spy;
import java.util.Iterator;
import junit.framework.Assert;
@ -35,6 +39,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
import org.junit.Test;
/**
@ -175,6 +180,41 @@ public class TestMRApp {
app.waitForState(job, JobState.ERROR);
}
private final class MRAppWithSpiedJob extends MRApp {
private JobImpl spiedJob;
private MRAppWithSpiedJob(int maps, int reduces, boolean autoComplete,
String testName, boolean cleanOnStart) {
super(maps, reduces, autoComplete, testName, cleanOnStart);
}
@Override
protected Job createJob(Configuration conf) {
spiedJob = spy((JobImpl) super.createJob(conf));
((AppContext) getContext()).getAllJobs().put(spiedJob.getID(), spiedJob);
return spiedJob;
}
JobImpl getSpiedJob() {
return this.spiedJob;
}
}
@Test
public void testCountersOnJobFinish() throws Exception {
MRAppWithSpiedJob app =
new MRAppWithSpiedJob(1, 1, true, this.getClass().getName(), true);
JobImpl job = (JobImpl)app.submit(new Configuration());
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
System.out.println(job.getAllCounters());
// Just call getCounters
job.getAllCounters();
job.getAllCounters();
// Should be called only once
verify(job, times(1)).constructFinalFullcounters();
}
@Test
public void checkJobStateTypeConversion() {
//verify that all states can be converted without
@ -200,5 +240,6 @@ public class TestMRApp {
t.testCommitPending();
t.testCompletedMapsForReduceSlowstart();
t.testJobError();
t.testCountersOnJobFinish();
}
}

View File

@ -18,48 +18,40 @@
package org.apache.hadoop.mapreduce.v2.app.job.impl;
import java.io.IOException;
import java.util.Map;
import java.util.HashMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl.JobNoTasksCompletedTransition;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
import org.apache.hadoop.mapreduce.v2.app.MRApp;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.event.EventHandler;
import org.junit.Test;
import org.junit.Assert;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.any;
import org.mockito.ArgumentMatcher;
import org.mockito.Mockito;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl.JobNoTasksCompletedTransition;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.event.EventHandler;
import org.junit.Assert;
import org.junit.Test;
/**
* Tests various functions of the JobImpl class
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public class TestJobImpl {
@Test
@ -106,7 +98,9 @@ public class TestJobImpl {
"for successful job",
JobImpl.checkJobCompleteSuccess(mockJob));
Assert.assertEquals("checkJobCompleteSuccess returns incorrect state",
JobImpl.checkJobCompleteSuccess(mockJob), JobState.SUCCEEDED);
JobImpl.checkJobCompleteSuccess(mockJob), JobState.SUCCEEDED);
}
@Test
@ -139,6 +133,7 @@ public class TestJobImpl {
t.testJobNoTasksTransition();
t.testCheckJobCompleteSuccess();
t.testCheckJobCompleteSuccessFailed();
t.testCheckAccess();
}
@Test

@ -18,30 +18,54 @@
package org.apache.hadoop.mapreduce.v2.app.job.impl;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapTaskAttemptImpl;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletion;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.MRApp;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.SystemClock;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.util.BuilderUtils;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
@SuppressWarnings("unchecked")
public class TestTaskAttempt{
@ -58,6 +82,96 @@ public class TestTaskAttempt{
testMRAppHistory(app);
}
@SuppressWarnings("rawtypes")
@Test
public void testSingleRackRequest() throws Exception {
TaskAttemptImpl.RequestContainerTransition rct =
new TaskAttemptImpl.RequestContainerTransition(false);
EventHandler eventHandler = mock(EventHandler.class);
String[] hosts = new String[3];
hosts[0] = "host1";
hosts[1] = "host2";
hosts[2] = "host3";
TaskSplitMetaInfo splitInfo =
new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024l);
TaskAttemptImpl mockTaskAttempt =
createMapTaskAttemptImplForTest(eventHandler, splitInfo);
TaskAttemptEvent mockTAEvent = mock(TaskAttemptEvent.class);
rct.transition(mockTaskAttempt, mockTAEvent);
ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
verify(eventHandler, times(2)).handle(arg.capture());
if (!(arg.getAllValues().get(1) instanceof ContainerRequestEvent)) {
Assert.fail("Second Event not of type ContainerRequestEvent");
}
ContainerRequestEvent cre =
(ContainerRequestEvent) arg.getAllValues().get(1);
String[] requestedRacks = cre.getRacks();
// Only a single occurrence of /DefaultRack
assertEquals(1, requestedRacks.length);
}
@SuppressWarnings("rawtypes")
@Test
public void testHostResolveAttempt() throws Exception {
TaskAttemptImpl.RequestContainerTransition rct =
new TaskAttemptImpl.RequestContainerTransition(false);
EventHandler eventHandler = mock(EventHandler.class);
String[] hosts = new String[3];
hosts[0] = "192.168.1.1";
hosts[1] = "host2";
hosts[2] = "host3";
TaskSplitMetaInfo splitInfo =
new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024l);
TaskAttemptImpl mockTaskAttempt =
createMapTaskAttemptImplForTest(eventHandler, splitInfo);
TaskAttemptImpl spyTa = spy(mockTaskAttempt);
when(spyTa.resolveHost(hosts[0])).thenReturn("host1");
TaskAttemptEvent mockTAEvent = mock(TaskAttemptEvent.class);
rct.transition(spyTa, mockTAEvent);
verify(spyTa).resolveHost(hosts[0]);
ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
verify(eventHandler, times(2)).handle(arg.capture());
if (!(arg.getAllValues().get(1) instanceof ContainerRequestEvent)) {
Assert.fail("Second Event not of type ContainerRequestEvent");
}
Map<String, Boolean> expected = new HashMap<String, Boolean>();
expected.put("host1", true);
expected.put("host2", true);
expected.put("host3", true);
ContainerRequestEvent cre =
(ContainerRequestEvent) arg.getAllValues().get(1);
String[] requestedHosts = cre.getHosts();
for (String h : requestedHosts) {
expected.remove(h);
}
assertEquals(0, expected.size());
}
@SuppressWarnings("rawtypes")
private TaskAttemptImpl createMapTaskAttemptImplForTest(
EventHandler eventHandler, TaskSplitMetaInfo taskSplitMetaInfo) {
ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
Path jobFile = mock(Path.class);
JobConf jobConf = new JobConf();
OutputCommitter outputCommitter = mock(OutputCommitter.class);
Clock clock = new SystemClock();
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
taskSplitMetaInfo, jobConf, taListener, outputCommitter, null,
null, clock);
return taImpl;
}
private void testMRAppHistory(MRApp app) throws Exception {
Configuration conf = new Configuration();
Job job = app.submit(conf);
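
Both new tests above lean on the same Mockito idiom: pass a mocked EventHandler into the transition, capture every event it handles, then assert on the second captured value. A self-contained sketch of that capture-and-inspect pattern follows; the Event and EventHandler types here are hypothetical stand-ins rather than the YARN interfaces, and JUnit 4 with Mockito is assumed.

    import static org.junit.Assert.assertEquals;
    import static org.junit.Assert.assertTrue;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.times;
    import static org.mockito.Mockito.verify;

    import org.junit.Test;
    import org.mockito.ArgumentCaptor;

    public class CaptorSketchTest {

      public interface Event { }

      public interface EventHandler { void handle(Event event); }

      public static class RequestEvent implements Event {
        final String[] racks = { "/default-rack" };
      }

      /** Pretend transition: emits a bookkeeping event first, then the request. */
      static void fireTransition(EventHandler handler) {
        handler.handle(new Event() { });
        handler.handle(new RequestEvent());
      }

      @Test
      public void secondEventIsTheRequest() {
        EventHandler handler = mock(EventHandler.class);
        fireTransition(handler);

        ArgumentCaptor<Event> captor = ArgumentCaptor.forClass(Event.class);
        verify(handler, times(2)).handle(captor.capture());

        Event second = captor.getAllValues().get(1);
        assertTrue(second instanceof RequestEvent);
        assertEquals(1, ((RequestEvent) second).racks.length);
      }
    }
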

@ -59,7 +59,7 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@SuppressWarnings({ "rawtypes", "deprecation" })
@SuppressWarnings("rawtypes")
public class TestTaskImpl {
private static final Log LOG = LogFactory.getLog(TestTaskImpl.class);

@ -16,12 +16,12 @@
<parent>
<artifactId>hadoop-mapreduce-client</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-common</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<name>hadoop-mapreduce-client-common</name>
<properties>

@ -24,6 +24,7 @@ import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.Calendar;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
@ -46,6 +47,9 @@ import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
public class JobHistoryUtils {
/**
@ -110,6 +114,9 @@ public class JobHistoryUtils {
public static final Pattern TIMESTAMP_DIR_PATTERN = Pattern.compile(TIMESTAMP_DIR_REGEX);
private static final String TIMESTAMP_DIR_FORMAT = "%04d" + File.separator + "%02d" + File.separator + "%02d";
private static final Splitter ADDR_SPLITTER = Splitter.on(':').trimResults();
private static final Joiner JOINER = Joiner.on("");
private static final PathFilter CONF_FILTER = new PathFilter() {
@Override
public boolean accept(Path path) {
@ -478,8 +485,16 @@ public class JobHistoryUtils {
public static String getHistoryUrl(Configuration conf, ApplicationId appId)
throws UnknownHostException {
//construct the history url for job
String hsAddress = conf.get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
String addr = conf.get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS);
Iterator<String> it = ADDR_SPLITTER.split(addr).iterator();
it.next(); // ignore the bind host
String port = it.next();
// Use hs address to figure out the host for webapp
addr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS);
String host = ADDR_SPLITTER.split(addr).iterator().next();
String hsAddress = JOINER.join(host, ":", port);
InetSocketAddress address = NetUtils.createSocketAddr(
hsAddress, JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_PORT,
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS);
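
The rewritten getHistoryUrl above keeps only the port from mapreduce.jobhistory.webapp.address and grafts it onto the host taken from mapreduce.jobhistory.address. A minimal sketch of that split-and-rejoin step, using plain strings in place of the JHAdminConfig lookups (the example addresses are made up):

    import java.util.Iterator;

    import com.google.common.base.Joiner;
    import com.google.common.base.Splitter;

    public class HistoryAddressSketch {
      private static final Splitter ADDR_SPLITTER = Splitter.on(':').trimResults();
      private static final Joiner JOINER = Joiner.on("");

      public static void main(String[] args) {
        String webappAddr = "0.0.0.0:19888";                 // bind address; host part is unusable
        String rpcAddr = "historyserver.example.com:10020";  // gives us a reachable host

        Iterator<String> it = ADDR_SPLITTER.split(webappAddr).iterator();
        it.next();                                           // skip the wildcard bind host
        String port = it.next();                             // keep only the webapp port

        String host = ADDR_SPLITTER.split(rpcAddr).iterator().next();
        String hsAddress = JOINER.join(host, ":", port);

        System.out.println(hsAddress);                       // historyserver.example.com:19888
      }
    }
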

@ -16,12 +16,12 @@
<parent>
<artifactId>hadoop-mapreduce-client</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-core</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<name>hadoop-mapreduce-client-core</name>
<properties>

@ -141,7 +141,7 @@ class MapTask extends Task {
private TaskReporter reporter;
private long bytesInPrev = -1;
private long bytesInCurr = -1;
private final Statistics fsStats;
private final List<Statistics> fsStats;
TrackedRecordReader(TaskReporter reporter, JobConf job)
throws IOException{
@ -149,7 +149,7 @@ class MapTask extends Task {
fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
this.reporter = reporter;
Statistics matchedStats = null;
List<Statistics> matchedStats = null;
if (this.reporter.getInputSplit() instanceof FileSplit) {
matchedStats = getFsStatistics(((FileSplit) this.reporter
.getInputSplit()).getPath(), job);
@ -210,8 +210,13 @@ class MapTask extends Task {
return reporter;
}
private long getInputBytes(Statistics stats) {
return stats == null ? 0 : stats.getBytesRead();
private long getInputBytes(List<Statistics> stats) {
if (stats == null) return 0;
long bytesRead = 0;
for (Statistics stat: stats) {
bytesRead = bytesRead + stat.getBytesRead();
}
return bytesRead;
}
}
@ -426,7 +431,7 @@ class MapTask extends Task {
private final org.apache.hadoop.mapreduce.Counter inputRecordCounter;
private final org.apache.hadoop.mapreduce.Counter fileInputByteCounter;
private final TaskReporter reporter;
private final Statistics fsStats;
private final List<Statistics> fsStats;
NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
@ -439,7 +444,7 @@ class MapTask extends Task {
this.fileInputByteCounter = reporter
.getCounter(FileInputFormatCounter.BYTES_READ);
Statistics matchedStats = null;
List <Statistics> matchedStats = null;
if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
.getPath(), taskContext.getConfiguration());
@ -498,8 +503,13 @@ class MapTask extends Task {
return result;
}
private long getInputBytes(Statistics stats) {
return stats == null ? 0 : stats.getBytesRead();
private long getInputBytes(List<Statistics> stats) {
if (stats == null) return 0;
long bytesRead = 0;
for (Statistics stat: stats) {
bytesRead = bytesRead + stat.getBytesRead();
}
return bytesRead;
}
}
@ -554,7 +564,7 @@ class MapTask extends Task {
private final Counters.Counter mapOutputRecordCounter;
private final Counters.Counter fileOutputByteCounter;
private final Statistics fsStats;
private final List<Statistics> fsStats;
@SuppressWarnings("unchecked")
NewDirectOutputCollector(MRJobConfig jobContext,
@ -566,7 +576,7 @@ class MapTask extends Task {
fileOutputByteCounter = reporter
.getCounter(FileOutputFormatCounter.BYTES_WRITTEN);
Statistics matchedStats = null;
List<Statistics> matchedStats = null;
if (outputFormat instanceof org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) {
matchedStats = getFsStatistics(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
.getOutputPath(taskContext), taskContext.getConfiguration());
@ -603,8 +613,13 @@ class MapTask extends Task {
}
}
private long getOutputBytes(Statistics stats) {
return stats == null ? 0 : stats.getBytesWritten();
private long getOutputBytes(List<Statistics> stats) {
if (stats == null) return 0;
long bytesWritten = 0;
for (Statistics stat: stats) {
bytesWritten = bytesWritten + stat.getBytesWritten();
}
return bytesWritten;
}
}
@ -735,7 +750,7 @@ class MapTask extends Task {
private final Counters.Counter mapOutputRecordCounter;
private final Counters.Counter fileOutputByteCounter;
private final Statistics fsStats;
private final List<Statistics> fsStats;
@SuppressWarnings("unchecked")
public DirectMapOutputCollector(TaskUmbilicalProtocol umbilical,
@ -750,7 +765,7 @@ class MapTask extends Task {
fileOutputByteCounter = reporter
.getCounter(FileOutputFormatCounter.BYTES_WRITTEN);
Statistics matchedStats = null;
List<Statistics> matchedStats = null;
if (outputFormat instanceof FileOutputFormat) {
matchedStats = getFsStatistics(FileOutputFormat.getOutputPath(job), job);
}
@ -785,8 +800,13 @@ class MapTask extends Task {
mapOutputRecordCounter.increment(1);
}
private long getOutputBytes(Statistics stats) {
return stats == null ? 0 : stats.getBytesWritten();
private long getOutputBytes(List<Statistics> stats) {
if (stats == null) return 0;
long bytesWritten = 0;
for (Statistics stat: stats) {
bytesWritten = bytesWritten + stat.getBytesWritten();
}
return bytesWritten;
}
}

@ -476,14 +476,14 @@ public class ReduceTask extends Task {
private final RecordWriter<K, V> real;
private final org.apache.hadoop.mapred.Counters.Counter reduceOutputCounter;
private final org.apache.hadoop.mapred.Counters.Counter fileOutputByteCounter;
private final Statistics fsStats;
private final List<Statistics> fsStats;
@SuppressWarnings({ "deprecation", "unchecked" })
public OldTrackingRecordWriter(ReduceTask reduce, JobConf job,
TaskReporter reporter, String finalName) throws IOException {
this.reduceOutputCounter = reduce.reduceOutputCounter;
this.fileOutputByteCounter = reduce.fileOutputByteCounter;
Statistics matchedStats = null;
List<Statistics> matchedStats = null;
if (job.getOutputFormat() instanceof FileOutputFormat) {
matchedStats = getFsStatistics(FileOutputFormat.getOutputPath(job), job);
}
@ -514,8 +514,13 @@ public class ReduceTask extends Task {
fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
private long getOutputBytes(Statistics stats) {
return stats == null ? 0 : stats.getBytesWritten();
private long getOutputBytes(List<Statistics> stats) {
if (stats == null) return 0;
long bytesWritten = 0;
for (Statistics stat: stats) {
bytesWritten = bytesWritten + stat.getBytesWritten();
}
return bytesWritten;
}
}
@ -524,7 +529,7 @@ public class ReduceTask extends Task {
private final org.apache.hadoop.mapreduce.RecordWriter<K,V> real;
private final org.apache.hadoop.mapreduce.Counter outputRecordCounter;
private final org.apache.hadoop.mapreduce.Counter fileOutputByteCounter;
private final Statistics fsStats;
private final List<Statistics> fsStats;
@SuppressWarnings("unchecked")
NewTrackingRecordWriter(ReduceTask reduce,
@ -533,7 +538,7 @@ public class ReduceTask extends Task {
this.outputRecordCounter = reduce.reduceOutputCounter;
this.fileOutputByteCounter = reduce.fileOutputByteCounter;
Statistics matchedStats = null;
List<Statistics> matchedStats = null;
if (reduce.outputFormat instanceof org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) {
matchedStats = getFsStatistics(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
.getOutputPath(taskContext), taskContext.getConfiguration());
@ -566,8 +571,13 @@ public class ReduceTask extends Task {
outputRecordCounter.increment(1);
}
private long getOutputBytes(Statistics stats) {
return stats == null ? 0 : stats.getBytesWritten();
private long getOutputBytes(List<Statistics> stats) {
if (stats == null) return 0;
long bytesWritten = 0;
for (Statistics stat: stats) {
bytesWritten = bytesWritten + stat.getBytesWritten();
}
return bytesWritten;
}
}

@ -24,6 +24,7 @@ import java.io.IOException;
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
@ -326,14 +327,13 @@ abstract public class Task implements Writable, Configurable {
* the path.
* @return a Statistics instance, or null if none is found for the scheme.
*/
protected static Statistics getFsStatistics(Path path, Configuration conf) throws IOException {
Statistics matchedStats = null;
protected static List<Statistics> getFsStatistics(Path path, Configuration conf) throws IOException {
List<Statistics> matchedStats = new ArrayList<FileSystem.Statistics>();
path = path.getFileSystem(conf).makeQualified(path);
String scheme = path.toUri().getScheme();
for (Statistics stats : FileSystem.getAllStatistics()) {
if (stats.getScheme().equals(scheme)) {
matchedStats = stats;
break;
matchedStats.add(stats);
}
}
return matchedStats;
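
With this change getFsStatistics returns every Statistics object whose scheme matches the path, and callers sum across the list instead of reading a single instance, which avoids under-counting when several FileSystem instances share a scheme. A rough sketch of that group-then-sum step, assuming only the public FileSystem.getAllStatistics() API:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FileSystem.Statistics;

    public class SchemeStatsSketch {

      /** Group per-FileSystem statistics by URI scheme; several "hdfs" entries may exist. */
      static Map<String, List<Statistics>> statsByScheme() {
        Map<String, List<Statistics>> map = new HashMap<String, List<Statistics>>();
        for (Statistics stat : FileSystem.getAllStatistics()) {
          List<Statistics> list = map.get(stat.getScheme());
          if (list == null) {
            list = new ArrayList<Statistics>();
            map.put(stat.getScheme(), list);
          }
          list.add(stat);
        }
        return map;
      }

      /** Bytes read for a scheme is the sum over all of its FileSystem instances. */
      static long bytesRead(List<Statistics> stats) {
        long total = 0;
        for (Statistics stat : stats) {
          total += stat.getBytesRead();
        }
        return total;
      }
    }
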
@ -866,41 +866,53 @@ abstract public class Task implements Writable, Configurable {
* system and only creates the counters when they are needed.
*/
class FileSystemStatisticUpdater {
private FileSystem.Statistics stats;
private List<FileSystem.Statistics> stats;
private Counters.Counter readBytesCounter, writeBytesCounter,
readOpsCounter, largeReadOpsCounter, writeOpsCounter;
FileSystemStatisticUpdater(FileSystem.Statistics stats) {
private String scheme;
FileSystemStatisticUpdater(List<FileSystem.Statistics> stats, String scheme) {
this.stats = stats;
this.scheme = scheme;
}
void updateCounters() {
String scheme = stats.getScheme();
if (readBytesCounter == null) {
readBytesCounter = counters.findCounter(scheme,
FileSystemCounter.BYTES_READ);
}
readBytesCounter.setValue(stats.getBytesRead());
if (writeBytesCounter == null) {
writeBytesCounter = counters.findCounter(scheme,
FileSystemCounter.BYTES_WRITTEN);
}
writeBytesCounter.setValue(stats.getBytesWritten());
if (readOpsCounter == null) {
readOpsCounter = counters.findCounter(scheme,
FileSystemCounter.READ_OPS);
}
readOpsCounter.setValue(stats.getReadOps());
if (largeReadOpsCounter == null) {
largeReadOpsCounter = counters.findCounter(scheme,
FileSystemCounter.LARGE_READ_OPS);
}
largeReadOpsCounter.setValue(stats.getLargeReadOps());
if (writeOpsCounter == null) {
writeOpsCounter = counters.findCounter(scheme,
FileSystemCounter.WRITE_OPS);
}
writeOpsCounter.setValue(stats.getWriteOps());
long readBytes = 0;
long writeBytes = 0;
long readOps = 0;
long largeReadOps = 0;
long writeOps = 0;
for (FileSystem.Statistics stat: stats) {
readBytes = readBytes + stat.getBytesRead();
writeBytes = writeBytes + stat.getBytesWritten();
readOps = readOps + stat.getReadOps();
largeReadOps = largeReadOps + stat.getLargeReadOps();
writeOps = writeOps + stat.getWriteOps();
}
readBytesCounter.setValue(readBytes);
writeBytesCounter.setValue(writeBytes);
readOpsCounter.setValue(readOps);
largeReadOpsCounter.setValue(largeReadOps);
writeOpsCounter.setValue(writeOps);
}
}
@ -911,16 +923,28 @@ abstract public class Task implements Writable, Configurable {
new HashMap<String, FileSystemStatisticUpdater>();
private synchronized void updateCounters() {
Map<String, List<FileSystem.Statistics>> map = new
HashMap<String, List<FileSystem.Statistics>>();
for(Statistics stat: FileSystem.getAllStatistics()) {
String uriScheme = stat.getScheme();
FileSystemStatisticUpdater updater = statisticUpdaters.get(uriScheme);
if(updater==null) {//new FileSystem has been found in the cache
updater = new FileSystemStatisticUpdater(stat);
statisticUpdaters.put(uriScheme, updater);
if (map.containsKey(uriScheme)) {
List<FileSystem.Statistics> list = map.get(uriScheme);
list.add(stat);
} else {
List<FileSystem.Statistics> list = new ArrayList<FileSystem.Statistics>();
list.add(stat);
map.put(uriScheme, list);
}
updater.updateCounters();
}
for (Map.Entry<String, List<FileSystem.Statistics>> entry: map.entrySet()) {
FileSystemStatisticUpdater updater = statisticUpdaters.get(entry.getKey());
if(updater==null) {//new FileSystem has been found in the cache
updater = new FileSystemStatisticUpdater(entry.getValue(), entry.getKey());
statisticUpdaters.put(entry.getKey(), updater);
}
updater.updateCounters();
}
gcUpdater.incrementGcCounter();
updateResourceCounters();
}

@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.io.Writable;
/**
@ -73,6 +74,7 @@ public interface Counter extends Writable {
*/
void increment(long incr);
@Private
/**
* Return the underlying object if this is a facade.
* @return the underlying object.

@ -22,11 +22,8 @@ import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterators;
import com.google.common.collect.Maps;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.io.Text;
@ -34,6 +31,8 @@ import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.util.ResourceBundles;
import com.google.common.collect.Iterators;
/**
* An abstract class to provide common implementation of the
* generic counter group in both mapred and mapreduce package.
@ -46,7 +45,8 @@ public abstract class AbstractCounterGroup<T extends Counter>
private final String name;
private String displayName;
private final Map<String, T> counters = Maps.newTreeMap();
private final ConcurrentMap<String, T> counters =
new ConcurrentSkipListMap<String, T>();
private final Limits limits;
public AbstractCounterGroup(String name, String displayName,
@ -80,7 +80,7 @@ public abstract class AbstractCounterGroup<T extends Counter>
@Override
public synchronized T addCounter(String counterName, String displayName,
long value) {
String saveName = limits.filterCounterName(counterName);
String saveName = Limits.filterCounterName(counterName);
T counter = findCounterImpl(saveName, false);
if (counter == null) {
return addCounterImpl(saveName, displayName, value);
@ -97,7 +97,9 @@ public abstract class AbstractCounterGroup<T extends Counter>
@Override
public synchronized T findCounter(String counterName, String displayName) {
String saveName = limits.filterCounterName(counterName);
// Take lock to avoid two threads not finding a counter and trying to add
// the same counter.
String saveName = Limits.filterCounterName(counterName);
T counter = findCounterImpl(saveName, false);
if (counter == null) {
return addCounterImpl(saveName, displayName, 0);
@ -106,10 +108,12 @@ public abstract class AbstractCounterGroup<T extends Counter>
}
@Override
public synchronized T findCounter(String counterName, boolean create) {
return findCounterImpl(limits.filterCounterName(counterName), create);
public T findCounter(String counterName, boolean create) {
return findCounterImpl(Limits.filterCounterName(counterName), create);
}
// Lock the object. Cannot simply use concurrent constructs on the counters
// data-structure (like putIfAbsent) because of localization, limits etc.
private synchronized T findCounterImpl(String counterName, boolean create) {
T counter = counters.get(counterName);
if (counter == null && create) {
@ -142,8 +146,8 @@ public abstract class AbstractCounterGroup<T extends Counter>
protected abstract T newCounter();
@Override
public synchronized Iterator<T> iterator() {
return ImmutableSet.copyOf(counters.values()).iterator();
public Iterator<T> iterator() {
return counters.values().iterator();
}
/**
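
The move above from a TreeMap guarded by ImmutableSet copies to a ConcurrentSkipListMap works because skip-list iterators are weakly consistent: they never throw ConcurrentModificationException even if another thread inserts counters mid-iteration. A small standalone illustration of that property, using plain strings rather than Counter objects:

    import java.util.Map;
    import java.util.concurrent.ConcurrentSkipListMap;

    public class SkipListIterationSketch {
      public static void main(String[] args) throws InterruptedException {
        final ConcurrentSkipListMap<String, Long> counters =
            new ConcurrentSkipListMap<String, Long>();
        counters.put("BYTES_READ", 0L);
        counters.put("BYTES_WRITTEN", 0L);

        // Writer thread keeps adding new "counters" while the main thread iterates.
        Thread writer = new Thread(new Runnable() {
          public void run() {
            for (int i = 0; i < 10000; i++) {
              counters.put("COUNTER_" + i, (long) i);
            }
          }
        });
        writer.start();

        // Weakly consistent iteration: no defensive copy, no ConcurrentModificationException.
        long sum = 0;
        for (Map.Entry<String, Long> e : counters.entrySet()) {
          sum += e.getValue();
        }
        writer.join();
        System.out.println("sum of values seen while writing: " + sum);
      }
    }
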

@ -18,19 +18,18 @@
package org.apache.hadoop.mapreduce.counters;
import static org.apache.hadoop.mapreduce.counters.CounterGroupFactory.getFrameworkGroupId;
import static org.apache.hadoop.mapreduce.counters.CounterGroupFactory.isFrameworkGroup;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Iterators;
import com.google.common.collect.Maps;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
@ -40,7 +39,10 @@ import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.FileSystemCounter;
import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.TaskCounter;
import static org.apache.hadoop.mapreduce.counters.CounterGroupFactory.*;
import com.google.common.collect.Iterables;
import com.google.common.collect.Iterators;
import com.google.common.collect.Maps;
/**
* An abstract class to provide common implementation for the Counters
@ -61,8 +63,10 @@ public abstract class AbstractCounters<C extends Counter,
* A cache from enum values to the associated counter.
*/
private Map<Enum<?>, C> cache = Maps.newIdentityHashMap();
private Map<String, G> fgroups = Maps.newTreeMap(); // framework & fs groups
private Map<String, G> groups = Maps.newTreeMap(); // other groups
//framework & fs groups
private Map<String, G> fgroups = new ConcurrentSkipListMap<String, G>();
// other groups
private Map<String, G> groups = new ConcurrentSkipListMap<String, G>();
private final CounterGroupFactory<C, G> groupFactory;
// For framework counter serialization without strings
@ -181,14 +185,13 @@ public abstract class AbstractCounters<C extends Counter,
* @return Set of counter names.
*/
public synchronized Iterable<String> getGroupNames() {
return Iterables.concat(ImmutableSet.copyOf(fgroups.keySet()),
ImmutableSet.copyOf(groups.keySet()));
return Iterables.concat(fgroups.keySet(), groups.keySet());
}
@Override
public synchronized Iterator<G> iterator() {
return Iterators.concat(ImmutableSet.copyOf(fgroups.values()).iterator(),
ImmutableSet.copyOf(groups.values()).iterator());
public Iterator<G> iterator() {
return Iterators.concat(fgroups.values().iterator(),
groups.values().iterator());
}
/**
@ -216,7 +219,7 @@ public abstract class AbstractCounters<C extends Counter,
private String filterGroupName(String oldName) {
String newName = legacyMap.get(oldName);
if (newName == null) {
return limits.filterGroupName(oldName);
return Limits.filterGroupName(oldName);
}
LOG.warn("Group "+ oldName +" is deprecated. Use "+ newName +" instead");
return newName;

@ -19,6 +19,7 @@
package org.apache.hadoop.mapreduce.counters;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Counter;
@ -99,6 +100,7 @@ public interface CounterGroupBase<T extends Counter>
*/
void incrAllCounters(CounterGroupBase<T> rightGroup);
@Private
/**
* Exposes the underlying group type if a facade.
* @return the underlying object that this object is wrapping up.

@ -42,11 +42,11 @@ public class Limits {
return name.length() > maxLen ? name.substring(0, maxLen - 1) : name;
}
public String filterCounterName(String name) {
public static String filterCounterName(String name) {
return filterName(name, COUNTER_NAME_MAX);
}
public String filterGroupName(String name) {
public static String filterGroupName(String name) {
return filterName(name, GROUP_NAME_MAX);
}

@ -1262,4 +1262,18 @@
to the RM to fetch Application Status.</description>
</property>
<!-- jobhistory properties -->
<property>
<name>mapreduce.jobhistory.address</name>
<value>0.0.0.0:10020</value>
<description>MapReduce JobHistory Server host:port</description>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>0.0.0.0:19888</value>
<description>MapReduce JobHistory Server Web UI host:port</description>
</property>
</configuration>
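
The two properties added above give the JobHistory Server its RPC and web UI addresses. A short sketch of reading them from a Hadoop Configuration; the key strings and default values are copied from the hunk, and any *-site.xml on the classpath would override them:

    import org.apache.hadoop.conf.Configuration;

    public class JobHistoryAddressSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Fall back to the shipped defaults when no override is configured.
        String rpc = conf.get("mapreduce.jobhistory.address", "0.0.0.0:10020");
        String web = conf.get("mapreduce.jobhistory.webapp.address", "0.0.0.0:19888");

        System.out.println("history server RPC:    " + rpc);
        System.out.println("history server web UI: " + web);
      }
    }
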

@ -16,12 +16,12 @@
<parent>
<artifactId>hadoop-mapreduce-client</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-hs</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<name>hadoop-mapreduce-client-hs</name>
<properties>

@ -16,12 +16,12 @@
<parent>
<artifactId>hadoop-mapreduce-client</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-jobclient</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<name>hadoop-mapreduce-client-jobclient</name>
<properties>

@ -16,12 +16,12 @@
<parent>
<artifactId>hadoop-mapreduce-client</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-shuffle</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<name>hadoop-mapreduce-client-shuffle</name>
<properties>

@ -17,12 +17,12 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<name>hadoop-mapreduce-client</name>
<packaging>pom</packaging>

@ -17,12 +17,12 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-examples</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<description>Apache Hadoop MapReduce Examples</description>
<name>Apache Hadoop MapReduce Examples</name>
<packaging>jar</packaging>

@ -16,12 +16,12 @@
<parent>
<artifactId>hadoop-yarn</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-api</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-api</name>
<properties>

@ -16,12 +16,12 @@
<parent>
<artifactId>hadoop-yarn-applications</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-applications-distributedshell</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-applications-distributedshell</name>
<properties>

@ -16,12 +16,12 @@
<parent>
<artifactId>hadoop-yarn</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-applications</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-applications</name>
<packaging>pom</packaging>

@ -16,12 +16,12 @@
<parent>
<artifactId>hadoop-yarn</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-common</name>
<properties>

@ -20,8 +20,13 @@ package org.apache.hadoop.yarn.conf;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.Iterator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
public class YarnConfiguration extends Configuration {
private static final Splitter ADDR_SPLITTER = Splitter.on(':').trimResults();
@ -543,7 +548,25 @@ public class YarnConfiguration extends Configuration {
// Use apps manager address to figure out the host for webapp
addr = conf.get(YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS);
String host = ADDR_SPLITTER.split(addr).iterator().next();
return JOINER.join(host, ":", port);
String rmAddress = JOINER.join(host, ":", port);
InetSocketAddress address = NetUtils.createSocketAddr(
rmAddress, DEFAULT_RM_WEBAPP_PORT, RM_WEBAPP_ADDRESS);
StringBuffer sb = new StringBuffer();
InetAddress resolved = address.getAddress();
if (resolved == null || resolved.isAnyLocalAddress() ||
resolved.isLoopbackAddress()) {
String lh = host;
try {
lh = InetAddress.getLocalHost().getCanonicalHostName();
} catch (UnknownHostException e) {
//Ignore and fallback.
}
sb.append(lh);
} else {
sb.append(address.getHostName());
}
sb.append(":").append(address.getPort());
return sb.toString();
}
public static String getRMWebAppURL(Configuration conf) {
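
The new code above refuses to hand back a wildcard or loopback host for the RM web UI: if the configured bind host resolves to an any-local or loopback address, it substitutes the local canonical hostname. A condensed sketch of just that fallback decision, standalone and using only java.net:

    import java.net.InetAddress;
    import java.net.InetSocketAddress;
    import java.net.UnknownHostException;

    public class WebAppHostSketch {

      /** Pick a host name that remote browsers can actually reach. */
      static String reachableHost(String configuredHost, int port) {
        InetSocketAddress address = new InetSocketAddress(configuredHost, port);
        InetAddress resolved = address.getAddress();     // null if unresolvable
        if (resolved == null || resolved.isAnyLocalAddress()
            || resolved.isLoopbackAddress()) {
          try {
            return InetAddress.getLocalHost().getCanonicalHostName();
          } catch (UnknownHostException e) {
            return configuredHost;                       // fall back to what was configured
          }
        }
        return address.getHostName();
      }

      public static void main(String[] args) {
        System.out.println(reachableHost("0.0.0.0", 8088) + ":8088");
      }
    }
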

@ -1,24 +1,23 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.conf;
import junit.framework.Assert;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -32,20 +31,25 @@ public class TestYarnConfiguration {
String rmWebUrl = YarnConfiguration.getRMWebAppURL(conf);
// shouldn't have a "/" on the end of the url as all the other uri routines
// specifically add slashes and Jetty doesn't handle double slashes.
Assert.assertEquals("RM Web Url is not correct", "http://0.0.0.0:8088",
Assert.assertNotSame("RM Web Url is not correct", "http://0.0.0.0:8088",
rmWebUrl);
}
@Test
public void testRMWebUrlSpecified() throws Exception {
YarnConfiguration conf = new YarnConfiguration();
// seems a bit odd but right now we are forcing webapp for RM to be RM_ADDRESS
// seems a bit odd but right now we are forcing webapp for RM to be
// RM_ADDRESS
// for host and use the port from the RM_WEBAPP_ADDRESS
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "footesting:99110");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "fortesting:24543");
conf.set(YarnConfiguration.RM_ADDRESS, "rmtesting:9999");
String rmWebUrl = YarnConfiguration.getRMWebAppURL(conf);
Assert.assertEquals("RM Web Url is not correct", "http://rmtesting:99110",
rmWebUrl);
String[] parts = rmWebUrl.split(":");
Assert.assertEquals("RM Web URL Port is incorrect", 24543,
Integer.valueOf(parts[parts.length - 1]).intValue());
Assert.assertNotSame(
"RM Web Url not resolved correctly. Should not be rmtesting",
"http://rmtesting:24543", rmWebUrl);
}
}

@ -16,12 +16,12 @@
<parent>
<artifactId>hadoop-yarn-server</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-common</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-server-common</name>
<properties>

@ -16,12 +16,12 @@
<parent>
<artifactId>hadoop-yarn-server</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-nodemanager</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-server-nodemanager</name>
<properties>

@ -16,12 +16,12 @@
<parent>
<artifactId>hadoop-yarn-server</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-resourcemanager</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-server-resourcemanager</name>
<properties>

@ -368,6 +368,12 @@ public class ParentQueue implements CSQueue {
ParentQueue parentQueue = (ParentQueue)queue;
// Set new configs
setupQueueConfigs(clusterResource,
parentQueue.capacity, parentQueue.absoluteCapacity,
parentQueue.maximumCapacity, parentQueue.absoluteMaxCapacity,
parentQueue.state, parentQueue.acls);
// Re-configure existing child queues and add new ones
// The CS has already checked to ensure all existing child queues are present!
Map<String, CSQueue> currentChildQueues = getQueues(childQueues);
@ -389,12 +395,6 @@ public class ParentQueue implements CSQueue {
// Re-sort all queues
childQueues.clear();
childQueues.addAll(currentChildQueues.values());
// Set new configs
setupQueueConfigs(clusterResource,
parentQueue.capacity, parentQueue.absoluteCapacity,
parentQueue.maximumCapacity, parentQueue.absoluteMaxCapacity,
parentQueue.state, parentQueue.acls);
}
Map<String, CSQueue> getQueues(Set<CSQueue> queues) {

@ -81,7 +81,7 @@ class AppsBlock extends HtmlBlock {
td().
br().$title(startTime)._()._(startTime)._().
td().
br().$title(startTime)._()._(finishTime)._().
br().$title(finishTime)._()._(finishTime)._().
td(appInfo.getState()).
td(appInfo.getFinalStatus()).
td().

@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
import org.apache.hadoop.yarn.util.Times;
import org.apache.hadoop.yarn.webapp.Controller.RequestContext;
import org.apache.hadoop.yarn.webapp.ToJSON;
import org.apache.hadoop.yarn.webapp.view.JQueryUI.Render;
@ -60,7 +61,9 @@ class AppsList implements ToJSON {
&& app.getState() != RMAppState.valueOf(requiredAppState)) {
continue;
}
AppInfo appInfo = new AppInfo(app, false);
AppInfo appInfo = new AppInfo(app, true);
String startTime = Times.format(appInfo.getStartTime());
String finishTime = Times.format(appInfo.getFinishTime());
if (first) {
first = false;
} else {
@ -72,15 +75,15 @@ class AppsList implements ToJSON {
appInfo.getAppId()).append(_SEP).
append(escapeHtml(appInfo.getUser())).append(_SEP).
append(escapeJavaScript(escapeHtml(appInfo.getName()))).append(_SEP).
append(escapeHtml(appInfo.getQueue())).append(_SEP).
append(escapeHtml(appInfo.getQueue())).append(_SEP);
appendSortable(out, startTime).append(startTime).append(_SEP);
appendSortable(out, finishTime).append(finishTime).append(_SEP).
append(appInfo.getState()).append(_SEP).
append(appInfo.getFinalStatus()).append(_SEP);
appendProgressBar(out, appInfo.getProgress()).append(_SEP);
appendLink(out, appInfo.getTrackingUI(), rc.prefix(),
!appInfo.isTrackingUrlReady() ?
"#" : appInfo.getTrackingUrlPretty()).
append(_SEP).append(escapeJavaScript(escapeHtml(
appInfo.getNote()))).
append("\"]");
}
out.append(']');

@ -18,11 +18,12 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.util.List;
import junit.framework.Assert;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -47,6 +48,21 @@ import org.junit.Test;
public class TestCapacityScheduler {
private static final Log LOG = LogFactory.getLog(TestCapacityScheduler.class);
private static final String A = CapacitySchedulerConfiguration.ROOT + ".a";
private static final String B = CapacitySchedulerConfiguration.ROOT + ".b";
private static final String A1 = A + ".a1";
private static final String A2 = A + ".a2";
private static final String B1 = B + ".b1";
private static final String B2 = B + ".b2";
private static final String B3 = B + ".b3";
private static int A_CAPACITY = 10;
private static int B_CAPACITY = 90;
private static int A1_CAPACITY = 30;
private static int A2_CAPACITY = 70;
private static int B1_CAPACITY = 50;
private static int B2_CAPACITY = 30;
private static int B3_CAPACITY = 20;
private ResourceManager resourceManager = null;
@Before
@ -200,35 +216,102 @@ public class TestCapacityScheduler {
conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b"});
conf.setCapacity(CapacitySchedulerConfiguration.ROOT, 100);
final String A = CapacitySchedulerConfiguration.ROOT + ".a";
conf.setCapacity(A, 10);
final String B = CapacitySchedulerConfiguration.ROOT + ".b";
conf.setCapacity(B, 90);
conf.setCapacity(A, A_CAPACITY);
conf.setCapacity(B, B_CAPACITY);
// Define 2nd-level queues
final String A1 = A + ".a1";
final String A2 = A + ".a2";
conf.setQueues(A, new String[] {"a1", "a2"});
conf.setCapacity(A1, 30);
conf.setCapacity(A1, A1_CAPACITY);
conf.setUserLimitFactor(A1, 100.0f);
conf.setCapacity(A2, 70);
conf.setCapacity(A2, A2_CAPACITY);
conf.setUserLimitFactor(A2, 100.0f);
final String B1 = B + ".b1";
final String B2 = B + ".b2";
final String B3 = B + ".b3";
conf.setQueues(B, new String[] {"b1", "b2", "b3"});
conf.setCapacity(B1, 50);
conf.setCapacity(B1, B1_CAPACITY);
conf.setUserLimitFactor(B1, 100.0f);
conf.setCapacity(B2, 30);
conf.setCapacity(B2, B2_CAPACITY);
conf.setUserLimitFactor(B2, 100.0f);
conf.setCapacity(B3, 20);
conf.setCapacity(B3, B3_CAPACITY);
conf.setUserLimitFactor(B3, 100.0f);
LOG.info("Setup top-level queues a and b");
}
@Test
public void testRefreshQueues() throws Exception {
CapacityScheduler cs = new CapacityScheduler();
CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
setupQueueConfiguration(conf);
cs.reinitialize(conf, null, null);
checkQueueCapacities(cs, A_CAPACITY, B_CAPACITY);
conf.setCapacity(A, 80);
conf.setCapacity(B, 20);
cs.reinitialize(conf, null,null);
checkQueueCapacities(cs, 80, 20);
}
private void checkQueueCapacities(CapacityScheduler cs,
int capacityA, int capacityB) {
CSQueue rootQueue = cs.getRootQueue();
CSQueue queueA = findQueue(rootQueue, A);
CSQueue queueB = findQueue(rootQueue, B);
CSQueue queueA1 = findQueue(queueA, A1);
CSQueue queueA2 = findQueue(queueA, A2);
CSQueue queueB1 = findQueue(queueB, B1);
CSQueue queueB2 = findQueue(queueB, B2);
CSQueue queueB3 = findQueue(queueB, B3);
float capA = capacityA / 100.0f;
float capB = capacityB / 100.0f;
checkQueueCapacity(queueA, capA, capA, 1.0f, 1.0f);
checkQueueCapacity(queueB, capB, capB, 1.0f, 1.0f);
checkQueueCapacity(queueA1, A1_CAPACITY / 100.0f,
(A1_CAPACITY/100.0f) * capA, 1.0f, 1.0f);
checkQueueCapacity(queueA2, (float)A2_CAPACITY / 100.0f,
(A2_CAPACITY/100.0f) * capA, 1.0f, 1.0f);
checkQueueCapacity(queueB1, (float)B1_CAPACITY / 100.0f,
(B1_CAPACITY/100.0f) * capB, 1.0f, 1.0f);
checkQueueCapacity(queueB2, (float)B2_CAPACITY / 100.0f,
(B2_CAPACITY/100.0f) * capB, 1.0f, 1.0f);
checkQueueCapacity(queueB3, (float)B3_CAPACITY / 100.0f,
(B3_CAPACITY/100.0f) * capB, 1.0f, 1.0f);
}
private void checkQueueCapacity(CSQueue q, float expectedCapacity,
float expectedAbsCapacity, float expectedMaxCapacity,
float expectedAbsMaxCapacity) {
final float epsilon = 1e-5f;
assertEquals("capacity", expectedCapacity, q.getCapacity(), epsilon);
assertEquals("absolute capacity", expectedAbsCapacity,
q.getAbsoluteCapacity(), epsilon);
assertEquals("maximum capacity", expectedMaxCapacity,
q.getMaximumCapacity(), epsilon);
assertEquals("absolute maximum capacity", expectedAbsMaxCapacity,
q.getAbsoluteMaximumCapacity(), epsilon);
}
private CSQueue findQueue(CSQueue root, String queuePath) {
if (root.getQueuePath().equals(queuePath)) {
return root;
}
List<CSQueue> childQueues = root.getChildQueues();
if (childQueues != null) {
for (CSQueue q : childQueues) {
if (queuePath.startsWith(q.getQueuePath())) {
CSQueue result = findQueue(q, queuePath);
if (result != null) {
return result;
}
}
}
}
return null;
}
private void checkApplicationResourceUsage(int expected,
Application application) {
Assert.assertEquals(expected, application.getUsedResources().getMemory());
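
checkQueueCapacities above encodes the invariant that a child's absolute capacity is its own fraction multiplied by its parent's: with root.a at 10% and a.a1 at 30%, a1 owns 3% of the cluster. A tiny worked example of that arithmetic, using the constants defined in the test:

    public class CapacityMathSketch {
      public static void main(String[] args) {
        float aCapacity = 10 / 100.0f;     // root.a, relative to root
        float a1Capacity = 30 / 100.0f;    // root.a.a1, relative to root.a

        float a1Absolute = a1Capacity * aCapacity;
        System.out.println("a1 absolute capacity = " + a1Absolute);                // ~0.03

        // testRefreshQueues raises root.a to 80%; a1's absolute share follows.
        System.out.println("after refresh        = " + a1Capacity * (80 / 100.0f)); // ~0.24
      }
    }
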

@ -16,11 +16,11 @@
<parent>
<artifactId>hadoop-yarn-server</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-tests</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-server-tests</name>
<properties>

@ -16,7 +16,7 @@
<parent>
<artifactId>hadoop-yarn-server</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>

@ -16,12 +16,12 @@
<parent>
<artifactId>hadoop-yarn</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-server</name>
<packaging>pom</packaging>

@ -16,12 +16,12 @@
<parent>
<artifactId>hadoop-yarn</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-site</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<name>hadoop-yarn-site</name>
<properties>

@ -11,7 +11,7 @@
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<project name="Apache Hadoop 0.23">
<project name="Apache Hadoop ${project.version}">
<skin>
<groupId>org.apache.maven.skins</groupId>

@ -17,12 +17,12 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
<packaging>pom</packaging>
<name>hadoop-yarn</name>

@ -28,7 +28,7 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>0.23.1-SNAPSHOT</version>
<version>0.23.2-SNAPSHOT</version>
</dependency>
</dependencies>
</project>

Some files were not shown because too many files have changed in this diff.