HADOOP-4687. Merge -r 784663:785643 from trunk to branch.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/HADOOP-4687/core@785794 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent c2d07b4fcd
commit b12d765467

CHANGES.txt (22 changes)
CHANGES.txt

@@ -68,6 +68,9 @@ Trunk (unreleased changes)
     HADOOP-5698. Change org.apache.hadoop.examples.MultiFileWordCount to
     use new mapreduce api. (Amareshwari Sriramadasu via sharad)
 
+    HADOOP-5913. Provide ability to an administrator to stop and start
+    job queues. (Rahul Kumar Singh and Hemanth Yamijala via yhemanth)
+
   NEW FEATURES
 
     HADOOP-4268. Change fsck to use ClientProtocol methods so that the
@@ -149,6 +152,9 @@ Trunk (unreleased changes)
     HADOOP-5170. Allows jobs to set max maps/reduces per-node and per-cluster.
     (Matei Zaharia via ddas)
 
+    HADOOP-5897. Add name-node metrics to capture java heap usage.
+    (Suresh Srinivas via shv)
+
   IMPROVEMENTS
 
     HADOOP-4565. Added CombineFileInputFormat to use data locality information
@@ -444,6 +450,12 @@ Trunk (unreleased changes)
     HADOOP-5938. Change org.apache.hadoop.mapred.jobcontrol to use new
     api. (Amareshwari Sriramadasu via sharad)
 
+    HADOOP-2141. Improves the speculative execution heuristic. The heuristic
+    is currently based on the progress-rates of tasks and the expected time
+    to complete. Also, statistics about trackers are collected, and speculative
+    tasks are not given to the ones deduced to be slow.
+    (Andy Konwinski and ddas)
+
   OPTIMIZATIONS
 
     HADOOP-5595. NameNode does not need to run a replicator to choose a
@@ -820,6 +832,9 @@ Trunk (unreleased changes)
     LD_LIBRARY_PATH and other environment variables.
     (Sreekanth Ramakrishnan via yhemanth)
 
+    HADOOP-4041. IsolationRunner does not work as documented.
+    (Philip Zeyliger via tomwhite)
+
 Release 0.20.1 - Unreleased
 
   INCOMPATIBLE CHANGES
@@ -948,6 +963,9 @@ Release 0.20.1 - Unreleased
     (usually HDFS) is started at nearly the same time as the JobTracker.
     (Amar Kamat via ddas)
 
+    HADOOP-5920. Fixes a testcase failure for TestJobHistory.
+    (Amar Kamat via ddas)
+
 Release 0.20.0 - 2009-04-15
 
   INCOMPATIBLE CHANGES
@@ -2982,6 +3000,10 @@ Release 0.18.4 - Unreleased
 
     HADOOP-5644. Namenode is stuck in safe mode. (Suresh Srinivas via hairong)
 
+    HADOOP-6017. Lease Manager in NameNode does not handle certain characters
+    in filenames. This results in fatal errors in Secondary NameNode and while
+    restarting NameNode. (Tsz Wo (Nicholas), SZE via rangadi)
+
 Release 0.18.3 - 2009-01-27
 
   IMPROVEMENTS
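For context on the HADOOP-2141 entry above: the heuristic estimates each
task's progress rate and speculates tasks whose expected completion time is
well behind that of their peers, while per-tracker statistics exclude slow
trackers. A minimal sketch of the idea follows; all names and the 1.5x
threshold are hypothetical, not the actual Hadoop scheduler code.

import java.util.Collection;

// Illustrative progress-rate speculation heuristic (not Hadoop's code).
class SpeculationSketch {
  static class TaskInfo {
    double progress;   // fraction complete, in [0, 1]
    long startTimeMs;  // when the attempt started
  }

  // Progress per millisecond since the attempt started.
  static double progressRate(TaskInfo t, long nowMs) {
    return t.progress / Math.max(1, nowMs - t.startTimeMs);
  }

  // Expected remaining time at the current rate.
  static double expectedTimeLeftMs(TaskInfo t, long nowMs) {
    return (1.0 - t.progress) / Math.max(progressRate(t, nowMs), 1e-9);
  }

  // Speculate when a task's expected finish is far behind its peers'.
  static boolean shouldSpeculate(TaskInfo t, Collection<TaskInfo> peers,
                                 long nowMs) {
    double sum = 0;
    for (TaskInfo p : peers) sum += expectedTimeLeftMs(p, nowMs);
    double avg = sum / Math.max(1, peers.size());
    return expectedTimeLeftMs(t, nowMs) > 1.5 * avg;
  }
}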
LocalDirAllocator.java

@@ -33,7 +33,7 @@ import org.apache.hadoop.conf.Configuration;
  * files. The way it works is that it is kept track what disk was last
  * allocated for a file write. For the current request, the next disk from
  * the set of disks would be allocated if the free space on the disk is
- * sufficient enough to accomodate the file that is being considered for
+ * sufficient enough to accommodate the file that is being considered for
  * creation. If the space requirements cannot be met, the next disk in order
  * would be tried and so on till a disk is found with sufficient capacity.
  * Once a disk with sufficient space is identified, a check is done to make
@@ -69,6 +69,9 @@ public class LocalDirAllocator {
     new TreeMap<String, AllocatorPerContext>();
   private String contextCfgItemName;
 
+  /** Used when size of file to be allocated is unknown. */
+  public static final int SIZE_UNKNOWN = -1;
+
   /**Create an allocator object
    * @param contextCfgItemName
    */
@@ -105,10 +108,11 @@ public class LocalDirAllocator {
    */
   public Path getLocalPathForWrite(String pathStr,
       Configuration conf) throws IOException {
-    return getLocalPathForWrite(pathStr, -1, conf);
+    return getLocalPathForWrite(pathStr, SIZE_UNKNOWN, conf);
   }
 
-  /** Get a path from the local FS. Pass size as -1 if not known apriori. We
+  /** Get a path from the local FS. Pass size as
+   *  SIZE_UNKNOWN if not known apriori. We
    *  round-robin over the set of disks (via the configured dirs) and return
    *  the first complete path which has enough space
    *  @param pathStr the requested path (this will be created on the first
@@ -274,7 +278,7 @@ public class LocalDirAllocator {
      */
     public synchronized Path getLocalPathForWrite(String path,
         Configuration conf) throws IOException {
-      return getLocalPathForWrite(path, -1, conf);
+      return getLocalPathForWrite(path, SIZE_UNKNOWN, conf);
     }
 
     /** Get a path from the local FS. If size is known, we go
@@ -296,7 +300,7 @@ public class LocalDirAllocator {
       }
       Path returnPath = null;
 
-      if(size == -1) {  //do roulette selection: pick dir with probability
+      if(size == SIZE_UNKNOWN) {  //do roulette selection: pick dir with probability
                         //proportional to available size
         long[] availableOnDisk = new long[dirDF.length];
         long totalAvailable = 0;
@@ -344,7 +348,8 @@ public class LocalDirAllocator {
                                  "directory for " + pathStr);
     }
 
-    /** Creates a file on the local FS. Pass size as -1 if not known apriori. We
+    /** Creates a file on the local FS. Pass size as
+     *  {@link LocalDirAllocator.SIZE_UNKNOWN} if not known apriori. We
      *  round-robin over the set of disks (via the configured dirs) and return
      *  a file on the first path which has enough space. The file is guaranteed
      *  to go away when the JVM exits.
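The size == SIZE_UNKNOWN branch above does the roulette selection its
comment describes: each directory is chosen with probability proportional
to its free space. Below is a standalone sketch of just that selection step;
the class and method names are hypothetical, it assumes at least one
directory has free space, and the real method also verifies the chosen
directory is writable.

import java.util.Random;

// Illustrative roulette (proportional) selection over local dirs.
class RouletteSketch {
  static int pickDir(long[] availableOnDisk, Random random) {
    long totalAvailable = 0;
    for (long avail : availableOnDisk) totalAvailable += avail;
    // Draw a point in [0, totalAvailable) and walk the cumulative sum;
    // a dir with more free space covers a larger share of the range.
    long point = (long) (random.nextDouble() * totalAvailable);
    int dir = 0;
    while (point >= availableOnDisk[dir]) {
      point -= availableOnDisk[dir];
      dir++;
    }
    return dir;
  }
}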
StringUtils.java

@@ -677,4 +677,24 @@ public class StringUtils {
   public static synchronized String limitDecimalTo2(double d) {
     return decimalFormat.format(d);
   }
+
+  /**
+   * Concatenates strings, using a separator.
+   *
+   * @param separator Separator to join with.
+   * @param strings Strings to join.
+   */
+  public static String join(CharSequence separator, Iterable<String> strings) {
+    StringBuffer sb = new StringBuffer();
+    boolean first = true;
+    for (String s : strings) {
+      if (first) {
+        first = false;
+      } else {
+        sb.append(separator);
+      }
+      sb.append(s);
+    }
+    return sb.toString();
+  }
 }
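The new join is exercised by the test added below; a trivial usage example
(the JoinExample wrapper class is hypothetical, output shown in a comment):

import java.util.Arrays;
import org.apache.hadoop.util.StringUtils;

class JoinExample {
  public static void main(String[] args) {
    // Joins any Iterable<String> with the given separator.
    System.out.println(StringUtils.join(", ", Arrays.asList("a", "b", "c")));
    // prints: a, b, c   (an empty iterable yields "")
  }
}

Note the method uses StringBuffer; StringBuilder would avoid synchronization
overhead, but behavior is identical since the buffer never escapes the method.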
TestStringUtils.java

@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.util;
 
+import java.util.ArrayList;
+import java.util.List;
+
 import junit.framework.TestCase;
 
 public class TestStringUtils extends TestCase {
@@ -118,4 +121,15 @@ public class TestStringUtils extends TestCase {
     assertEquals(-1259520L, StringUtils.TraditionalBinaryPrefix.string2long("-1230k"));
     assertEquals(956703965184L, StringUtils.TraditionalBinaryPrefix.string2long("891g"));
   }
+
+  public void testJoin() {
+    List<String> s = new ArrayList<String>();
+    s.add("a");
+    s.add("b");
+    s.add("c");
+    assertEquals("", StringUtils.join(":", s.subList(0, 0)));
+    assertEquals("a", StringUtils.join(":", s.subList(0, 1)));
+    assertEquals("a:b", StringUtils.join(":", s.subList(0, 2)));
+    assertEquals("a:b:c", StringUtils.join(":", s.subList(0, 3)));
+  }
 }