diff --git a/hadoop-assemblies/pom.xml b/hadoop-assemblies/pom.xml
index c33fbce5b6e..560ea78ed2e 100644
--- a/hadoop-assemblies/pom.xml
+++ b/hadoop-assemblies/pom.xml
@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-assemblies</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>Apache Hadoop Assemblies</name>
   <description>Apache Hadoop Assemblies</description>
diff --git a/hadoop-client/pom.xml b/hadoop-client/pom.xml
index 6eecfc07ae2..fced787fe49 100644
--- a/hadoop-client/pom.xml
+++ b/hadoop-client/pom.xml
@@ -18,12 +18,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../hadoop-project-dist</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-client</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <packaging>jar</packaging>
   <name>Apache Hadoop Client</name>
diff --git a/hadoop-common-project/hadoop-annotations/pom.xml b/hadoop-common-project/hadoop-annotations/pom.xml
index bb9de9c5494..e9a86474871 100644
--- a/hadoop-common-project/hadoop-annotations/pom.xml
+++ b/hadoop-common-project/hadoop-annotations/pom.xml
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-annotations</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>Apache Hadoop Annotations</name>
   <description>Apache Hadoop Annotations</description>
   <packaging>jar</packaging>
diff --git a/hadoop-common-project/hadoop-auth-examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml
index 67f113ba2c4..ceeb769bfa3 100644
--- a/hadoop-common-project/hadoop-auth-examples/pom.xml
+++ b/hadoop-common-project/hadoop-auth-examples/pom.xml
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-auth-examples</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <packaging>war</packaging>
   <name>Apache Hadoop Auth Examples</name>
diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml
index 9bcf629f032..b140199f91b 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-auth</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <packaging>jar</packaging>
   <name>Apache Hadoop Auth</name>
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index efb2a756c14..1dfd0898296 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -113,7 +113,7 @@ Trunk (unreleased changes)
HADOOP-7761. Improve the performance of raw comparisons. (todd)
-Release 0.23.3 - UNRELEASED
+Release 2.0.0 - UNRELEASED
INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index fd18b607a25..41305460501 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-common</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>Apache Hadoop Common</name>
   <description>Apache Hadoop Common</description>
   <packaging>jar</packaging>
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index d0a1d3b67a6..1202002ed5f 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -28,7 +28,7 @@
   <property>
     <name>hadoop.common.configuration.version</name>
-    <value>0.24.0</value>
+    <value>3.0.0</value>
     <description>version of this configuration file</description>
   </property>
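Side note: this key ships in core-default.xml, which Configuration loads as a default resource, so the bump is visible to any client. A minimal sketch, assuming the stock defaults are on the classpath:

    import org.apache.hadoop.conf.Configuration;

    public class ConfVersionCheck {
      public static void main(String[] args) {
        Configuration conf = new Configuration(); // loads core-default.xml
        // prints "3.0.0" once this change is in place
        System.out.println(conf.get("hadoop.common.configuration.version"));
      }
    }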
diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml
index ac196188a7a..57112b7cb58 100644
--- a/hadoop-common-project/pom.xml
+++ b/hadoop-common-project/pom.xml
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-common-project</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>Apache Hadoop Common Project</name>
   <description>Apache Hadoop Common Project</description>
   <packaging>pom</packaging>
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index de47700c993..1c8bf83aca9 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-dist</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>Apache Hadoop Distribution</name>
   <description>Apache Hadoop Distribution</description>
   <packaging>jar</packaging>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index c1325e495d8..ae55c6056c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -19,12 +19,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-hdfs-httpfs</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <packaging>war</packaging>
   <name>Apache Hadoop HttpFS</name>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 71a2ebda08d..7a2dea9583e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -109,7 +109,7 @@ Trunk (unreleased changes)
HDFS-3116. Typo in fetchdt error message. (AOE Takashi via atm)
-Release 0.23.3 - UNRELEASED
+Release 2.0.0 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -277,6 +277,8 @@ Release 0.23.3 - UNRELEASED
HDFS-3129. NetworkTopology: add test that getLeaf should check for
invalid topologies (Colin Patrick McCabe via eli)
+ HDFS-3155. Clean up FSDataset implementation related code. (szetszwo)
+
OPTIMIZATIONS
HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -360,6 +362,10 @@ Release 0.23.3 - UNRELEASED
HDFS-3132. Fix findbugs warning on HDFS trunk. (todd)
+ HDFS-3156. TestDFSHAAdmin is failing post HADOOP-8202. (atm)
+
+ HDFS-3143. TestGetBlocks.testGetBlocks is failing. (Arpit Gupta via atm)
+
BREAKDOWN OF HDFS-1623 SUBTASKS
HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
@@ -869,6 +875,8 @@ Release 0.23.1 - 2012-02-17
HDFS-2868. Expose xceiver counts via the DataNode MXBean. (harsh)
+ HDFS-3139. Minor Datanode logging improvement. (eli)
+
OPTIMIZATIONS
HDFS-2130. Switch default checksum to CRC32C. (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 13103752d3e..b87c59748b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-hdfs</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>Apache Hadoop HDFS</name>
   <description>Apache Hadoop HDFS</description>
   <packaging>jar</packaging>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
index 1fba8466ed2..aab4d5f4ab2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
@@ -17,13 +17,13 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../../../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop.contrib</groupId>
   <artifactId>hadoop-hdfs-bkjournal</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>Apache Hadoop HDFS BookKeeper Journal</name>
   <description>Apache Hadoop HDFS BookKeeper Journal</description>
   <packaging>jar</packaging>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index 21e549d26a1..9c837d291f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -28,20 +28,20 @@ import org.apache.hadoop.hdfs.DeprecatedUTF8;
import org.apache.hadoop.io.WritableComparable;
/**
- * DatanodeID is composed of the data node
- * name (hostname:portNumber) and the data storage ID,
- * which it currently represents.
- *
+ * This class represents the primary identifier for a Datanode.
+ * Datanodes are identified by how they can be contacted (hostname
+ * and ports) and their storage ID, a unique identifier that associates
+ * the Datanode's blocks with a particular Datanode.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DatanodeID implements WritableComparable {
public static final DatanodeID[] EMPTY_ARRAY = {};
- public String name; /// hostname:portNumber
- public String storageID; /// unique per cluster storageID
- protected int infoPort; /// the port where the infoserver is running
- public int ipcPort; /// the port where the ipc server is running
+ public String name; // hostname:port (data transfer port)
+ public String storageID; // unique per cluster storageID
+ protected int infoPort; // info server port
+ public int ipcPort; // ipc server port
/** Equivalent to DatanodeID(""). */
public DatanodeID() {this("");}
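Illustrative only, not part of this patch: the fields documented above are plain public members, and name packs the data-transfer endpoint as host:port. A hypothetical snippet splitting it apart:

    // Hypothetical: DatanodeID(String) is the constructor the no-arg
    // constructor above delegates to; name is the public field in the diff.
    DatanodeID id = new DatanodeID("127.0.0.1:50010");
    String host = id.name.split(":")[0];                // data-transfer host
    int port = Integer.parseInt(id.name.split(":")[1]); // data-transfer port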
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index 80b2d28d802..2065ae1d1eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -37,9 +37,9 @@ import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.util.StringUtils;
/**
- * DatanodeInfo represents the status of a DataNode.
- * This object is used for communication in the
- * Datanode Protocol and the Client Protocol.
+ * This class extends the primary identifier of a Datanode with ephemeral
+ * state, e.g. usage information, current administrative state, and the
+ * network location that is communicated to clients.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
@@ -52,12 +52,10 @@ public class DatanodeInfo extends DatanodeID implements Node {
protected int xceiverCount;
protected String location = NetworkTopology.DEFAULT_RACK;
- /** HostName as supplied by the datanode during registration as its
- * name. Namenode uses datanode IP address as the name.
- */
+ // The FQDN of the IP associated with the Datanode's hostname
protected String hostName = null;
- // administrative states of a datanode
+ // Datanode administrative states
public enum AdminStates {
NORMAL("In Service"),
DECOMMISSION_INPROGRESS("Decommission In Progress"),
@@ -241,12 +239,14 @@ public class DatanodeInfo extends DatanodeID implements Node {
long nonDFSUsed = getNonDfsUsed();
float usedPercent = getDfsUsedPercent();
float remainingPercent = getRemainingPercent();
- String hostName = NetUtils.getHostNameOfIP(name);
+ String lookupName = NetUtils.getHostNameOfIP(name);
buffer.append("Name: "+ name);
- if(hostName != null)
- buffer.append(" (" + hostName + ")");
+ if (lookupName != null) {
+ buffer.append(" (" + lookupName + ")");
+ }
buffer.append("\n");
+ buffer.append("Hostname: " + getHostName() + "\n");
if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
buffer.append("Rack: "+location+"\n");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 984456f142d..f01cd0e3f68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -34,16 +34,13 @@ import org.apache.hadoop.hdfs.util.LightWeightHashSet;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;
-/**************************************************
- * DatanodeDescriptor tracks stats on a given DataNode, such as
- * available storage capacity, last update time, etc., and maintains a
- * set of blocks stored on the datanode.
- *
- * This data structure is internal to the namenode. It is *not* sent
- * over-the-wire to the Client or the Datanodes. Neither is it stored
- * persistently in the fsImage.
- **************************************************/
+/**
+ * This class extends the DatanodeInfo class with ephemeral information (e.g.
+ * health, capacity, what blocks are associated with the Datanode) that is
+ * private to the Namenode, i.e. this class is not exposed to clients.
+ */
@InterfaceAudience.Private
+@InterfaceStability.Evolving
public class DatanodeDescriptor extends DatanodeInfo {
// Stores status of decommissioning.
@@ -586,14 +583,14 @@ public class DatanodeDescriptor extends DatanodeInfo {
}
/**
- * @return Blanacer bandwidth in bytes per second for this datanode.
+ * @return balancer bandwidth in bytes per second for this datanode
*/
public long getBalancerBandwidth() {
return this.bandwidth;
}
/**
- * @param bandwidth Blanacer bandwidth in bytes per second for this datanode.
+ * @param bandwidth balancer bandwidth in bytes per second for this datanode
*/
public void setBalancerBandwidth(long bandwidth) {
this.bandwidth = bandwidth;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 1449b88f8fe..72591e018ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -358,9 +358,8 @@ class BlockReceiver implements Closeable {
* This does not verify the original checksums, under the assumption
* that they have already been validated.
*/
- private void translateChunks( byte[] dataBuf, int dataOff, int len,
- byte[] checksumBuf, int checksumOff )
- throws IOException {
+  private void translateChunks(byte[] dataBuf, int dataOff, int len,
+      byte[] checksumBuf, int checksumOff) {
if (len == 0) return;
int numChunks = (len - 1)/bytesPerChecksum + 1;
@@ -702,7 +701,7 @@ class BlockReceiver implements Closeable {
return lastPacketInBlock?-1:len;
}
- private void dropOsCacheBehindWriter(long offsetInBlock) throws IOException {
+ private void dropOsCacheBehindWriter(long offsetInBlock) {
try {
if (outFd != null &&
offsetInBlock > lastCacheDropOffset + CACHE_DROP_LAG_BYTES) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index 53ee5b7c06b..6a830dbbf98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -111,10 +111,6 @@ class BlockSender implements java.io.Closeable {
/** the block to read from */
private final ExtendedBlock block;
- /** the replica to read from */
- private final Replica replica;
- /** The visible length of a replica. */
- private final long replicaVisibleLength;
/** Stream to read block data from */
private InputStream blockIn;
/** updated while using transferTo() */
@@ -189,17 +185,18 @@ class BlockSender implements java.io.Closeable {
this.readaheadLength = datanode.getDnConf().readaheadLength;
this.shouldDropCacheBehindRead = datanode.getDnConf().dropCacheBehindReads;
+ final Replica replica;
+ final long replicaVisibleLength;
synchronized(datanode.data) {
- this.replica = getReplica(block, datanode);
- this.replicaVisibleLength = replica.getVisibleLength();
+ replica = getReplica(block, datanode);
+ replicaVisibleLength = replica.getVisibleLength();
}
// if there is a write in progress
ChunkChecksum chunkChecksum = null;
if (replica instanceof ReplicaBeingWritten) {
- long minEndOffset = startOffset + length;
- waitForMinLength((ReplicaBeingWritten)replica, minEndOffset);
- ReplicaInPipeline rip = (ReplicaInPipeline) replica;
- chunkChecksum = rip.getLastChecksumAndDataLen();
+ final ReplicaBeingWritten rbw = (ReplicaBeingWritten)replica;
+ waitForMinLength(rbw, startOffset + length);
+ chunkChecksum = rbw.getLastChecksumAndDataLen();
}
if (replica.getGenerationStamp() < block.getGenerationStamp()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index f7dd2a5ac3a..586084b0c9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -330,9 +330,7 @@ public class DataNode extends Configured
: new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0,
conf, new AccessControlList(conf.get(DFS_ADMIN, " ")),
secureResources.getListener());
- if(LOG.isDebugEnabled()) {
- LOG.debug("Datanode listening on " + infoHost + ":" + tmpInfoPort);
- }
+ LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort);
if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) {
boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
@@ -398,7 +396,8 @@ public class DataNode extends Configured
.newReflectiveBlockingService(interDatanodeProtocolXlator);
DFSUtil.addPBProtocol(conf, InterDatanodeProtocolPB.class, service,
ipcServer);
-
+ LOG.info("Opened IPC server at " + ipcServer.getListenerAddress());
+
// set service-level authorization security policy
if (conf.getBoolean(
CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
@@ -486,14 +485,14 @@ public class DataNode extends Configured
}
private void initDataXceiver(Configuration conf) throws IOException {
- InetSocketAddress socAddr = DataNode.getStreamingAddr(conf);
+ InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf);
// find free port or use privileged port provided
ServerSocket ss;
if(secureResources == null) {
ss = (dnConf.socketWriteTimeout > 0) ?
ServerSocketChannel.open().socket() : new ServerSocket();
- Server.bind(ss, socAddr, 0);
+ Server.bind(ss, streamingAddr, 0);
} else {
ss = secureResources.getStreamingSocket();
}
@@ -502,8 +501,7 @@ public class DataNode extends Configured
int tmpPort = ss.getLocalPort();
selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
tmpPort);
- LOG.info("Opened info server at " + tmpPort);
-
+ LOG.info("Opened streaming server at " + selfAddr);
this.threadGroup = new ThreadGroup("dataXceiverServer");
this.dataXceiverServer = new Daemon(threadGroup,
new DataXceiverServer(ss, conf, this));
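Taken together with the hunks above, each datanode endpoint now announces itself at INFO level during startup; roughly (illustrative addresses, message text per this patch):

    Opened info server at 0.0.0.0:50075
    Opened IPC server at 0.0.0.0:50020
    Opened streaming server at /0.0.0.0:50010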
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 64349d86c40..16244c725bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -760,8 +760,8 @@ public class DataStorage extends Storage {
/**
* Add bpStorage into bpStorageMap
*/
- private void addBlockPoolStorage(String bpID, BlockPoolSliceStorage bpStorage)
- throws IOException {
+ private void addBlockPoolStorage(String bpID, BlockPoolSliceStorage bpStorage
+ ) {
if (!this.bpStorageMap.containsKey(bpID)) {
this.bpStorageMap.put(bpID, bpStorage);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
index c59929edd6c..e3eaa6126ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
@@ -32,7 +32,7 @@ class DatanodeUtil {
static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
- private final static String DISK_ERROR = "Possible disk error on file creation: ";
+ private static final String DISK_ERROR = "Possible disk error: ";
/** Get the cause of an I/O exception if caused by a possible disk error
* @param ioe an I/O exception
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
index 3a4a4b612ea..f8699630f28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
@@ -1800,7 +1800,7 @@ public class FSDataset implements FsDatasetSpi {
ReplicaInfo replicaInfo) throws IOException {
FinalizedReplica newReplicaInfo = null;
if (replicaInfo.getState() == ReplicaState.RUR &&
- ((ReplicaUnderRecovery)replicaInfo).getOrignalReplicaState() ==
+ ((ReplicaUnderRecovery)replicaInfo).getOriginalReplica().getState() ==
ReplicaState.FINALIZED) {
newReplicaInfo = (FinalizedReplica)
((ReplicaUnderRecovery)replicaInfo).getOriginalReplica();
@@ -2036,7 +2036,7 @@ public class FSDataset implements FsDatasetSpi {
ReplicaState replicaState = dinfo.getState();
if (replicaState == ReplicaState.FINALIZED ||
(replicaState == ReplicaState.RUR &&
- ((ReplicaUnderRecovery)dinfo).getOrignalReplicaState() ==
+ ((ReplicaUnderRecovery)dinfo).getOriginalReplica().getState() ==
ReplicaState.FINALIZED)) {
v.clearPath(bpid, parent);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
index 2e15e6fce54..d5bbf04227a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
@@ -86,14 +86,6 @@ class ReplicaUnderRecovery extends ReplicaInfo {
ReplicaInfo getOriginalReplica() {
return original;
}
-
- /**
- * Get the original replica's state
- * @return the original replica's state
- */
- ReplicaState getOrignalReplicaState() {
- return original.getState();
- }
@Override //ReplicaInfo
boolean isUnlinked() {
@@ -170,6 +162,6 @@ class ReplicaUnderRecovery extends ReplicaInfo {
ReplicaRecoveryInfo createInfo() {
return new ReplicaRecoveryInfo(original.getBlockId(),
original.getBytesOnDisk(), original.getGenerationStamp(),
- getOrignalReplicaState());
+ original.getState());
}
}
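The deleted accessor was pure delegation; any remaining caller gets the same state by composing the surviving methods, as the FSDataset hunks above now do:

    // rur is assumed to be a ReplicaUnderRecovery instance
    ReplicaState state = rur.getOriginalReplica().getState();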
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
index c6744f9317c..f7da29b4c9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
@@ -69,18 +69,19 @@ public class SecureDataNodeStarter implements Daemon {
args = context.getArguments();
// Obtain secure port for data streaming to datanode
- InetSocketAddress socAddr = DataNode.getStreamingAddr(conf);
+ InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf);
int socketWriteTimeout = conf.getInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
HdfsServerConstants.WRITE_TIMEOUT);
ServerSocket ss = (socketWriteTimeout > 0) ?
ServerSocketChannel.open().socket() : new ServerSocket();
- ss.bind(socAddr, 0);
+ ss.bind(streamingAddr, 0);
// Check that we got the port we need
- if(ss.getLocalPort() != socAddr.getPort())
+ if (ss.getLocalPort() != streamingAddr.getPort()) {
throw new RuntimeException("Unable to bind on specified streaming port in secure " +
- "context. Needed " + socAddr.getPort() + ", got " + ss.getLocalPort());
+ "context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort());
+ }
// Obtain secure listener for web server
SelectChannelConnector listener =
@@ -90,15 +91,18 @@ public class SecureDataNodeStarter implements Daemon {
listener.setPort(infoSocAddr.getPort());
// Open listener here in order to bind to port as root
listener.open();
- if(listener.getPort() != infoSocAddr.getPort())
+ if (listener.getPort() != infoSocAddr.getPort()) {
throw new RuntimeException("Unable to bind on specified info port in secure " +
- "context. Needed " + socAddr.getPort() + ", got " + ss.getLocalPort());
+ "context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort());
+ }
System.err.println("Successfully obtained privileged resources (streaming port = "
+ ss + " ) (http listener port = " + listener.getConnection() +")");
- if(ss.getLocalPort() >= 1023 || listener.getPort() >= 1023)
+ if (ss.getLocalPort() >= 1023 || listener.getPort() >= 1023) {
throw new RuntimeException("Cannot start secure datanode with unprivileged ports");
-
+ }
+ System.err.println("Opened streaming server at " + streamingAddr);
+ System.err.println("Opened info server at " + infoSocAddr);
resources = new SecureResources(ss, listener);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml
index f3944837a7c..d838c61c90e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml
@@ -21,7 +21,7 @@
-
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 6ab3f0ce66c..6717a01dabc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -80,7 +80,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
@@ -1664,7 +1663,7 @@ public class MiniDFSCluster {
public void triggerBlockReports()
throws IOException {
for (DataNode dn : getDataNodes()) {
- DataNodeAdapter.triggerBlockReport(dn);
+ DataNodeTestUtils.triggerBlockReport(dn);
}
}
@@ -1672,14 +1671,14 @@ public class MiniDFSCluster {
public void triggerDeletionReports()
throws IOException {
for (DataNode dn : getDataNodes()) {
- DataNodeAdapter.triggerDeletionReport(dn);
+ DataNodeTestUtils.triggerDeletionReport(dn);
}
}
public void triggerHeartbeats()
throws IOException {
for (DataNode dn : getDataNodes()) {
- DataNodeAdapter.triggerHeartbeat(dn);
+ DataNodeTestUtils.triggerHeartbeat(dn);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
index 0f0caa673b5..4d614b8d18e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
@@ -52,7 +52,7 @@ public class TestDFSAddressConfig extends TestCase {
String selfSocketAddr = dn.getSelfAddr().toString();
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
- assertTrue(selfSocketAddr.startsWith("/127.0.0.1:"));
+ assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
/*-------------------------------------------------------------------------
* Shut down the datanodes, reconfigure, and bring them back up.
@@ -78,7 +78,7 @@ public class TestDFSAddressConfig extends TestCase {
selfSocketAddr = dn.getSelfAddr().toString();
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
// assert that default self socket address is 127.0.0.1
- assertTrue(selfSocketAddr.startsWith("/127.0.0.1:"));
+ assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
/*-------------------------------------------------------------------------
* Shut down the datanodes, reconfigure, and bring them back up.
@@ -103,7 +103,7 @@ public class TestDFSAddressConfig extends TestCase {
selfSocketAddr = dn.getSelfAddr().toString();
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
// assert that default self socket address is 0.0.0.0
- assertTrue(selfSocketAddr.startsWith("/0.0.0.0:"));
+ assertTrue(selfSocketAddr.contains("/0.0.0.0:"));
cluster.shutdown();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
index b0878d1eb8c..c27895f1f95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
@@ -135,7 +135,7 @@ public class TestGetBlocks extends TestCase {
namenode.getBlocks(new DatanodeInfo(), 2);
} catch(RemoteException e) {
getException = true;
- assertTrue(e.getMessage().contains("IllegalArgumentException"));
+ assertTrue(e.getClassName().contains("HadoopIllegalArgumentException"));
}
assertTrue(getException);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
index 0222e185797..a374e50d496 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
@@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -454,7 +454,7 @@ public class TestLeaseRecovery2 {
// Make sure the DNs don't send a heartbeat for a while, so the blocks
// won't actually get completed during lease recovery.
for (DataNode dn : cluster.getDataNodes()) {
- DataNodeAdapter.setHeartbeatsDisabledForTests(dn, true);
+ DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
}
// set the hard limit to be 1 second
@@ -474,7 +474,7 @@ public class TestLeaseRecovery2 {
// Let the DNs send heartbeats again.
for (DataNode dn : cluster.getDataNodes()) {
- DataNodeAdapter.setHeartbeatsDisabledForTests(dn, false);
+ DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
}
cluster.waitActive();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
index 1dc0b1ebd42..0d2ebc96af6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
@@ -17,6 +17,13 @@
*/
package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Random;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
@@ -26,22 +33,16 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.Replica;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
import org.junit.Before;
import org.junit.Test;
-import java.io.IOException;
-import java.util.List;
-import java.util.Random;
-
public class TestPipelines {
public static final Log LOG = LogFactory.getLog(TestPipelines.class);
@@ -105,7 +106,7 @@ public class TestPipelines {
String bpid = cluster.getNamesystem().getBlockPoolId();
for (DataNode dn : cluster.getDataNodes()) {
- Replica r = DataNodeAdapter.fetchReplicaInfo(dn, bpid, lb.get(0)
+ Replica r = DataNodeTestUtils.fetchReplicaInfo(dn, bpid, lb.get(0)
.getBlock().getBlockId());
assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java
deleted file mode 100644
index 6ab878c5617..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.mockito.Mockito;
-
-import com.google.common.base.Preconditions;
-
-/**
- * WARNING!! This is TEST ONLY class: it never has to be used
- * for ANY development purposes.
- *
- * This is a utility class to expose DataNode functionality for
- * unit and functional tests.
- */
-public class DataNodeAdapter {
- /**
- * Fetch a copy of ReplicaInfo from a datanode by block id
- * @param dn datanode to retrieve a replicainfo object from
- * @param bpid Block pool Id
- * @param blkId id of the replica's block
- * @return copy of ReplicaInfo object @link{FSDataset#fetchReplicaInfo}
- */
- public static ReplicaInfo fetchReplicaInfo (final DataNode dn,
- final String bpid,
- final long blkId) {
- return ((FSDataset)dn.data).fetchReplicaInfo(bpid, blkId);
- }
-
- public static void setHeartbeatsDisabledForTests(DataNode dn,
- boolean heartbeatsDisabledForTests) {
- dn.setHeartbeatsDisabledForTests(heartbeatsDisabledForTests);
- }
-
- public static void triggerDeletionReport(DataNode dn) throws IOException {
- for (BPOfferService bpos : dn.getAllBpOs()) {
- bpos.triggerDeletionReportForTests();
- }
- }
-
- public static void triggerHeartbeat(DataNode dn) throws IOException {
- for (BPOfferService bpos : dn.getAllBpOs()) {
- bpos.triggerHeartbeatForTests();
- }
- }
-
- public static void triggerBlockReport(DataNode dn) throws IOException {
- for (BPOfferService bpos : dn.getAllBpOs()) {
- bpos.triggerBlockReportForTests();
- }
- }
-
- public static long getPendingAsyncDeletions(DataNode dn) {
- FSDataset fsd = (FSDataset)dn.getFSDataset();
- return fsd.asyncDiskService.countPendingDeletions();
- }
-
- /**
- * Insert a Mockito spy object between the given DataNode and
- * the given NameNode. This can be used to delay or wait for
- * RPC calls on the datanode->NN path.
- */
- public static DatanodeProtocolClientSideTranslatorPB spyOnBposToNN(
- DataNode dn, NameNode nn) {
- String bpid = nn.getNamesystem().getBlockPoolId();
-
- BPOfferService bpos = null;
- for (BPOfferService thisBpos : dn.getAllBpOs()) {
- if (thisBpos.getBlockPoolId().equals(bpid)) {
- bpos = thisBpos;
- break;
- }
- }
- Preconditions.checkArgument(bpos != null,
- "No such bpid: %s", bpid);
-
- BPServiceActor bpsa = null;
- for (BPServiceActor thisBpsa : bpos.getBPServiceActors()) {
- if (thisBpsa.getNNSocketAddress().equals(nn.getServiceRpcAddress())) {
- bpsa = thisBpsa;
- break;
- }
- }
- Preconditions.checkArgument(bpsa != null,
- "No service actor to NN at %s", nn.getServiceRpcAddress());
-
- DatanodeProtocolClientSideTranslatorPB origNN = bpsa.getNameNodeProxy();
- DatanodeProtocolClientSideTranslatorPB spy = Mockito.spy(origNN);
- bpsa.setNameNode(spy);
- return spy;
- }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index 1a871dd35e3..726c5d3ce3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -24,8 +24,13 @@ import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.mockito.Mockito;
+
+import com.google.common.base.Preconditions;
/**
* Utility class for accessing package-private DataNode information during tests.
@@ -42,6 +47,64 @@ public class DataNodeTestUtils {
return dn.getDNRegistrationForBP(bpid);
}
+ public static void setHeartbeatsDisabledForTests(DataNode dn,
+ boolean heartbeatsDisabledForTests) {
+ dn.setHeartbeatsDisabledForTests(heartbeatsDisabledForTests);
+ }
+
+ public static void triggerDeletionReport(DataNode dn) throws IOException {
+ for (BPOfferService bpos : dn.getAllBpOs()) {
+ bpos.triggerDeletionReportForTests();
+ }
+ }
+
+ public static void triggerHeartbeat(DataNode dn) throws IOException {
+ for (BPOfferService bpos : dn.getAllBpOs()) {
+ bpos.triggerHeartbeatForTests();
+ }
+ }
+
+ public static void triggerBlockReport(DataNode dn) throws IOException {
+ for (BPOfferService bpos : dn.getAllBpOs()) {
+ bpos.triggerBlockReportForTests();
+ }
+ }
+
+ /**
+ * Insert a Mockito spy object between the given DataNode and
+ * the given NameNode. This can be used to delay or wait for
+ * RPC calls on the datanode->NN path.
+ */
+ public static DatanodeProtocolClientSideTranslatorPB spyOnBposToNN(
+ DataNode dn, NameNode nn) {
+ String bpid = nn.getNamesystem().getBlockPoolId();
+
+ BPOfferService bpos = null;
+ for (BPOfferService thisBpos : dn.getAllBpOs()) {
+ if (thisBpos.getBlockPoolId().equals(bpid)) {
+ bpos = thisBpos;
+ break;
+ }
+ }
+ Preconditions.checkArgument(bpos != null,
+ "No such bpid: %s", bpid);
+
+ BPServiceActor bpsa = null;
+ for (BPServiceActor thisBpsa : bpos.getBPServiceActors()) {
+ if (thisBpsa.getNNSocketAddress().equals(nn.getServiceRpcAddress())) {
+ bpsa = thisBpsa;
+ break;
+ }
+ }
+ Preconditions.checkArgument(bpsa != null,
+ "No service actor to NN at %s", nn.getServiceRpcAddress());
+
+ DatanodeProtocolClientSideTranslatorPB origNN = bpsa.getNameNodeProxy();
+ DatanodeProtocolClientSideTranslatorPB spy = Mockito.spy(origNN);
+ bpsa.setNameNode(spy);
+ return spy;
+ }
+
/**
* This method is used for testing.
* Examples are adding and deleting blocks directly.
@@ -53,18 +116,37 @@ public class DataNodeTestUtils {
return dn.getFSDataset();
}
+ public static FSDataset getFsDatasetImpl(DataNode dn) {
+ return (FSDataset)dn.getFSDataset();
+ }
+
public static File getFile(DataNode dn, String bpid, long bid) {
- return ((FSDataset)dn.getFSDataset()).getFile(bpid, bid);
+ return getFsDatasetImpl(dn).getFile(bpid, bid);
}
public static File getBlockFile(DataNode dn, String bpid, Block b
) throws IOException {
- return ((FSDataset)dn.getFSDataset()).getBlockFile(bpid, b);
+ return getFsDatasetImpl(dn).getBlockFile(bpid, b);
}
public static boolean unlinkBlock(DataNode dn, ExtendedBlock block, int numLinks
) throws IOException {
- ReplicaInfo info = ((FSDataset)dn.getFSDataset()).getReplicaInfo(block);
- return info.unlinkBlock(numLinks);
+ return getFsDatasetImpl(dn).getReplicaInfo(block).unlinkBlock(numLinks);
+ }
+
+ public static long getPendingAsyncDeletions(DataNode dn) {
+ return getFsDatasetImpl(dn).asyncDiskService.countPendingDeletions();
+ }
+
+ /**
+ * Fetch a copy of ReplicaInfo from a datanode by block id
+ * @param dn datanode to retrieve a replicainfo object from
+ * @param bpid Block pool Id
+ * @param blkId id of the replica's block
+ * @return copy of ReplicaInfo object {@link FSDataset#fetchReplicaInfo}
+ */
+ public static ReplicaInfo fetchReplicaInfo(final DataNode dn,
+ final String bpid, final long blkId) {
+ return getFsDatasetImpl(dn).fetchReplicaInfo(bpid, blkId);
}
}
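A sketch of how tests drive the consolidated helpers (mirroring the MiniDFSCluster and TestBlockReport call sites elsewhere in this patch; cluster, dn, and nn are assumed to come from a running MiniDFSCluster):

    // Force the datanode->NN traffic that tests usually wait for:
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.triggerHeartbeat(dn);
      DataNodeTestUtils.triggerBlockReport(dn);
    }
    // Or intercept those RPCs with the migrated Mockito spy:
    DatanodeProtocolClientSideTranslatorPB spy =
        DataNodeTestUtils.spyOnBposToNN(dn, nn);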
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index a12ac722a77..8d9ee07ea02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolumeSet;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
index 38c631381e2..985900030ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
@@ -17,6 +17,17 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
@@ -40,27 +51,17 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.apache.log4j.Level;
import org.junit.After;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
-import java.io.File;
-import java.io.FilenameFilter;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-import java.util.concurrent.CountDownLatch;
-
/**
* This test simulates a variety of situations when blocks are being
* intentionally corrupted, unexpectedly modified, and so on before a block
@@ -561,7 +562,7 @@ public class TestBlockReport {
// from this node.
DataNode dn = cluster.getDataNodes().get(0);
DatanodeProtocolClientSideTranslatorPB spy =
- DataNodeAdapter.spyOnBposToNN(dn, nn);
+ DataNodeTestUtils.spyOnBposToNN(dn, nn);
Mockito.doAnswer(delayer)
.when(spy).blockReport(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java
index 39667eddf17..81c45f37894 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
@@ -83,8 +83,8 @@ public class HAStressTestHarness {
@Override
public void doAnAction() throws Exception {
for (DataNode dn : cluster.getDataNodes()) {
- DataNodeAdapter.triggerDeletionReport(dn);
- DataNodeAdapter.triggerHeartbeat(dn);
+ DataNodeTestUtils.triggerDeletionReport(dn);
+ DataNodeTestUtils.triggerHeartbeat(dn);
}
for (int i = 0; i < 2; i++) {
NameNode nn = cluster.getNameNode(i);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
index bf919cea7f8..7224b430d12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.test.GenericTestUtils;
@@ -96,7 +96,7 @@ public abstract class HATestUtil {
@Override
public Boolean get() {
for (DataNode dn : cluster.getDataNodes()) {
- if (DataNodeAdapter.getPendingAsyncDeletions(dn) > 0) {
+ if (DataNodeTestUtils.getPendingAsyncDeletions(dn) > 0) {
return false;
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
index ea769c057e1..5e657ded489 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.io.PrintWriter;
@@ -47,7 +47,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -61,7 +60,6 @@ import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
-import org.junit.Ignore;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
@@ -72,9 +70,7 @@ import com.google.common.collect.Lists;
public class TestDNFencing {
- protected static final Log LOG = LogFactory.getLog(
- TestDNFencing.class);
- private static final String TEST_FILE_DATA = "hello highly available world";
+ protected static final Log LOG = LogFactory.getLog(TestDNFencing.class);
private static final String TEST_FILE = "/testStandbyIsHot";
private static final Path TEST_FILE_PATH = new Path(TEST_FILE);
private static final int SMALL_BLOCK = 1024;
@@ -497,7 +493,7 @@ public class TestDNFencing {
DataNode dn = cluster.getDataNodes().get(0);
DatanodeProtocolClientSideTranslatorPB spy =
- DataNodeAdapter.spyOnBposToNN(dn, nn2);
+ DataNodeTestUtils.spyOnBposToNN(dn, nn2);
Mockito.doAnswer(delayer)
.when(spy).blockReport(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
index c9bae53a28a..815be593599 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
@@ -17,7 +17,10 @@
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
@@ -45,7 +48,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.IOUtils;
@@ -54,9 +57,7 @@ import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
-
import org.apache.log4j.Level;
-
import org.junit.Test;
import org.mockito.Mockito;
@@ -297,7 +298,7 @@ public class TestPipelinesFailover {
// active.
DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
DatanodeProtocolClientSideTranslatorPB nnSpy =
- DataNodeAdapter.spyOnBposToNN(primaryDN, nn0);
+ DataNodeTestUtils.spyOnBposToNN(primaryDN, nn0);
// Delay the commitBlockSynchronization call
DelayAnswer delayer = new DelayAnswer(LOG);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
index ce5814b0dd0..ddfd573b4c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
@@ -35,14 +35,14 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -225,7 +225,7 @@ public class TestStandbyIsHot {
LOG.info("Got " + numReplicas + " locs: " + locs);
if (numReplicas > expectedReplicas) {
for (DataNode dn : cluster.getDataNodes()) {
- DataNodeAdapter.triggerDeletionReport(dn);
+ DataNodeTestUtils.triggerDeletionReport(dn);
}
}
return numReplicas == expectedReplicas;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
index 89fde370c0c..79793e2440d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.ha.HAServiceStatus;
import org.apache.hadoop.ha.HAServiceTarget;
import org.apache.hadoop.ha.HealthCheckFailedException;
import org.apache.hadoop.ha.NodeFencer;
+import org.apache.hadoop.test.MockitoUtil;
import org.junit.Before;
import org.junit.Test;
@@ -79,7 +80,7 @@ public class TestDFSHAAdmin {
@Before
public void setup() throws IOException {
- mockProtocol = Mockito.mock(HAServiceProtocol.class);
+ mockProtocol = MockitoUtil.mockProtocol(HAServiceProtocol.class);
tool = new DFSHAAdmin() {
@Override
diff --git a/hadoop-hdfs-project/pom.xml b/hadoop-hdfs-project/pom.xml
index 299d6f86348..1d39a2c4a91 100644
--- a/hadoop-hdfs-project/pom.xml
+++ b/hadoop-hdfs-project/pom.xml
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-hdfs-project</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>Apache Hadoop HDFS Project</name>
   <description>Apache Hadoop HDFS Project</description>
   <packaging>pom</packaging>
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index d79d3ae1390..d0c538e8884 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -96,7 +96,7 @@ Trunk (unreleased changes)
MAPREDUCE-1740. NPE in getMatchingLevelForNodes when node locations are
variable depth (ahmed via tucu) [IMPORTANT: this is dead code in trunk]
-Release 0.23.3 - UNRELEASED
+Release 2.0.0 - UNRELEASED
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/build.xml b/hadoop-mapreduce-project/build.xml
index 22964d38e0c..b2d44ec87b7 100644
--- a/hadoop-mapreduce-project/build.xml
+++ b/hadoop-mapreduce-project/build.xml
@@ -32,7 +32,7 @@
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
index 88032cad0a5..2059d280380 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
@@ -16,12 +16,12 @@
hadoop-mapreduce-client
org.apache.hadoop
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
4.0.0
org.apache.hadoop
hadoop-mapreduce-client-app
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
hadoop-mapreduce-client-app
@@ -112,7 +112,7 @@
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
index e33e589c9e2..1dd455877ea 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
@@ -16,12 +16,12 @@
hadoop-mapreduce-client
org.apache.hadoop
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
4.0.0
org.apache.hadoop
hadoop-mapreduce-client-common
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
hadoop-mapreduce-client-common
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index ca194cc239f..cfb8ce4bd7e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -16,12 +16,12 @@
hadoop-mapreduce-client
org.apache.hadoop
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
4.0.0
org.apache.hadoop
hadoop-mapreduce-client-core
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
hadoop-mapreduce-client-core
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
index 863cb58c1ba..9fa93b4c4f6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
@@ -16,12 +16,12 @@
hadoop-mapreduce-client
org.apache.hadoop
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
4.0.0
org.apache.hadoop
hadoop-mapreduce-client-hs
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
hadoop-mapreduce-client-hs
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
index b12c09aae18..4df2edaa5c4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
@@ -16,12 +16,12 @@
hadoop-mapreduce-client
org.apache.hadoop
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
4.0.0
org.apache.hadoop
hadoop-mapreduce-client-jobclient
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
hadoop-mapreduce-client-jobclient
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
index 3af3129fafa..07f436e7617 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
@@ -16,12 +16,12 @@
hadoop-mapreduce-client
org.apache.hadoop
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
4.0.0
org.apache.hadoop
hadoop-mapreduce-client-shuffle
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
hadoop-mapreduce-client-shuffle
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
index 63113611b9e..ad08306ba77 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
@@ -17,12 +17,12 @@
org.apache.hadoop
hadoop-project
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
../../hadoop-project
org.apache.hadoop
hadoop-mapreduce-client
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
hadoop-mapreduce-client
pom
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
index ac365b20106..83424a80de2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
@@ -17,12 +17,12 @@
org.apache.hadoop
hadoop-project
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
../../hadoop-project
org.apache.hadoop
hadoop-mapreduce-examples
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
Apache Hadoop MapReduce Examples
Apache Hadoop MapReduce Examples
jar
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/pom.xml
index 4302f815cd2..462ecc361ff 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/pom.xml
@@ -16,12 +16,12 @@
hadoop-yarn
org.apache.hadoop
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
4.0.0
org.apache.hadoop
hadoop-yarn-api
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
hadoop-yarn-api
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
index a21cd11ca46..37a5e99702c 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
@@ -16,12 +16,12 @@
hadoop-yarn-applications
org.apache.hadoop
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
4.0.0
org.apache.hadoop
hadoop-yarn-applications-distributedshell
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
hadoop-yarn-applications-distributedshell
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
index ecd886bb96d..fd51584ab7f 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
@@ -16,12 +16,12 @@
hadoop-yarn
org.apache.hadoop
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
4.0.0
org.apache.hadoop
hadoop-yarn-applications
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
hadoop-yarn-applications
pom
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 89d8566538e..1acf220a876 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -16,12 +16,12 @@
hadoop-yarn
org.apache.hadoop
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
4.0.0
org.apache.hadoop
hadoop-yarn-common
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
hadoop-yarn-common
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index 53c5afb501a..2dd4277a080 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -16,12 +16,12 @@
hadoop-yarn-server
org.apache.hadoop
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
4.0.0
org.apache.hadoop
hadoop-yarn-server-common
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
hadoop-yarn-server-common
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
index 1272dde76fa..6032aabd6a4 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
@@ -16,12 +16,12 @@
hadoop-yarn-server
org.apache.hadoop
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
4.0.0
org.apache.hadoop
hadoop-yarn-server-nodemanager
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
hadoop-yarn-server-nodemanager
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index b65a5246962..fd7b767faa8 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -16,12 +16,12 @@
hadoop-yarn-server
org.apache.hadoop
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
4.0.0
org.apache.hadoop
hadoop-yarn-server-resourcemanager
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
hadoop-yarn-server-resourcemanager
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
index fe95cdf530e..87c5d7e174a 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
@@ -16,11 +16,11 @@
hadoop-yarn-server
org.apache.hadoop
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
org.apache.hadoop
hadoop-yarn-server-tests
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
hadoop-yarn-server-tests
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
index 6e657bd9abc..7be3676c5c4 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
@@ -16,12 +16,12 @@
hadoop-yarn-server
org.apache.hadoop
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
4.0.0
org.apache.hadoop
hadoop-yarn-server-web-proxy
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
hadoop-yarn-server-web-proxy
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/pom.xml
index 3d82949e286..2de4e331e5b 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/pom.xml
@@ -16,12 +16,12 @@
hadoop-yarn
org.apache.hadoop
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
4.0.0
org.apache.hadoop
hadoop-yarn-server
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
hadoop-yarn-server
pom
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/pom.xml
index 887950e3741..5cc90dbddfd 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/pom.xml
@@ -16,12 +16,12 @@
hadoop-yarn
org.apache.hadoop
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
4.0.0
org.apache.hadoop
hadoop-yarn-site
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
hadoop-yarn-site
diff --git a/hadoop-mapreduce-project/hadoop-yarn/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/pom.xml
index 6b5f6e17c18..0f76b24480c 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/pom.xml
@@ -17,12 +17,12 @@
org.apache.hadoop
hadoop-project
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
../../hadoop-project
org.apache.hadoop
hadoop-yarn
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
pom
hadoop-yarn
diff --git a/hadoop-mapreduce-project/ivy/hadoop-mapred-instrumented-template.xml b/hadoop-mapreduce-project/ivy/hadoop-mapred-instrumented-template.xml
index 701a00537c2..142f11b2ee3 100644
--- a/hadoop-mapreduce-project/ivy/hadoop-mapred-instrumented-template.xml
+++ b/hadoop-mapreduce-project/ivy/hadoop-mapred-instrumented-template.xml
@@ -28,7 +28,7 @@
org.apache.hadoop
hadoop-common
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
diff --git a/hadoop-mapreduce-project/ivy/hadoop-mapred-template.xml b/hadoop-mapreduce-project/ivy/hadoop-mapred-template.xml
index 23813cab3cf..7bfb31d3a82 100644
--- a/hadoop-mapreduce-project/ivy/hadoop-mapred-template.xml
+++ b/hadoop-mapreduce-project/ivy/hadoop-mapred-template.xml
@@ -28,7 +28,7 @@
org.apache.hadoop
hadoop-common
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
diff --git a/hadoop-mapreduce-project/ivy/libraries.properties b/hadoop-mapreduce-project/ivy/libraries.properties
index 33a3fdc479e..527900b2281 100644
--- a/hadoop-mapreduce-project/ivy/libraries.properties
+++ b/hadoop-mapreduce-project/ivy/libraries.properties
@@ -41,8 +41,8 @@ ftplet-api.version=1.0.0
ftpserver-core.version=1.0.0
ftpserver-deprecated.version=1.0.0-M2
-hadoop-common.version=0.24.0-SNAPSHOT
-hadoop-hdfs.version=0.24.0-SNAPSHOT
+hadoop-common.version=3.0.0-SNAPSHOT
+hadoop-hdfs.version=3.0.0-SNAPSHOT
hsqldb.version=1.8.0.10
@@ -82,5 +82,5 @@ xmlenc.version=0.52
xerces.version=1.4.4
jackson.version=1.8.8
-yarn.version=0.24.0-SNAPSHOT
-hadoop-mapreduce.version=0.24.0-SNAPSHOT
+yarn.version=3.0.0-SNAPSHOT
+hadoop-mapreduce.version=3.0.0-SNAPSHOT
diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml
index a4f321679ca..859eb26a6d0 100644
--- a/hadoop-mapreduce-project/pom.xml
+++ b/hadoop-mapreduce-project/pom.xml
@@ -18,12 +18,12 @@
org.apache.hadoop
hadoop-project
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
../hadoop-project
org.apache.hadoop
hadoop-mapreduce
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
pom
hadoop-mapreduce
http://hadoop.apache.org/mapreduce/
diff --git a/hadoop-mapreduce-project/src/test/aop/build/aop.xml b/hadoop-mapreduce-project/src/test/aop/build/aop.xml
index 9029e629a18..390a0953f1a 100644
--- a/hadoop-mapreduce-project/src/test/aop/build/aop.xml
+++ b/hadoop-mapreduce-project/src/test/aop/build/aop.xml
@@ -21,7 +21,7 @@
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
diff --git a/hadoop-minicluster/pom.xml b/hadoop-minicluster/pom.xml
index 793a98db9cc..c9e54d3dba7 100644
--- a/hadoop-minicluster/pom.xml
+++ b/hadoop-minicluster/pom.xml
@@ -18,12 +18,12 @@
org.apache.hadoop
hadoop-project
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
../hadoop-project
org.apache.hadoop
hadoop-minicluster
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
jar
Apache Hadoop Mini-Cluster
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 41ba7564a27..1c7042516b8 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -17,12 +17,12 @@
org.apache.hadoop
hadoop-project
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
../hadoop-project
org.apache.hadoop
hadoop-project-dist
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
Apache Hadoop Project Dist POM
Apache Hadoop Project Dist POM
pom
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index dee063166f6..b37b0bad952 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -17,11 +17,11 @@
org.apache.hadoop
hadoop-main
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
org.apache.hadoop
hadoop-project
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
Apache Hadoop Project POM
Apache Hadoop Project POM
pom
diff --git a/hadoop-tools/hadoop-archives/pom.xml b/hadoop-tools/hadoop-archives/pom.xml
index 73f5201004d..1560cb08835 100644
--- a/hadoop-tools/hadoop-archives/pom.xml
+++ b/hadoop-tools/hadoop-archives/pom.xml
@@ -17,12 +17,12 @@
org.apache.hadoop
hadoop-project
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
../../hadoop-project
org.apache.hadoop
hadoop-archives
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
Apache Hadoop Archives
Apache Hadoop Archives
jar
diff --git a/hadoop-tools/hadoop-distcp/pom.xml b/hadoop-tools/hadoop-distcp/pom.xml
index 125a118edb4..46e0c1a6ebe 100644
--- a/hadoop-tools/hadoop-distcp/pom.xml
+++ b/hadoop-tools/hadoop-distcp/pom.xml
@@ -17,12 +17,12 @@
org.apache.hadoop
hadoop-project
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
../../hadoop-project
org.apache.hadoop
hadoop-distcp
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
Apache Hadoop Distributed Copy
Apache Hadoop Distributed Copy
jar
diff --git a/hadoop-tools/hadoop-extras/pom.xml b/hadoop-tools/hadoop-extras/pom.xml
index aadb2dbd6e3..da8bd155ba9 100644
--- a/hadoop-tools/hadoop-extras/pom.xml
+++ b/hadoop-tools/hadoop-extras/pom.xml
@@ -17,12 +17,12 @@
org.apache.hadoop
hadoop-project
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
../../hadoop-project
org.apache.hadoop
hadoop-extras
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
Apache Hadoop Extras
Apache Hadoop Extras
jar
diff --git a/hadoop-tools/hadoop-rumen/pom.xml b/hadoop-tools/hadoop-rumen/pom.xml
index 00bfa937df4..0c3d2257382 100644
--- a/hadoop-tools/hadoop-rumen/pom.xml
+++ b/hadoop-tools/hadoop-rumen/pom.xml
@@ -17,12 +17,12 @@
org.apache.hadoop
hadoop-project
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
../../hadoop-project
org.apache.hadoop
hadoop-rumen
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
Apache Hadoop Rumen
Apache Hadoop Rumen
jar
diff --git a/hadoop-tools/hadoop-streaming/pom.xml b/hadoop-tools/hadoop-streaming/pom.xml
index 85d9ba65538..c19872e6303 100644
--- a/hadoop-tools/hadoop-streaming/pom.xml
+++ b/hadoop-tools/hadoop-streaming/pom.xml
@@ -17,12 +17,12 @@
org.apache.hadoop
hadoop-project
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
../../hadoop-project
org.apache.hadoop
hadoop-streaming
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
Apache Hadoop MapReduce Streaming
Apache Hadoop MapReduce Streaming
jar
diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml
index 3395e4f2d64..46d1c195305 100644
--- a/hadoop-tools/hadoop-tools-dist/pom.xml
+++ b/hadoop-tools/hadoop-tools-dist/pom.xml
@@ -17,12 +17,12 @@
org.apache.hadoop
hadoop-project-dist
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
../../hadoop-project-dist
org.apache.hadoop
hadoop-tools-dist
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
Apache Hadoop Tools Dist
Apache Hadoop Tools Dist
jar
diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml
index a4e99aa77b5..dfa9049a80e 100644
--- a/hadoop-tools/pom.xml
+++ b/hadoop-tools/pom.xml
@@ -17,12 +17,12 @@
org.apache.hadoop
hadoop-project
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
../hadoop-project
org.apache.hadoop
hadoop-tools
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
Apache Hadoop Tools
Apache Hadoop Tools
pom
diff --git a/pom.xml b/pom.xml
index cac707e7c7f..d2eae64d4ed 100644
--- a/pom.xml
+++ b/pom.xml
@@ -16,7 +16,7 @@
4.0.0
org.apache.hadoop
hadoop-main
- 0.24.0-SNAPSHOT
+ 3.0.0-SNAPSHOT
Apache Hadoop Main
Apache Hadoop Main
pom