Merge trunk into HDFS-3042 branch
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3042@1306587 13f79535-47bb-0310-9956-ffa450edef68
commit a9471b97c1
@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-assemblies</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>Apache Hadoop Assemblies</name>
   <description>Apache Hadoop Assemblies</description>
 
@@ -18,12 +18,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../hadoop-project-dist</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-client</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Client</description>
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-annotations</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop Annotations</description>
   <name>Apache Hadoop Annotations</name>
   <packaging>jar</packaging>
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-auth-examples</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <packaging>war</packaging>
 
   <name>Apache Hadoop Auth Examples</name>
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-auth</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop Auth</name>
@@ -113,7 +113,7 @@ Trunk (unreleased changes)
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
 
-Release 0.23.3 - UNRELEASED
+Release 2.0.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-common</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop Common</description>
   <name>Apache Hadoop Common</name>
   <packaging>jar</packaging>
@@ -28,7 +28,7 @@
 
 <property>
   <name>hadoop.common.configuration.version</name>
-  <value>0.24.0</value>
+  <value>3.0.0</value>
   <description>version of this configuration file</description>
 </property>
 
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
    <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-common-project</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop Common Project</description>
   <name>Apache Hadoop Common Project</name>
   <packaging>pom</packaging>
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-dist</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop Distribution</description>
   <name>Apache Hadoop Distribution</name>
   <packaging>jar</packaging>
@@ -19,12 +19,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-hdfs-httpfs</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <packaging>war</packaging>
 
   <name>Apache Hadoop HttpFS</name>
@@ -109,7 +109,7 @@ Trunk (unreleased changes)
 
     HDFS-3116. Typo in fetchdt error message. (AOE Takashi via atm)
 
-Release 0.23.3 - UNRELEASED
+Release 2.0.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
@@ -277,6 +277,8 @@ Release 0.23.3 - UNRELEASED
     HDFS-3129. NetworkTopology: add test that getLeaf should check for
     invalid topologies (Colin Patrick McCabe via eli)
 
+    HDFS-3155. Clean up FSDataset implemenation related code. (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -360,6 +362,10 @@ Release 0.23.3 - UNRELEASED
 
     HDFS-3132. Fix findbugs warning on HDFS trunk. (todd)
 
+    HDFS-3156. TestDFSHAAdmin is failing post HADOOP-8202. (atm)
+
+    HDFS-3143. TestGetBlocks.testGetBlocks is failing. (Arpit Gupta via atm)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
@@ -869,6 +875,8 @@ Release 0.23.1 - 2012-02-17
 
     HDFS-2868. Expose xceiver counts via the DataNode MXBean. (harsh)
 
+    HDFS-3139. Minor Datanode logging improvement. (eli)
+
   OPTIMIZATIONS
 
     HDFS-2130. Switch default checksum to CRC32C. (todd)
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-hdfs</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop HDFS</description>
   <name>Apache Hadoop HDFS</name>
   <packaging>jar</packaging>
@@ -17,13 +17,13 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../../../../hadoop-project</relativePath>
   </parent>
 
   <groupId>org.apache.hadoop.contrib</groupId>
   <artifactId>hadoop-hdfs-bkjournal</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop HDFS BookKeeper Journal</description>
   <name>Apache Hadoop HDFS BookKeeper Journal</name>
   <packaging>jar</packaging>
@@ -28,20 +28,20 @@ import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.io.WritableComparable;
 
 /**
- * DatanodeID is composed of the data node
- * name (hostname:portNumber) and the data storage ID,
- * which it currently represents.
- *
+ * This class represents the primary identifier for a Datanode.
+ * Datanodes are identified by how they can be contacted (hostname
+ * and ports) and their storage ID, a unique number that associates
+ * the Datanodes blocks with a particular Datanode.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class DatanodeID implements WritableComparable<DatanodeID> {
   public static final DatanodeID[] EMPTY_ARRAY = {};
 
-  public String name;       /// hostname:portNumber
-  public String storageID;  /// unique per cluster storageID
-  protected int infoPort;   /// the port where the infoserver is running
-  public int ipcPort;       /// the port where the ipc server is running
+  public String name;       // hostname:port (data transfer port)
+  public String storageID;  // unique per cluster storageID
+  protected int infoPort;   // info server port
+  public int ipcPort;       // ipc server port
 
   /** Equivalent to DatanodeID(""). */
   public DatanodeID() {this("");}
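
The rewritten DatanodeID comment above describes a Datanode identifier as a "hostname:port" pair (the data transfer port) plus a cluster-unique storage ID. The following standalone sketch is illustrative only — it is not part of this commit and not the class's real API — and simply shows how such a "name" string splits into its host and port parts:

    // Hypothetical example, not from the patch: parse a Datanode "name"
    // of the form "hostname:port" into its two components.
    public final class DatanodeNameExample {
      public static void main(String[] args) {
        String name = "datanode1.example.com:50010";  // assumed sample value
        int colon = name.lastIndexOf(':');
        String host = name.substring(0, colon);
        int xferPort = Integer.parseInt(name.substring(colon + 1));
        System.out.println(host + " accepts block transfers on port " + xferPort);
      }
    }
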
@@ -37,9 +37,9 @@ import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.StringUtils;
 
 /**
- * DatanodeInfo represents the status of a DataNode.
- * This object is used for communication in the
- * Datanode Protocol and the Client Protocol.
+ * This class extends the primary identifier of a Datanode with ephemeral
+ * state, eg usage information, current administrative state, and the
+ * network location that is communicated to clients.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -52,12 +52,10 @@ public class DatanodeInfo extends DatanodeID implements Node {
   protected int xceiverCount;
   protected String location = NetworkTopology.DEFAULT_RACK;
 
-  /** HostName as supplied by the datanode during registration as its
-   * name. Namenode uses datanode IP address as the name.
-   */
+  // The FQDN of the IP associated with the Datanode's hostname
   protected String hostName = null;
 
-  // administrative states of a datanode
+  // Datanode administrative states
   public enum AdminStates {
     NORMAL("In Service"),
     DECOMMISSION_INPROGRESS("Decommission In Progress"),
@@ -241,12 +239,14 @@ public class DatanodeInfo extends DatanodeID implements Node {
     long nonDFSUsed = getNonDfsUsed();
     float usedPercent = getDfsUsedPercent();
     float remainingPercent = getRemainingPercent();
-    String hostName = NetUtils.getHostNameOfIP(name);
+    String lookupName = NetUtils.getHostNameOfIP(name);
 
     buffer.append("Name: "+ name);
-    if(hostName != null)
-      buffer.append(" (" + hostName + ")");
+    if (lookupName != null) {
+      buffer.append(" (" + lookupName + ")");
+    }
     buffer.append("\n");
+    buffer.append("Hostname: " + getHostName() + "\n");
 
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append("Rack: "+location+"\n");
@@ -34,16 +34,13 @@ import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
 
-/**************************************************
- * DatanodeDescriptor tracks stats on a given DataNode, such as
- * available storage capacity, last update time, etc., and maintains a
- * set of blocks stored on the datanode.
- *
- * This data structure is internal to the namenode. It is *not* sent
- * over-the-wire to the Client or the Datanodes. Neither is it stored
- * persistently in the fsImage.
- **************************************************/
+/**
+ * This class extends the DatanodeInfo class with ephemeral information (eg
+ * health, capacity, what blocks are associated with the Datanode) that is
+ * private to the Namenode, ie this class is not exposed to clients.
+ */
 @InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class DatanodeDescriptor extends DatanodeInfo {
 
   // Stores status of decommissioning.
@@ -586,14 +583,14 @@ public class DatanodeDescriptor extends DatanodeInfo {
   }
 
   /**
-   * @return Blanacer bandwidth in bytes per second for this datanode.
+   * @return balancer bandwidth in bytes per second for this datanode
    */
   public long getBalancerBandwidth() {
     return this.bandwidth;
   }
 
   /**
-   * @param bandwidth Blanacer bandwidth in bytes per second for this datanode.
+   * @param bandwidth balancer bandwidth in bytes per second for this datanode
    */
   public void setBalancerBandwidth(long bandwidth) {
     this.bandwidth = bandwidth;
@@ -358,9 +358,8 @@ class BlockReceiver implements Closeable {
    * This does not verify the original checksums, under the assumption
    * that they have already been validated.
    */
   private void translateChunks( byte[] dataBuf, int dataOff, int len,
-                  byte[] checksumBuf, int checksumOff )
-                  throws IOException {
+                  byte[] checksumBuf, int checksumOff ) {
     if (len == 0) return;
 
     int numChunks = (len - 1)/bytesPerChecksum + 1;
@@ -702,7 +701,7 @@ class BlockReceiver implements Closeable {
     return lastPacketInBlock?-1:len;
   }
 
-  private void dropOsCacheBehindWriter(long offsetInBlock) throws IOException {
+  private void dropOsCacheBehindWriter(long offsetInBlock) {
     try {
       if (outFd != null &&
           offsetInBlock > lastCacheDropOffset + CACHE_DROP_LAG_BYTES) {
@@ -111,10 +111,6 @@ class BlockSender implements java.io.Closeable {
 
   /** the block to read from */
   private final ExtendedBlock block;
-  /** the replica to read from */
-  private final Replica replica;
-  /** The visible length of a replica. */
-  private final long replicaVisibleLength;
   /** Stream to read block data from */
   private InputStream blockIn;
   /** updated while using transferTo() */
@@ -189,17 +185,18 @@ class BlockSender implements java.io.Closeable {
       this.readaheadLength = datanode.getDnConf().readaheadLength;
       this.shouldDropCacheBehindRead = datanode.getDnConf().dropCacheBehindReads;
 
+      final Replica replica;
+      final long replicaVisibleLength;
       synchronized(datanode.data) {
-        this.replica = getReplica(block, datanode);
-        this.replicaVisibleLength = replica.getVisibleLength();
+        replica = getReplica(block, datanode);
+        replicaVisibleLength = replica.getVisibleLength();
       }
       // if there is a write in progress
       ChunkChecksum chunkChecksum = null;
       if (replica instanceof ReplicaBeingWritten) {
-        long minEndOffset = startOffset + length;
-        waitForMinLength((ReplicaBeingWritten)replica, minEndOffset);
-        ReplicaInPipeline rip = (ReplicaInPipeline) replica;
-        chunkChecksum = rip.getLastChecksumAndDataLen();
+        final ReplicaBeingWritten rbw = (ReplicaBeingWritten)replica;
+        waitForMinLength(rbw, startOffset + length);
+        chunkChecksum = rbw.getLastChecksumAndDataLen();
       }
 
       if (replica.getGenerationStamp() < block.getGenerationStamp()) {
@@ -330,9 +330,7 @@ public class DataNode extends Configured
         : new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0,
             conf, new AccessControlList(conf.get(DFS_ADMIN, " ")),
             secureResources.getListener());
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Datanode listening on " + infoHost + ":" + tmpInfoPort);
-    }
+    LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort);
     if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) {
       boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
           DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
@@ -398,7 +396,8 @@ public class DataNode extends Configured
         .newReflectiveBlockingService(interDatanodeProtocolXlator);
     DFSUtil.addPBProtocol(conf, InterDatanodeProtocolPB.class, service,
         ipcServer);
+    LOG.info("Opened IPC server at " + ipcServer.getListenerAddress());
 
     // set service-level authorization security policy
     if (conf.getBoolean(
         CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
@@ -486,14 +485,14 @@ public class DataNode extends Configured
   }
 
   private void initDataXceiver(Configuration conf) throws IOException {
-    InetSocketAddress socAddr = DataNode.getStreamingAddr(conf);
+    InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf);
 
     // find free port or use privileged port provided
     ServerSocket ss;
     if(secureResources == null) {
       ss = (dnConf.socketWriteTimeout > 0) ?
           ServerSocketChannel.open().socket() : new ServerSocket();
-      Server.bind(ss, socAddr, 0);
+      Server.bind(ss, streamingAddr, 0);
     } else {
       ss = secureResources.getStreamingSocket();
     }
@@ -502,8 +501,7 @@ public class DataNode extends Configured
     int tmpPort = ss.getLocalPort();
     selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
                                      tmpPort);
-    LOG.info("Opened info server at " + tmpPort);
-
+    LOG.info("Opened streaming server at " + selfAddr);
     this.threadGroup = new ThreadGroup("dataXceiverServer");
     this.dataXceiverServer = new Daemon(threadGroup,
         new DataXceiverServer(ss, conf, this));
|
||||||
/**
|
/**
|
||||||
* Add bpStorage into bpStorageMap
|
* Add bpStorage into bpStorageMap
|
||||||
*/
|
*/
|
||||||
private void addBlockPoolStorage(String bpID, BlockPoolSliceStorage bpStorage)
|
private void addBlockPoolStorage(String bpID, BlockPoolSliceStorage bpStorage
|
||||||
throws IOException {
|
) {
|
||||||
if (!this.bpStorageMap.containsKey(bpID)) {
|
if (!this.bpStorageMap.containsKey(bpID)) {
|
||||||
this.bpStorageMap.put(bpID, bpStorage);
|
this.bpStorageMap.put(bpID, bpStorage);
|
||||||
}
|
}
|
||||||
|
|
|
@ -32,7 +32,7 @@ class DatanodeUtil {
|
||||||
|
|
||||||
static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
|
static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
|
||||||
|
|
||||||
private final static String DISK_ERROR = "Possible disk error on file creation: ";
|
private static final String DISK_ERROR = "Possible disk error: ";
|
||||||
|
|
||||||
/** Get the cause of an I/O exception if caused by a possible disk error
|
/** Get the cause of an I/O exception if caused by a possible disk error
|
||||||
* @param ioe an I/O exception
|
* @param ioe an I/O exception
|
||||||
|
|
|
@ -1800,7 +1800,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
|
||||||
ReplicaInfo replicaInfo) throws IOException {
|
ReplicaInfo replicaInfo) throws IOException {
|
||||||
FinalizedReplica newReplicaInfo = null;
|
FinalizedReplica newReplicaInfo = null;
|
||||||
if (replicaInfo.getState() == ReplicaState.RUR &&
|
if (replicaInfo.getState() == ReplicaState.RUR &&
|
||||||
((ReplicaUnderRecovery)replicaInfo).getOrignalReplicaState() ==
|
((ReplicaUnderRecovery)replicaInfo).getOriginalReplica().getState() ==
|
||||||
ReplicaState.FINALIZED) {
|
ReplicaState.FINALIZED) {
|
||||||
newReplicaInfo = (FinalizedReplica)
|
newReplicaInfo = (FinalizedReplica)
|
||||||
((ReplicaUnderRecovery)replicaInfo).getOriginalReplica();
|
((ReplicaUnderRecovery)replicaInfo).getOriginalReplica();
|
||||||
|
@ -2036,7 +2036,7 @@ public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
|
||||||
ReplicaState replicaState = dinfo.getState();
|
ReplicaState replicaState = dinfo.getState();
|
||||||
if (replicaState == ReplicaState.FINALIZED ||
|
if (replicaState == ReplicaState.FINALIZED ||
|
||||||
(replicaState == ReplicaState.RUR &&
|
(replicaState == ReplicaState.RUR &&
|
||||||
((ReplicaUnderRecovery)dinfo).getOrignalReplicaState() ==
|
((ReplicaUnderRecovery)dinfo).getOriginalReplica().getState() ==
|
||||||
ReplicaState.FINALIZED)) {
|
ReplicaState.FINALIZED)) {
|
||||||
v.clearPath(bpid, parent);
|
v.clearPath(bpid, parent);
|
||||||
}
|
}
|
||||||
|
|
|
@@ -86,14 +86,6 @@ class ReplicaUnderRecovery extends ReplicaInfo {
   ReplicaInfo getOriginalReplica() {
     return original;
   }
-
-  /**
-   * Get the original replica's state
-   * @return the original replica's state
-   */
-  ReplicaState getOrignalReplicaState() {
-    return original.getState();
-  }
 
   @Override //ReplicaInfo
   boolean isUnlinked() {
@@ -170,6 +162,6 @@ class ReplicaUnderRecovery extends ReplicaInfo {
   ReplicaRecoveryInfo createInfo() {
     return new ReplicaRecoveryInfo(original.getBlockId(),
         original.getBytesOnDisk(), original.getGenerationStamp(),
-        getOrignalReplicaState());
+        original.getState());
   }
 }
@@ -69,18 +69,19 @@ public class SecureDataNodeStarter implements Daemon {
     args = context.getArguments();
 
     // Obtain secure port for data streaming to datanode
-    InetSocketAddress socAddr = DataNode.getStreamingAddr(conf);
+    InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf);
     int socketWriteTimeout = conf.getInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
         HdfsServerConstants.WRITE_TIMEOUT);
 
     ServerSocket ss = (socketWriteTimeout > 0) ?
         ServerSocketChannel.open().socket() : new ServerSocket();
-    ss.bind(socAddr, 0);
+    ss.bind(streamingAddr, 0);
 
     // Check that we got the port we need
-    if(ss.getLocalPort() != socAddr.getPort())
+    if (ss.getLocalPort() != streamingAddr.getPort()) {
       throw new RuntimeException("Unable to bind on specified streaming port in secure " +
-          "context. Needed " + socAddr.getPort() + ", got " + ss.getLocalPort());
+          "context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort());
+    }
 
     // Obtain secure listener for web server
     SelectChannelConnector listener =
@@ -90,15 +91,18 @@ public class SecureDataNodeStarter implements Daemon {
     listener.setPort(infoSocAddr.getPort());
     // Open listener here in order to bind to port as root
     listener.open();
-    if(listener.getPort() != infoSocAddr.getPort())
+    if (listener.getPort() != infoSocAddr.getPort()) {
       throw new RuntimeException("Unable to bind on specified info port in secure " +
-          "context. Needed " + socAddr.getPort() + ", got " + ss.getLocalPort());
-
+          "context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort());
+    }
     System.err.println("Successfully obtained privileged resources (streaming port = "
         + ss + " ) (http listener port = " + listener.getConnection() +")");
 
-    if(ss.getLocalPort() >= 1023 || listener.getPort() >= 1023)
+    if (ss.getLocalPort() >= 1023 || listener.getPort() >= 1023) {
       throw new RuntimeException("Cannot start secure datanode with unprivileged ports");
+    }
+    System.err.println("Opened streaming server at " + streamingAddr);
+    System.err.println("Opened info server at " + infoSocAddr);
     resources = new SecureResources(ss, listener);
   }
 
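
The SecureDataNodeStarter change above keeps the existing bind-and-verify pattern: request a specific streaming address and fail fast if the socket actually bound to a different port. A rough standalone illustration of that pattern (assumed, unprivileged port number; not code from this commit):

    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    // Illustrative only: bind a ServerSocket to a requested port and verify
    // that the kernel did not hand back a different one.
    public class BindCheckExample {
      public static void main(String[] args) throws Exception {
        InetSocketAddress addr = new InetSocketAddress("0.0.0.0", 50010); // assumed port
        ServerSocket ss = new ServerSocket();
        ss.bind(addr, 0);
        if (ss.getLocalPort() != addr.getPort()) {
          throw new RuntimeException("Needed " + addr.getPort()
              + ", got " + ss.getLocalPort());
        }
        System.err.println("Opened streaming server at " + ss.getLocalSocketAddress());
        ss.close();
      }
    }
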
@@ -21,7 +21,7 @@
   <property name="aspectversion" value="1.6.5"/>
   <!-- TODO this has to be changed synchronously with build.xml version prop.-->
   <!-- this workarounds of test-patch setting its own 'version' -->
-  <property name="project.version" value="0.24.0-SNAPSHOT"/>
+  <property name="project.version" value="3.0.0-SNAPSHOT"/>
 
   <!-- Properties common for all fault injections -->
   <property name="build-fi.dir" value="${basedir}/build-fi"/>
@@ -80,7 +80,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
@@ -1664,7 +1663,7 @@ public class MiniDFSCluster {
   public void triggerBlockReports()
       throws IOException {
     for (DataNode dn : getDataNodes()) {
-      DataNodeAdapter.triggerBlockReport(dn);
+      DataNodeTestUtils.triggerBlockReport(dn);
     }
   }
 
@@ -1672,14 +1671,14 @@
   public void triggerDeletionReports()
       throws IOException {
     for (DataNode dn : getDataNodes()) {
-      DataNodeAdapter.triggerDeletionReport(dn);
+      DataNodeTestUtils.triggerDeletionReport(dn);
     }
   }
 
   public void triggerHeartbeats()
       throws IOException {
     for (DataNode dn : getDataNodes()) {
-      DataNodeAdapter.triggerHeartbeat(dn);
+      DataNodeTestUtils.triggerHeartbeat(dn);
     }
   }
 
@@ -52,7 +52,7 @@ public class TestDFSAddressConfig extends TestCase {
 
     String selfSocketAddr = dn.getSelfAddr().toString();
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
-    assertTrue(selfSocketAddr.startsWith("/127.0.0.1:"));
+    assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
 
     /*-------------------------------------------------------------------------
      * Shut down the datanodes, reconfigure, and bring them back up.
@@ -78,7 +78,7 @@ public class TestDFSAddressConfig extends TestCase {
     selfSocketAddr = dn.getSelfAddr().toString();
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     // assert that default self socket address is 127.0.0.1
-    assertTrue(selfSocketAddr.startsWith("/127.0.0.1:"));
+    assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
 
     /*-------------------------------------------------------------------------
      * Shut down the datanodes, reconfigure, and bring them back up.
@@ -103,7 +103,7 @@ public class TestDFSAddressConfig extends TestCase {
     selfSocketAddr = dn.getSelfAddr().toString();
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     // assert that default self socket address is 0.0.0.0
-    assertTrue(selfSocketAddr.startsWith("/0.0.0.0:"));
+    assertTrue(selfSocketAddr.contains("/0.0.0.0:"));
 
     cluster.shutdown();
   }
@@ -135,7 +135,7 @@ public class TestGetBlocks extends TestCase {
       namenode.getBlocks(new DatanodeInfo(), 2);
     } catch(RemoteException e) {
       getException = true;
-      assertTrue(e.getMessage().contains("IllegalArgumentException"));
+      assertTrue(e.getClassName().contains("HadoopIllegalArgumentException"));
     }
     assertTrue(getException);
   }
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
@@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -454,7 +454,7 @@ public class TestLeaseRecovery2 {
     // Make sure the DNs don't send a heartbeat for a while, so the blocks
     // won't actually get completed during lease recovery.
     for (DataNode dn : cluster.getDataNodes()) {
-      DataNodeAdapter.setHeartbeatsDisabledForTests(dn, true);
+      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
     }
 
     // set the hard limit to be 1 second
@@ -474,7 +474,7 @@ public class TestLeaseRecovery2 {
 
     // Let the DNs send heartbeats again.
     for (DataNode dn : cluster.getDataNodes()) {
-      DataNodeAdapter.setHeartbeatsDisabledForTests(dn, false);
+      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
     }
 
     cluster.waitActive();
@@ -17,6 +17,13 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Random;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -26,22 +33,16 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.After;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.io.IOException;
-import java.util.List;
-import java.util.Random;
-
 public class TestPipelines {
   public static final Log LOG = LogFactory.getLog(TestPipelines.class);
 
@@ -105,7 +106,7 @@ public class TestPipelines {
 
     String bpid = cluster.getNamesystem().getBlockPoolId();
     for (DataNode dn : cluster.getDataNodes()) {
-      Replica r = DataNodeAdapter.fetchReplicaInfo(dn, bpid, lb.get(0)
+      Replica r = DataNodeTestUtils.fetchReplicaInfo(dn, bpid, lb.get(0)
           .getBlock().getBlockId());
 
       assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
@@ -1,111 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.mockito.Mockito;
-
-import com.google.common.base.Preconditions;
-
-/**
- * WARNING!! This is TEST ONLY class: it never has to be used
- * for ANY development purposes.
- *
- * This is a utility class to expose DataNode functionality for
- * unit and functional tests.
- */
-public class DataNodeAdapter {
-  /**
-   * Fetch a copy of ReplicaInfo from a datanode by block id
-   * @param dn datanode to retrieve a replicainfo object from
-   * @param bpid Block pool Id
-   * @param blkId id of the replica's block
-   * @return copy of ReplicaInfo object @link{FSDataset#fetchReplicaInfo}
-   */
-  public static ReplicaInfo fetchReplicaInfo (final DataNode dn,
-                                              final String bpid,
-                                              final long blkId) {
-    return ((FSDataset)dn.data).fetchReplicaInfo(bpid, blkId);
-  }
-
-  public static void setHeartbeatsDisabledForTests(DataNode dn,
-      boolean heartbeatsDisabledForTests) {
-    dn.setHeartbeatsDisabledForTests(heartbeatsDisabledForTests);
-  }
-
-  public static void triggerDeletionReport(DataNode dn) throws IOException {
-    for (BPOfferService bpos : dn.getAllBpOs()) {
-      bpos.triggerDeletionReportForTests();
-    }
-  }
-
-  public static void triggerHeartbeat(DataNode dn) throws IOException {
-    for (BPOfferService bpos : dn.getAllBpOs()) {
-      bpos.triggerHeartbeatForTests();
-    }
-  }
-
-  public static void triggerBlockReport(DataNode dn) throws IOException {
-    for (BPOfferService bpos : dn.getAllBpOs()) {
-      bpos.triggerBlockReportForTests();
-    }
-  }
-
-  public static long getPendingAsyncDeletions(DataNode dn) {
-    FSDataset fsd = (FSDataset)dn.getFSDataset();
-    return fsd.asyncDiskService.countPendingDeletions();
-  }
-
-  /**
-   * Insert a Mockito spy object between the given DataNode and
-   * the given NameNode. This can be used to delay or wait for
-   * RPC calls on the datanode->NN path.
-   */
-  public static DatanodeProtocolClientSideTranslatorPB spyOnBposToNN(
-      DataNode dn, NameNode nn) {
-    String bpid = nn.getNamesystem().getBlockPoolId();
-
-    BPOfferService bpos = null;
-    for (BPOfferService thisBpos : dn.getAllBpOs()) {
-      if (thisBpos.getBlockPoolId().equals(bpid)) {
-        bpos = thisBpos;
-        break;
-      }
-    }
-    Preconditions.checkArgument(bpos != null,
-        "No such bpid: %s", bpid);
-
-    BPServiceActor bpsa = null;
-    for (BPServiceActor thisBpsa : bpos.getBPServiceActors()) {
-      if (thisBpsa.getNNSocketAddress().equals(nn.getServiceRpcAddress())) {
-        bpsa = thisBpsa;
-        break;
-      }
-    }
-    Preconditions.checkArgument(bpsa != null,
-      "No service actor to NN at %s", nn.getServiceRpcAddress());
-
-    DatanodeProtocolClientSideTranslatorPB origNN = bpsa.getNameNodeProxy();
-    DatanodeProtocolClientSideTranslatorPB spy = Mockito.spy(origNN);
-    bpsa.setNameNode(spy);
-    return spy;
-  }
-}
@ -24,8 +24,13 @@ import java.io.IOException;
|
||||||
|
|
||||||
import org.apache.hadoop.hdfs.protocol.Block;
|
import org.apache.hadoop.hdfs.protocol.Block;
|
||||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||||
|
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
|
||||||
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
|
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
|
||||||
|
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
|
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
|
||||||
|
import org.mockito.Mockito;
|
||||||
|
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Utility class for accessing package-private DataNode information during tests.
|
* Utility class for accessing package-private DataNode information during tests.
|
||||||
|
@ -42,6 +47,64 @@ public class DataNodeTestUtils {
|
||||||
return dn.getDNRegistrationForBP(bpid);
|
return dn.getDNRegistrationForBP(bpid);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public static void setHeartbeatsDisabledForTests(DataNode dn,
|
||||||
|
boolean heartbeatsDisabledForTests) {
|
||||||
|
dn.setHeartbeatsDisabledForTests(heartbeatsDisabledForTests);
|
||||||
|
}
|
||||||
|
|
||||||
|
public static void triggerDeletionReport(DataNode dn) throws IOException {
|
||||||
|
for (BPOfferService bpos : dn.getAllBpOs()) {
|
||||||
|
bpos.triggerDeletionReportForTests();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public static void triggerHeartbeat(DataNode dn) throws IOException {
|
||||||
|
for (BPOfferService bpos : dn.getAllBpOs()) {
|
||||||
|
bpos.triggerHeartbeatForTests();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public static void triggerBlockReport(DataNode dn) throws IOException {
|
||||||
|
for (BPOfferService bpos : dn.getAllBpOs()) {
|
||||||
|
bpos.triggerBlockReportForTests();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
+   * Insert a Mockito spy object between the given DataNode and
+   * the given NameNode. This can be used to delay or wait for
+   * RPC calls on the datanode->NN path.
+   */
+  public static DatanodeProtocolClientSideTranslatorPB spyOnBposToNN(
+      DataNode dn, NameNode nn) {
+    String bpid = nn.getNamesystem().getBlockPoolId();
+
+    BPOfferService bpos = null;
+    for (BPOfferService thisBpos : dn.getAllBpOs()) {
+      if (thisBpos.getBlockPoolId().equals(bpid)) {
+        bpos = thisBpos;
+        break;
+      }
+    }
+    Preconditions.checkArgument(bpos != null,
+        "No such bpid: %s", bpid);
+
+    BPServiceActor bpsa = null;
+    for (BPServiceActor thisBpsa : bpos.getBPServiceActors()) {
+      if (thisBpsa.getNNSocketAddress().equals(nn.getServiceRpcAddress())) {
+        bpsa = thisBpsa;
+        break;
+      }
+    }
+    Preconditions.checkArgument(bpsa != null,
+        "No service actor to NN at %s", nn.getServiceRpcAddress());
+
+    DatanodeProtocolClientSideTranslatorPB origNN = bpsa.getNameNodeProxy();
+    DatanodeProtocolClientSideTranslatorPB spy = Mockito.spy(origNN);
+    bpsa.setNameNode(spy);
+    return spy;
+  }
+
   /**
    * This method is used for testing.
    * Examples are adding and deleting blocks directly.
@@ -53,18 +116,37 @@ public class DataNodeTestUtils {
     return dn.getFSDataset();
   }
 
+  public static FSDataset getFsDatasetImpl(DataNode dn) {
+    return (FSDataset)dn.getFSDataset();
+  }
+
   public static File getFile(DataNode dn, String bpid, long bid) {
-    return ((FSDataset)dn.getFSDataset()).getFile(bpid, bid);
+    return getFsDatasetImpl(dn).getFile(bpid, bid);
   }
 
   public static File getBlockFile(DataNode dn, String bpid, Block b
       ) throws IOException {
-    return ((FSDataset)dn.getFSDataset()).getBlockFile(bpid, b);
+    return getFsDatasetImpl(dn).getBlockFile(bpid, b);
   }
 
   public static boolean unlinkBlock(DataNode dn, ExtendedBlock block, int numLinks
       ) throws IOException {
-    ReplicaInfo info = ((FSDataset)dn.getFSDataset()).getReplicaInfo(block);
-    return info.unlinkBlock(numLinks);
+    return getFsDatasetImpl(dn).getReplicaInfo(block).unlinkBlock(numLinks);
+  }
+
+  public static long getPendingAsyncDeletions(DataNode dn) {
+    return getFsDatasetImpl(dn).asyncDiskService.countPendingDeletions();
+  }
+
+  /**
+   * Fetch a copy of ReplicaInfo from a datanode by block id
+   * @param dn datanode to retrieve a replicainfo object from
+   * @param bpid Block pool Id
+   * @param blkId id of the replica's block
+   * @return copy of ReplicaInfo object @link{FSDataset#fetchReplicaInfo}
+   */
+  public static ReplicaInfo fetchReplicaInfo(final DataNode dn,
+      final String bpid, final long blkId) {
+    return getFsDatasetImpl(dn).fetchReplicaInfo(bpid, blkId);
   }
 }
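The following is a minimal usage sketch, not part of this commit: it shows how the spy returned by DataNodeTestUtils.spyOnBposToNN() is typically combined with a GenericTestUtils.DelayAnswer so a test can hold a block report back and release it at a chosen moment, mirroring the TestBlockReport and TestDNFencing hunks further below. The blockReport() parameter types are assumed from the imports shown in those hunks; cluster, nn and LOG come from the surrounding test and are not defined here.

    // Sketch only -- cluster, nn and LOG are assumed from the test's setup.
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeProtocolClientSideTranslatorPB spy =
        DataNodeTestUtils.spyOnBposToNN(dn, nn);

    DelayAnswer delayer = new DelayAnswer(LOG);
    Mockito.doAnswer(delayer)
        .when(spy).blockReport(
            Mockito.<DatanodeRegistration>anyObject(),
            Mockito.anyString(),
            Mockito.<StorageBlockReport[]>anyObject());

    // ... trigger a block report, then:
    delayer.waitForCall();   // the report has reached the spy
    delayer.proceed();       // release it to the NameNode
    delayer.waitForResult(); // wait for the delayed RPC to finish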
@@ -38,7 +38,6 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolumeSet;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
@@ -17,6 +17,17 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -40,27 +51,17 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.log4j.Level;
 import org.junit.After;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 
-import java.io.File;
-import java.io.FilenameFilter;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-import java.util.concurrent.CountDownLatch;
-
 /**
  * This test simulates a variety of situations when blocks are being
  * intentionally orrupted, unexpectedly modified, and so on before a block
@@ -561,7 +562,7 @@ public class TestBlockReport {
     // from this node.
     DataNode dn = cluster.getDataNodes().get(0);
     DatanodeProtocolClientSideTranslatorPB spy =
-        DataNodeAdapter.spyOnBposToNN(dn, nn);
+        DataNodeTestUtils.spyOnBposToNN(dn, nn);
 
     Mockito.doAnswer(delayer)
       .when(spy).blockReport(
@@ -27,7 +27,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
@@ -83,8 +83,8 @@ public class HAStressTestHarness {
       @Override
       public void doAnAction() throws Exception {
         for (DataNode dn : cluster.getDataNodes()) {
-          DataNodeAdapter.triggerDeletionReport(dn);
-          DataNodeAdapter.triggerHeartbeat(dn);
+          DataNodeTestUtils.triggerDeletionReport(dn);
+          DataNodeTestUtils.triggerHeartbeat(dn);
         }
         for (int i = 0; i < 2; i++) {
           NameNode nn = cluster.getNameNode(i);
@@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -96,7 +96,7 @@ public abstract class HATestUtil {
       @Override
       public Boolean get() {
         for (DataNode dn : cluster.getDataNodes()) {
-          if (DataNodeAdapter.getPendingAsyncDeletions(dn) > 0) {
+          if (DataNodeTestUtils.getPendingAsyncDeletions(dn) > 0) {
             return false;
           }
         }
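For context, a minimal sketch (an assumption, not shown in this hunk) of how the Supplier<Boolean> above is normally driven: the check is polled through GenericTestUtils.waitFor() until every datanode has drained its pending async block deletions. Supplier here is com.google.common.base.Supplier, cluster is the MiniDFSCluster under test, and the call sits in a test method that can propagate the wait's checked exceptions.

    // Sketch only -- assumes cluster and the imports described above.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        for (DataNode dn : cluster.getDataNodes()) {
          if (DataNodeTestUtils.getPendingAsyncDeletions(dn) > 0) {
            return false;  // some datanode is still deleting blocks
          }
        }
        return true;       // no pending deletions anywhere
      }
    }, 1000, 10000);       // poll every second, give up after 10 seconds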
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
 
 import java.io.IOException;
 import java.io.PrintWriter;
@@ -47,7 +47,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -61,7 +60,6 @@ import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
@@ -72,9 +70,7 @@ import com.google.common.collect.Lists;
 
 public class TestDNFencing {
 
-  protected static final Log LOG = LogFactory.getLog(
-      TestDNFencing.class);
-  private static final String TEST_FILE_DATA = "hello highly available world";
+  protected static final Log LOG = LogFactory.getLog(TestDNFencing.class);
   private static final String TEST_FILE = "/testStandbyIsHot";
   private static final Path TEST_FILE_PATH = new Path(TEST_FILE);
   private static final int SMALL_BLOCK = 1024;
@@ -497,7 +493,7 @@ public class TestDNFencing {
 
     DataNode dn = cluster.getDataNodes().get(0);
     DatanodeProtocolClientSideTranslatorPB spy =
-        DataNodeAdapter.spyOnBposToNN(dn, nn2);
+        DataNodeTestUtils.spyOnBposToNN(dn, nn2);
 
     Mockito.doAnswer(delayer)
       .when(spy).blockReport(
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
@@ -45,7 +48,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
@@ -54,9 +57,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
-
 import org.apache.log4j.Level;
-
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -297,7 +298,7 @@ public class TestPipelinesFailover {
     // active.
     DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
     DatanodeProtocolClientSideTranslatorPB nnSpy =
-        DataNodeAdapter.spyOnBposToNN(primaryDN, nn0);
+        DataNodeTestUtils.spyOnBposToNN(primaryDN, nn0);
 
     // Delay the commitBlockSynchronization call
     DelayAnswer delayer = new DelayAnswer(LOG);
@@ -35,14 +35,14 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -225,7 +225,7 @@ public class TestStandbyIsHot {
         LOG.info("Got " + numReplicas + " locs: " + locs);
         if (numReplicas > expectedReplicas) {
           for (DataNode dn : cluster.getDataNodes()) {
-            DataNodeAdapter.triggerDeletionReport(dn);
+            DataNodeTestUtils.triggerDeletionReport(dn);
           }
         }
         return numReplicas == expectedReplicas;
@@ -36,6 +36,7 @@ import org.apache.hadoop.ha.HAServiceStatus;
 import org.apache.hadoop.ha.HAServiceTarget;
 import org.apache.hadoop.ha.HealthCheckFailedException;
 import org.apache.hadoop.ha.NodeFencer;
+import org.apache.hadoop.test.MockitoUtil;
 
 import org.junit.Before;
 import org.junit.Test;
@@ -79,7 +80,7 @@ public class TestDFSHAAdmin {
 
   @Before
   public void setup() throws IOException {
-    mockProtocol = Mockito.mock(HAServiceProtocol.class);
+    mockProtocol = MockitoUtil.mockProtocol(HAServiceProtocol.class);
     tool = new DFSHAAdmin() {
 
       @Override
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-hdfs-project</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop HDFS Project</description>
   <name>Apache Hadoop HDFS Project</name>
   <packaging>pom</packaging>
@@ -96,7 +96,7 @@ Trunk (unreleased changes)
     MAPREDUCE-1740. NPE in getMatchingLevelForNodes when node locations are
     variable depth (ahmed via tucu) [IMPORTANT: this is dead code in trunk]
 
-Release 0.23.3 - UNRELEASED
+Release 2.0.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
@@ -32,7 +32,7 @@
   <property name="Name" value="Hadoop-Mapred"/>
   <property name="name" value="hadoop-${module}"/>
   <!-- Need to change aop.xml project.version prop. synchronously -->
-  <property name="_version" value="0.24.0"/>
+  <property name="_version" value="3.0.0"/>
   <property name="version" value="${_version}-SNAPSHOT"/>
   <property name="final.name" value="${name}-${version}"/>
   <property name="test.final.name" value="${name}-test-${version}"/>
@@ -16,12 +16,12 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-mapreduce-client-app</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>hadoop-mapreduce-client-app</name>
 
   <properties>
@@ -112,7 +112,7 @@
             <target>
               <symlink link="${applink.base}.jar"
                        resource="mr-app.jar" failonerror="false"/>
-              <symlink link="${applink.base}-0.24.0-SNAPSHOT.jar"
+              <symlink link="${applink.base}-3.0.0-SNAPSHOT.jar"
                        resource="mr-app.jar" failonerror="false"/>
             </target>
           </configuration>
@@ -16,12 +16,12 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-mapreduce-client-common</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>hadoop-mapreduce-client-common</name>
 
   <properties>

@@ -16,12 +16,12 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-mapreduce-client-core</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>hadoop-mapreduce-client-core</name>
 
   <properties>

@@ -16,12 +16,12 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-mapreduce-client-hs</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>hadoop-mapreduce-client-hs</name>
 
   <properties>

@@ -16,12 +16,12 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>hadoop-mapreduce-client-jobclient</name>
 
   <properties>

@@ -16,12 +16,12 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-mapreduce-client-shuffle</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>hadoop-mapreduce-client-shuffle</name>
 
   <properties>
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-mapreduce-client</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>hadoop-mapreduce-client</name>
   <packaging>pom</packaging>
 

@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-mapreduce-examples</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop MapReduce Examples</description>
   <name>Apache Hadoop MapReduce Examples</name>
   <packaging>jar</packaging>
@@ -16,12 +16,12 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-api</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>hadoop-yarn-api</name>
 
   <properties>

@@ -16,12 +16,12 @@
   <parent>
     <artifactId>hadoop-yarn-applications</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-applications-distributedshell</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>hadoop-yarn-applications-distributedshell</name>
 
   <properties>

@@ -16,12 +16,12 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-applications</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>hadoop-yarn-applications</name>
   <packaging>pom</packaging>
 

@@ -16,12 +16,12 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-common</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>hadoop-yarn-common</name>
 
   <properties>

@@ -16,12 +16,12 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-server-common</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>hadoop-yarn-server-common</name>
 
   <properties>

@@ -16,12 +16,12 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-server-nodemanager</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>hadoop-yarn-server-nodemanager</name>
 
   <properties>

@@ -16,12 +16,12 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>hadoop-yarn-server-resourcemanager</name>
 
   <properties>

@@ -16,11 +16,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-server-tests</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>hadoop-yarn-server-tests</name>
 
   <properties>

@@ -16,12 +16,12 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-server-web-proxy</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>hadoop-yarn-server-web-proxy</name>
 
   <properties>

@@ -16,12 +16,12 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-server</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>hadoop-yarn-server</name>
   <packaging>pom</packaging>
 

@@ -16,12 +16,12 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-site</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <name>hadoop-yarn-site</name>
 
   <properties>

@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>hadoop-yarn</name>
 
@@ -28,7 +28,7 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
-      <version>0.24.0-SNAPSHOT</version>
+      <version>3.0.0-SNAPSHOT</version>
     </dependency>
   </dependencies>
 </project>

@@ -28,7 +28,7 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
-      <version>0.24.0-SNAPSHOT</version>
+      <version>3.0.0-SNAPSHOT</version>
     </dependency>
   </dependencies>
 </project>

@@ -41,8 +41,8 @@ ftplet-api.version=1.0.0
 ftpserver-core.version=1.0.0
 ftpserver-deprecated.version=1.0.0-M2
 
-hadoop-common.version=0.24.0-SNAPSHOT
-hadoop-hdfs.version=0.24.0-SNAPSHOT
+hadoop-common.version=3.0.0-SNAPSHOT
+hadoop-hdfs.version=3.0.0-SNAPSHOT
 
 hsqldb.version=1.8.0.10
 
@@ -82,5 +82,5 @@ xmlenc.version=0.52
 xerces.version=1.4.4
 
 jackson.version=1.8.8
-yarn.version=0.24.0-SNAPSHOT
-hadoop-mapreduce.version=0.24.0-SNAPSHOT
+yarn.version=3.0.0-SNAPSHOT
+hadoop-mapreduce.version=3.0.0-SNAPSHOT
@@ -18,12 +18,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-mapreduce</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>hadoop-mapreduce</name>
   <url>http://hadoop.apache.org/mapreduce/</url>

@@ -21,7 +21,7 @@
   <property name="aspectversion" value="1.6.5"/>
   <!-- TODO this has to be changed synchronously with build.xml version prop.-->
   <!-- this workarounds of test-patch setting its own 'version' -->
-  <property name="project.version" value="0.24.0-SNAPSHOT"/>
+  <property name="project.version" value="3.0.0-SNAPSHOT"/>
 
   <!-- Properties common for all fault injections -->
   <property name="build-fi.dir" value="${basedir}/build-fi"/>
@@ -18,12 +18,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-minicluster</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Mini-Cluster</description>

@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-project-dist</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop Project Dist POM</description>
   <name>Apache Hadoop Project Dist POM</name>
   <packaging>pom</packaging>

@@ -17,11 +17,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-main</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-project</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop Project POM</description>
   <name>Apache Hadoop Project POM</name>
   <packaging>pom</packaging>
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-archives</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop Archives</description>
   <name>Apache Hadoop Archives</name>
   <packaging>jar</packaging>

@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-distcp</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop Distributed Copy</description>
   <name>Apache Hadoop Distributed Copy</name>
   <packaging>jar</packaging>

@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-extras</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop Extras</description>
   <name>Apache Hadoop Extras</name>
   <packaging>jar</packaging>

@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-rumen</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop Rumen</description>
   <name>Apache Hadoop Rumen</name>
   <packaging>jar</packaging>

@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-streaming</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop MapReduce Streaming</description>
   <name>Apache Hadoop MapReduce Streaming</name>
   <packaging>jar</packaging>

@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-tools-dist</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop Tools Dist</description>
   <name>Apache Hadoop Tools Dist</name>
   <packaging>jar</packaging>

@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.24.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-tools</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop Tools</description>
   <name>Apache Hadoop Tools</name>
   <packaging>pom</packaging>
pom.xml
@@ -16,7 +16,7 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-main</artifactId>
-  <version>0.24.0-SNAPSHOT</version>
+  <version>3.0.0-SNAPSHOT</version>
   <description>Apache Hadoop Main</description>
   <name>Apache Hadoop Main</name>
   <packaging>pom</packaging>