HDFS-9427. HDFS should not default to ephemeral ports. Contributed by Xiaobing Zhou.

Jing Zhao 2016-04-22 15:14:40 -07:00
parent c610031cab
commit 63e5412f1a
40 changed files with 319 additions and 317 deletions

View File

@@ -93,7 +93,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
/**
* CallQueue related settings. These are not used directly, but rather
* combined with a namespace and port. For instance:
- * IPC_NAMESPACE + ".8020." + IPC_CALLQUEUE_IMPL_KEY
+ * IPC_NAMESPACE + ".9820." + IPC_CALLQUEUE_IMPL_KEY
*/
public static final String IPC_NAMESPACE = "ipc";
public static final String IPC_CALLQUEUE_IMPL_KEY = "callqueue.impl";
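A sketch (not part of the patch) of how the javadoc above combines these constants; with the values shown, the composed key is `ipc.9820.callqueue.impl`. `FairCallQueue` is a real queue implementation but is used here only as an illustration:

```java
import java.util.concurrent.BlockingQueue;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.FairCallQueue;

public class CallQueueKeySketch {
  public static void main(String[] args) {
    // "ipc" + ".9820." + "callqueue.impl" -> "ipc.9820.callqueue.impl"
    String key = CommonConfigurationKeys.IPC_NAMESPACE + ".9820."
        + CommonConfigurationKeys.IPC_CALLQUEUE_IMPL_KEY;
    Configuration conf = new Configuration();
    // The queue class is illustrative; any BlockingQueue implementation fits.
    conf.setClass(key, FairCallQueue.class, BlockingQueue.class);
    System.out.println(key + " = " + conf.get(key));
  }
}
```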

View File

@@ -685,8 +685,8 @@ public abstract class FileSystem extends Configured implements Closeable {
return new BlockLocation[0];
}
- String[] name = { "localhost:50010" };
- String[] host = { "localhost" };
+ String[] name = {"localhost:9866"};
+ String[] host = {"localhost"};
return new BlockLocation[] {
new BlockLocation(name, host, 0, file.getLen()) };
}

View File

@@ -82,11 +82,11 @@ import org.apache.hadoop.security.UserGroupInformation;
*
* <p>Examples:</p>
* <p><blockquote><pre>
- * $ bin/hadoop dfs -fs darwin:8020 -ls /data
- * list /data directory in dfs with namenode darwin:8020
+ * $ bin/hadoop dfs -fs darwin:9820 -ls /data
+ * list /data directory in dfs with namenode darwin:9820
*
- * $ bin/hadoop dfs -D fs.default.name=darwin:8020 -ls /data
- * list /data directory in dfs with namenode darwin:8020
+ * $ bin/hadoop dfs -D fs.default.name=darwin:9820 -ls /data
+ * list /data directory in dfs with namenode darwin:9820
*
* $ bin/hadoop dfs -conf core-site.xml -conf hdfs-site.xml -ls /data
* list /data directory in dfs with multiple conf files specified.

View File

@@ -318,7 +318,7 @@ Once the Hadoop cluster is up and running check the web-ui of the components as
| Daemon | Web Interface | Notes |
|:---- |:---- |:---- |
-| NameNode | http://nn_host:port/ | Default HTTP port is 50070. |
+| NameNode | http://nn_host:port/ | Default HTTP port is 9870. |
| ResourceManager | http://rm_host:port/ | Default HTTP port is 8088. |
| MapReduce JobHistory Server | http://jhs_host:port/ | Default HTTP port is 19888. |
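The new NameNode default can be sanity-checked by fetching the front page; a minimal sketch, with `nn_host` as a placeholder:

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class WebUiProbe {
  public static void main(String[] args) throws Exception {
    // 9870 is the NameNode HTTP default after this change; "nn_host" is a placeholder.
    URL ui = new URL("http://nn_host:9870/");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(ui.openStream(), StandardCharsets.UTF_8))) {
      System.out.println(in.readLine());
    }
  }
}
```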

View File

@@ -212,7 +212,7 @@ Usage:
Get/Set the log level for a Log identified by a qualified class name in the daemon.
- Example: $ bin/hadoop daemonlog -setlevel 127.0.0.1:50070 org.apache.hadoop.hdfs.server.namenode.NameNode DEBUG
+ Example: $ bin/hadoop daemonlog -setlevel 127.0.0.1:9870 org.apache.hadoop.hdfs.server.namenode.NameNode DEBUG
Files
-----

View File

@@ -256,15 +256,15 @@ The following settings allow configuring SSL access to the NameNode web UI (opti
| Parameter | Value | Notes |
|:-----------------------------|:------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `dfs.http.policy` | `HTTP_ONLY` or `HTTPS_ONLY` or `HTTP_AND_HTTPS` | `HTTPS_ONLY` turns off http access. This option takes precedence over the deprecated configuration dfs.https.enable and hadoop.ssl.enabled. If using SASL to authenticate data transfer protocol instead of running DataNode as root and using privileged ports, then this property must be set to `HTTPS_ONLY` to guarantee authentication of HTTP servers. (See `dfs.data.transfer.protection`.) |
-| `dfs.namenode.https-address` | `0.0.0.0:50470` | This parameter is used in non-HA mode and without federation. See [HDFS High Availability](../hadoop-hdfs/HDFSHighAvailabilityWithNFS.html#Deployment) and [HDFS Federation](../hadoop-hdfs/Federation.html#Federation_Configuration) for details. |
+| `dfs.namenode.https-address` | `0.0.0.0:9871` | This parameter is used in non-HA mode and without federation. See [HDFS High Availability](../hadoop-hdfs/HDFSHighAvailabilityWithNFS.html#Deployment) and [HDFS Federation](../hadoop-hdfs/Federation.html#Federation_Configuration) for details. |
| `dfs.https.enable` | `true` | This value is deprecated. Use `dfs.http.policy`. |
### Secondary NameNode
| Parameter | Value | Notes |
|:------------------------------------------------------------|:-----------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `dfs.namenode.secondary.http-address` | `0.0.0.0:50090` | HTTP web UI address for the Secondary NameNode. |
-| `dfs.namenode.secondary.https-address` | `0.0.0.0:50091` | HTTPS web UI address for the Secondary NameNode. |
+| `dfs.namenode.secondary.http-address` | `0.0.0.0:9868` | HTTP web UI address for the Secondary NameNode. |
+| `dfs.namenode.secondary.https-address` | `0.0.0.0:9869` | HTTPS web UI address for the Secondary NameNode. |
| `dfs.secondary.namenode.keytab.file` | `/etc/security/keytab/sn.service.keytab` | Kerberos keytab file for the Secondary NameNode. |
| `dfs.secondary.namenode.kerberos.principal` | `sn/_HOST@REALM.TLD` | Kerberos principal name for the Secondary NameNode. |
| `dfs.secondary.namenode.kerberos.internal.spnego.principal` | `HTTP/_HOST@REALM.TLD` | The server principal used by the Secondary NameNode for web UI SPNEGO authentication. The SPNEGO server principal begins with the prefix `HTTP/` by convention. If the value is `'*'`, the web server will attempt to login with every principal specified in the keytab file `dfs.web.authentication.kerberos.keytab`. For most deployments this can be set to `${dfs.web.authentication.kerberos.principal}` i.e use the value of `dfs.web.authentication.kerberos.principal`. |
@@ -286,7 +286,7 @@ The following settings allow configuring SSL access to the NameNode web UI (opti
| `dfs.datanode.data.dir.perm` | `700` | |
| `dfs.datanode.address` | `0.0.0.0:1004` | Secure DataNode must use privileged port in order to assure that the server was started securely. This means that the server must be started via jsvc. Alternatively, this must be set to a non-privileged port if using SASL to authenticate data transfer protocol. (See `dfs.data.transfer.protection`.) |
| `dfs.datanode.http.address` | `0.0.0.0:1006` | Secure DataNode must use privileged port in order to assure that the server was started securely. This means that the server must be started via jsvc. |
-| `dfs.datanode.https.address` | `0.0.0.0:50475` | HTTPS web UI address for the Data Node. |
+| `dfs.datanode.https.address` | `0.0.0.0:9865` | HTTPS web UI address for the Data Node. |
| `dfs.datanode.kerberos.principal` | `dn/_HOST@REALM.TLD` | Kerberos principal name for the DataNode. |
| `dfs.datanode.keytab.file` | `/etc/security/keytab/dn.service.keytab` | Kerberos keytab file for the DataNode. |
| `dfs.encrypt.data.transfer` | `false` | set to `true` when using data encryption |
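As a hedged sketch of the SASL note above, the three settings that travel together; the protection level is an assumed example value, and in practice these belong in hdfs-site.xml:

```java
import org.apache.hadoop.conf.Configuration;

public class SaslDataTransferSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Assumed level; "integrity" and "privacy" are the stronger alternatives.
    conf.set("dfs.data.transfer.protection", "authentication");
    // Required alongside SASL data transfer, per the dfs.http.policy note above.
    conf.set("dfs.http.policy", "HTTPS_ONLY");
    // SASL permits a non-privileged port, e.g. the new 9866 default.
    conf.set("dfs.datanode.address", "0.0.0.0:9866");
  }
}
```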

View File

@@ -144,7 +144,7 @@ The following instructions are to run a MapReduce job locally. If you want to ex
3. Browse the web interface for the NameNode; by default it is available at:
- * NameNode - `http://localhost:50070/`
+ * NameNode - `http://localhost:9870/`
4. Make the HDFS directories required to execute MapReduce jobs:

View File

@@ -63,7 +63,7 @@ You need to run the command against all servers if you want to update the config
ID CLASS
1 org.apache.htrace.core.LocalFileSpanReceiver
- $ hadoop trace -list -host 192.168.56.2:50020
+ $ hadoop trace -list -host 192.168.56.2:9867
ID CLASS
1 org.apache.htrace.core.LocalFileSpanReceiver

View File

@@ -278,7 +278,7 @@ of `getFileBlockLocations()` on a directory is []
If the filesystem is not location aware, it SHOULD return
[
- BlockLocation(["localhost:50010"] ,
+ BlockLocation(["localhost:9866"] ,
["localhost"],
["/default/localhost"]
0, F.getLen())
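Rendered as Java, the fallback this specification describes (mirroring the `FileSystem` change earlier in this commit; `file` stands in for the `FileStatus` being queried):

```java
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;

public class DefaultBlockLocationsSketch {
  // One synthetic location spanning the whole file, using the new 9866 port.
  static BlockLocation[] locations(FileStatus file) {
    return new BlockLocation[] {
        new BlockLocation(new String[] {"localhost:9866"},
            new String[] {"localhost"},
            new String[] {"/default/localhost"},
            0, file.getLen())
    };
  }
}
```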

View File

@@ -47,6 +47,6 @@ public class TestDelegateToFileSystem {
@Test
public void testDefaultURIwithPort() throws Exception {
- testDefaultUriInternal("hdfs://dummyhost:8020");
+ testDefaultUriInternal("hdfs://dummyhost:9820");
}
}

View File

@@ -38,7 +38,7 @@ public class TestSshFenceByTcpPort {
private static String TEST_FENCING_HOST = System.getProperty(
"test.TestSshFenceByTcpPort.host", "localhost");
private static final String TEST_FENCING_PORT = System.getProperty(
- "test.TestSshFenceByTcpPort.port", "8020");
+ "test.TestSshFenceByTcpPort.port", "9820");
private static final String TEST_KEYFILE = System.getProperty(
"test.TestSshFenceByTcpPort.key");

View File

@@ -65,12 +65,12 @@ public interface HdfsClientConfigKeys {
String PREFIX = "dfs.client.";
String DFS_NAMESERVICES = "dfs.nameservices";
- int DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070;
+ int DFS_NAMENODE_HTTP_PORT_DEFAULT = 9870;
String DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
- int DFS_NAMENODE_HTTPS_PORT_DEFAULT = 50470;
+ int DFS_NAMENODE_HTTPS_PORT_DEFAULT = 9871;
String DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
- int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020;
+ int DFS_NAMENODE_RPC_PORT_DEFAULT = 9820;
String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY =
"dfs.namenode.kerberos.principal";
String DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";

View File

@@ -166,9 +166,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT = 0;
public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY =
HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
- public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50090";
+ public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT =
+ "0.0.0.0:9868";
public static final String DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY = "dfs.namenode.secondary.https-address";
- public static final String DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:50091";
+ public static final String DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT =
+ "0.0.0.0:9869";
public static final String DFS_NAMENODE_CHECKPOINT_QUIET_MULTIPLIER_KEY = "dfs.namenode.checkpoint.check.quiet-multiplier";
public static final double DFS_NAMENODE_CHECKPOINT_QUIET_MULTIPLIER_DEFAULT = 1.5;
public static final String DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY = "dfs.namenode.checkpoint.check.period";
@@ -467,7 +469,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_MOVER_RETRY_MAX_ATTEMPTS_DEFAULT = 10;
public static final String DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
- public static final int DFS_DATANODE_DEFAULT_PORT = 50010;
+ public static final int DFS_DATANODE_DEFAULT_PORT = 9866;
public static final String DFS_DATANODE_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_DEFAULT_PORT;
public static final String DFS_DATANODE_DATA_DIR_PERMISSION_KEY = "dfs.datanode.data.dir.perm";
public static final String DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT = "700";
@@ -499,7 +501,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_DATANODE_HANDLER_COUNT_KEY = "dfs.datanode.handler.count";
public static final int DFS_DATANODE_HANDLER_COUNT_DEFAULT = 10;
public static final String DFS_DATANODE_HTTP_ADDRESS_KEY = "dfs.datanode.http.address";
- public static final int DFS_DATANODE_HTTP_DEFAULT_PORT = 50075;
+ public static final int DFS_DATANODE_HTTP_DEFAULT_PORT = 9864;
public static final String DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_HTTP_DEFAULT_PORT;
public static final String DFS_DATANODE_MAX_RECEIVER_THREADS_KEY =
HdfsClientConfigKeys.DeprecatedKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY;
@@ -538,10 +540,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_DEFAULT_CHUNK_VIEW_SIZE_DEFAULT = 32*1024;
public static final String DFS_DATANODE_HTTPS_ADDRESS_KEY = "dfs.datanode.https.address";
public static final String DFS_DATANODE_HTTPS_PORT_KEY = "datanode.https.port";
- public static final int DFS_DATANODE_HTTPS_DEFAULT_PORT = 50475;
+ public static final int DFS_DATANODE_HTTPS_DEFAULT_PORT = 9865;
public static final String DFS_DATANODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_HTTPS_DEFAULT_PORT;
public static final String DFS_DATANODE_IPC_ADDRESS_KEY = "dfs.datanode.ipc.address";
- public static final int DFS_DATANODE_IPC_DEFAULT_PORT = 50020;
+ public static final int DFS_DATANODE_IPC_DEFAULT_PORT = 9867;
public static final String DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_IPC_DEFAULT_PORT;
public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY = "dfs.datanode.min.supported.namenode.version";
public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "3.0.0-SNAPSHOT";

View File

@@ -52,7 +52,7 @@ import org.apache.hadoop.util.ExitUtil;
* hadoop:service=NameNode,name=FSNamesystemState (static)
* hadoop:service=NameNode,name=NameNodeActivity (dynamic)
* hadoop:service=NameNode,name=RpcActivityForPort9000 (dynamic)
- * hadoop:service=DataNode,name=RpcActivityForPort50020 (dynamic)
+ * hadoop:service=DataNode,name=RpcActivityForPort9867 (dynamic)
* hadoop:name=service=DataNode,FSDatasetState-UndefinedStorageId663800459
* (static)
* hadoop:service=DataNode,name=DataNodeActivity-UndefinedStorageId-520845215
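A sketch of reading one of these MBeans from inside the daemon's JVM; the object name uses the DataNode IPC port introduced by this change, and the available attributes depend on the running daemon:

```java
import java.lang.management.ManagementFactory;

import javax.management.MBeanAttributeInfo;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class RpcActivityDump {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // Dynamic name: the suffix tracks the server's actual RPC port.
    ObjectName rpc =
        new ObjectName("hadoop:service=DataNode,name=RpcActivityForPort9867");
    for (MBeanAttributeInfo a : mbs.getMBeanInfo(rpc).getAttributes()) {
      System.out.println(a.getName() + " = " + mbs.getAttribute(rpc, a.getName()));
    }
  }
}
```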

View File

@@ -107,7 +107,7 @@
<property>
<name>dfs.namenode.secondary.http-address</name>
- <value>0.0.0.0:50090</value>
+ <value>0.0.0.0:9868</value>
<description>
The secondary namenode http server address and port.
</description>
@@ -115,7 +115,7 @@
<property>
<name>dfs.namenode.secondary.https-address</name>
- <value>0.0.0.0:50091</value>
+ <value>0.0.0.0:9869</value>
<description>
The secondary namenode HTTPS server address and port.
</description>
@@ -123,7 +123,7 @@
<property>
<name>dfs.datanode.address</name>
- <value>0.0.0.0:50010</value>
+ <value>0.0.0.0:9866</value>
<description>
The datanode server address and port for data transfer.
</description>
@@ -131,7 +131,7 @@
<property>
<name>dfs.datanode.http.address</name>
- <value>0.0.0.0:50075</value>
+ <value>0.0.0.0:9864</value>
<description>
The datanode http server address and port.
</description>
@@ -139,7 +139,7 @@
<property>
<name>dfs.datanode.ipc.address</name>
- <value>0.0.0.0:50020</value>
+ <value>0.0.0.0:9867</value>
<description>
The datanode ipc server address and port.
</description>
@@ -153,7 +153,7 @@
<property>
<name>dfs.namenode.http-address</name>
- <value>0.0.0.0:50070</value>
+ <value>0.0.0.0:9870</value>
<description>
The address and the base port where the dfs namenode web ui will listen on.
</description>
@@ -229,13 +229,13 @@
<property>
<name>dfs.datanode.https.address</name>
- <value>0.0.0.0:50475</value>
+ <value>0.0.0.0:9865</value>
<description>The datanode secure http server address and port.</description>
</property>
<property>
<name>dfs.namenode.https-address</name>
- <value>0.0.0.0:50470</value>
+ <value>0.0.0.0:9871</value>
<description>The namenode secure http server address and port.</description>
</property>

View File

@@ -142,15 +142,15 @@ The order in which you set these configurations is unimportant, but the values y
<property>
<name>dfs.namenode.rpc-address.mycluster.nn1</name>
- <value>machine1.example.com:8020</value>
+ <value>machine1.example.com:9820</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn2</name>
- <value>machine2.example.com:8020</value>
+ <value>machine2.example.com:9820</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn3</name>
- <value>machine3.example.com:8020</value>
+ <value>machine3.example.com:9820</value>
</property>
**Note:** You may similarly configure the "**servicerpc-address**" setting if
@@ -163,15 +163,15 @@ The order in which you set these configurations is unimportant, but the values y
<property>
<name>dfs.namenode.http-address.mycluster.nn1</name>
- <value>machine1.example.com:50070</value>
+ <value>machine1.example.com:9870</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.nn2</name>
- <value>machine2.example.com:50070</value>
+ <value>machine2.example.com:9870</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.nn3</name>
- <value>machine3.example.com:50070</value>
+ <value>machine3.example.com:9870</value>
</property>
**Note:** If you have Hadoop's security features enabled, you should also set

View File

@@ -155,15 +155,15 @@ The order in which you set these configurations is unimportant, but the values y
<property>
<name>dfs.namenode.rpc-address.mycluster.nn1</name>
- <value>machine1.example.com:8020</value>
+ <value>machine1.example.com:9820</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn2</name>
- <value>machine2.example.com:8020</value>
+ <value>machine2.example.com:9820</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn3</name>
- <value>machine3.example.com:8020</value>
+ <value>machine3.example.com:9820</value>
</property>
**Note:** You may similarly configure the "**servicerpc-address**" setting if you so desire.
@@ -175,15 +175,15 @@ The order in which you set these configurations is unimportant, but the values y
<property>
<name>dfs.namenode.http-address.mycluster.nn1</name>
- <value>machine1.example.com:50070</value>
+ <value>machine1.example.com:9870</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.nn2</name>
- <value>machine2.example.com:50070</value>
+ <value>machine2.example.com:9870</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.nn3</name>
- <value>machine3.example.com:50070</value>
+ <value>machine3.example.com:9870</value>
</property>
**Note:** If you have Hadoop's security features enabled, you should also set

View File

@@ -350,7 +350,7 @@ We are going to remove the file test1.
The comment below shows that the file has been moved to Trash directory.
$ hadoop fs -rm -r delete/test1
- Moved: hdfs://localhost:8020/user/hadoop/delete/test1 to trash at: hdfs://localhost:8020/user/hadoop/.Trash/Current
+ Moved: hdfs://localhost:9820/user/hadoop/delete/test1 to trash at: hdfs://localhost:9820/user/hadoop/.Trash/Current
Now we are going to remove the file with the skipTrash option,
which will not send the file to Trash. It will be completely removed from HDFS.

View File

@@ -121,7 +121,7 @@ The rest of this document assumes the user is able to set up and run a HDFS with
Web Interface
-------------
- NameNode and DataNode each run an internal web server in order to display basic information about the current status of the cluster. With the default configuration, the NameNode front page is at `http://namenode-name:50070/`. It lists the DataNodes in the cluster and basic statistics of the cluster. The web interface can also be used to browse the file system (using "Browse the file system" link on the NameNode front page).
+ NameNode and DataNode each run an internal web server in order to display basic information about the current status of the cluster. With the default configuration, the NameNode front page is at `http://namenode-name:9870/`. It lists the DataNodes in the cluster and basic statistics of the cluster. The web interface can also be used to browse the file system (using "Browse the file system" link on the NameNode front page).
Shell Commands
--------------

View File

@@ -210,11 +210,11 @@ The mount tables can be described in `core-site.xml` but it is better to use ind
In the file `mountTable.xml`, there is a definition of the mount table "ClusterX" for the hypothetical cluster that is a federation of the three namespace volumes managed by the three namenodes
- 1. nn1-clusterx.example.com:8020,
- 2. nn2-clusterx.example.com:8020, and
- 3. nn3-clusterx.example.com:8020.
+ 1. nn1-clusterx.example.com:9820,
+ 2. nn2-clusterx.example.com:9820, and
+ 3. nn3-clusterx.example.com:9820.
- Here `/home` and `/tmp` are in the namespace managed by namenode nn1-clusterx.example.com:8020, and projects `/foo` and `/bar` are hosted on the other namenodes of the federated cluster. The home directory base path is set to `/home` so that each user can access its home directory using the getHomeDirectory() method defined in [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html)/[FileContext](../../api/org/apache/hadoop/fs/FileContext.html).
+ Here `/home` and `/tmp` are in the namespace managed by namenode nn1-clusterx.example.com:9820, and projects `/foo` and `/bar` are hosted on the other namenodes of the federated cluster. The home directory base path is set to `/home` so that each user can access its home directory using the getHomeDirectory() method defined in [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html)/[FileContext](../../api/org/apache/hadoop/fs/FileContext.html).
```xml
<configuration>
@@ -224,19 +224,19 @@ Here `/home` and `/tmp` are in the namespace managed by namenode nn1-clusterx.ex
</property>
<property>
<name>fs.viewfs.mounttable.ClusterX.link./home</name>
- <value>hdfs://nn1-clusterx.example.com:8020/home</value>
+ <value>hdfs://nn1-clusterx.example.com:9820/home</value>
</property>
<property>
<name>fs.viewfs.mounttable.ClusterX.link./tmp</name>
- <value>hdfs://nn1-clusterx.example.com:8020/tmp</value>
+ <value>hdfs://nn1-clusterx.example.com:9820/tmp</value>
</property>
<property>
<name>fs.viewfs.mounttable.ClusterX.link./projects/foo</name>
- <value>hdfs://nn2-clusterx.example.com:8020/projects/foo</value>
+ <value>hdfs://nn2-clusterx.example.com:9820/projects/foo</value>
</property>
<property>
<name>fs.viewfs.mounttable.ClusterX.link./projects/bar</name>
- <value>hdfs://nn3-clusterx.example.com:8020/projects/bar</value>
+ <value>hdfs://nn3-clusterx.example.com:9820/projects/bar</value>
</property>
</configuration>
```

View File

@@ -312,7 +312,7 @@ public class TestDFSClientFailover {
conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + service,
namenode);
conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + service + "."
- + namenode, "localhost:8020");
+ + namenode, "localhost:9820");
// call createProxy implicitly and explicitly
Path p = new Path("/");

View File

@@ -470,7 +470,7 @@ public class TestDFSUtil {
DFS_NAMENODE_HTTP_PORT_DEFAULT, null, null, null), httpport);
URI httpAddress = DFSUtil.getInfoServer(new InetSocketAddress(
- "localhost", 8020), conf, "http");
+ "localhost", 9820), conf, "http");
assertEquals(
URI.create("http://localhost:" + DFS_NAMENODE_HTTP_PORT_DEFAULT),
httpAddress);
@@ -480,10 +480,10 @@
public void testHANameNodesWithFederation() throws URISyntaxException {
HdfsConfiguration conf = new HdfsConfiguration();
- final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
- final String NS1_NN2_HOST = "ns1-nn2.example.com:8020";
- final String NS2_NN1_HOST = "ns2-nn1.example.com:8020";
- final String NS2_NN2_HOST = "ns2-nn2.example.com:8020";
+ final String NS1_NN1_HOST = "ns1-nn1.example.com:9820";
+ final String NS1_NN2_HOST = "ns1-nn2.example.com:9820";
+ final String NS2_NN1_HOST = "ns2-nn1.example.com:9820";
+ final String NS2_NN2_HOST = "ns2-nn2.example.com:9820";
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
// Two nameservices, each with two NNs.
@@ -543,10 +543,10 @@
HdfsConfiguration conf = new HdfsConfiguration();
// One nameservice with two NNs
- final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
- final String NS1_NN1_HOST_SVC = "ns1-nn2.example.com:8021";
- final String NS1_NN2_HOST = "ns1-nn1.example.com:8020";
- final String NS1_NN2_HOST_SVC = "ns1-nn2.example.com:8021";
+ final String NS1_NN1_HOST = "ns1-nn1.example.com:9820";
+ final String NS1_NN1_HOST_SVC = "ns1-nn2.example.com:9821";
+ final String NS1_NN2_HOST = "ns1-nn1.example.com:9820";
+ final String NS1_NN2_HOST_SVC = "ns1-nn2.example.com:9821";
conf.set(DFS_NAMESERVICES, "ns1");
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
@@ -581,8 +581,8 @@
@Test
public void testGetHaNnHttpAddresses() throws IOException {
final String LOGICAL_HOST_NAME = "ns1";
- final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
- final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
+ final String NS1_NN1_ADDR = "ns1-nn1.example.com:9820";
+ final String NS1_NN2_ADDR = "ns1-nn2.example.com:9820";
Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
@@ -625,12 +625,12 @@
public void testGetNNUris() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
- final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
- final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
- final String NS2_NN_ADDR = "ns2-nn.example.com:8020";
- final String NN1_ADDR = "nn.example.com:8020";
- final String NN1_SRVC_ADDR = "nn.example.com:8021";
- final String NN2_ADDR = "nn2.example.com:8020";
+ final String NS1_NN1_ADDR = "ns1-nn1.example.com:9820";
+ final String NS1_NN2_ADDR = "ns1-nn2.example.com:9820";
+ final String NS2_NN_ADDR = "ns2-nn.example.com:9820";
+ final String NN1_ADDR = "nn.example.com:9820";
+ final String NN1_SRVC_ADDR = "nn.example.com:9821";
+ final String NN2_ADDR = "nn2.example.com:9820";
conf.set(DFS_NAMESERVICES, "ns1");
conf.set(DFSUtil.addKeySuffixes(
@@ -762,7 +762,7 @@
// Make sure when config FS_DEFAULT_NAME_KEY using IP address,
// it will automatically convert it to hostname
HdfsConfiguration conf = new HdfsConfiguration();
- conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
+ conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:9820");
Collection<URI> uris = DFSUtil.getNameServiceUris(conf);
assertEquals(1, uris.size());
for (URI uri : uris) {

View File

@@ -1105,7 +1105,7 @@ public class TestQuota {
@Test
public void testSetSpaceQuotaWhenStorageTypeIsWrong() throws Exception {
Configuration conf = new HdfsConfiguration();
- conf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
+ conf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:9820");
DFSAdmin admin = new DFSAdmin(conf);
ByteArrayOutputStream err = new ByteArrayOutputStream();
PrintStream oldErr = System.err;

View File

@@ -66,7 +66,7 @@ public class TestReplaceDatanodeOnFailure {
final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
datanodes[0] = new DatanodeInfo[0];
for(int i = 0; i < infos.length; ) {
- infos[i] = DFSTestUtil.getLocalDatanodeInfo(50020 + i);
+ infos[i] = DFSTestUtil.getLocalDatanodeInfo(9867 + i);
i++;
datanodes[i] = new DatanodeInfo[i];
System.arraycopy(infos, 0, datanodes[i], 0, datanodes[i].length);

View File

@@ -58,8 +58,8 @@ public class TestPendingDataNodeMessages {
Queue<ReportedBlockInfo> q =
msgs.takeBlockQueue(block1Gs2DifferentInstance);
assertEquals(
- "ReportedBlockInfo [block=blk_1_1, dn=127.0.0.1:50010, reportedState=FINALIZED]," +
- "ReportedBlockInfo [block=blk_1_2, dn=127.0.0.1:50010, reportedState=FINALIZED]",
+ "ReportedBlockInfo [block=blk_1_1, dn=127.0.0.1:9866, reportedState=FINALIZED]," +
+ "ReportedBlockInfo [block=blk_1_2, dn=127.0.0.1:9866, reportedState=FINALIZED]",
Joiner.on(",").join(q));
assertEquals(0, msgs.count());

View File

@@ -98,7 +98,7 @@ public class TestBlockPoolManager {
public void testSimpleSingleNS() throws Exception {
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY,
- "hdfs://mock1:8020");
+ "hdfs://mock1:9820");
bpm.refreshNamenodes(conf);
assertEquals("create #1\n", log.toString());
}
@@ -108,8 +108,8 @@
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_NAMESERVICES,
"ns1,ns2");
- addNN(conf, "ns1", "mock1:8020");
- addNN(conf, "ns2", "mock1:8020");
+ addNN(conf, "ns1", "mock1:9820");
+ addNN(conf, "ns2", "mock1:9820");
bpm.refreshNamenodes(conf);
assertEquals(
"create #1\n" +
@@ -139,9 +139,9 @@
public void testInternalNameService() throws Exception {
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1,ns2,ns3");
- addNN(conf, "ns1", "mock1:8020");
- addNN(conf, "ns2", "mock1:8020");
- addNN(conf, "ns3", "mock1:8020");
+ addNN(conf, "ns1", "mock1:9820");
+ addNN(conf, "ns2", "mock1:9820");
+ addNN(conf, "ns3", "mock1:9820");
conf.set(DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY, "ns1");
bpm.refreshNamenodes(conf);
assertEquals("create #1\n", log.toString());

View File

@@ -163,8 +163,8 @@ public class TestAllowFormat {
// is configured in HA, then only DFS_NAMENODE_SHARED_EDITS_DIR_KEY
// is considered.
String localhost = "127.0.0.1";
- InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
- InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
+ InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 9820);
+ InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9820);
HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
conf.set(DFS_NAMENODE_NAME_DIR_KEY,

View File

@@ -57,10 +57,10 @@ public class TestRequestHedgingProxyProvider {
DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns, "nn1,nn2");
conf.set(
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn1",
- "machine1.foo.bar:8020");
+ "machine1.foo.bar:9820");
conf.set(
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn2",
- "machine2.foo.bar:8020");
+ "machine2.foo.bar:9820");
}
@Test
@@ -217,7 +217,7 @@
conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns,
"nn1,nn2,nn3");
conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn3",
- "machine3.foo.bar:8020");
+ "machine3.foo.bar:9820");
final AtomicInteger counter = new AtomicInteger(0);
final int[] isGood = {1};

View File

@@ -30,7 +30,7 @@ JIRA - HDFS 7684
<property>
<name>dfs.namenode.secondary.http-address</name>
- <value>0.0.0.0:50090 </value>
+ <value>0.0.0.0:9868 </value>
<description>
The secondary namenode http server address and port.
</description>
@@ -38,7 +38,7 @@ JIRA - HDFS 7684
<property>
<name>dfs.namenode.secondary.https-address</name>
- <value>0.0.0.0:50091 </value>
+ <value>0.0.0.0:9869 </value>
<description>
The secondary namenode HTTPS server address and port.
</description>
@@ -46,7 +46,7 @@ JIRA - HDFS 7684
<property>
<name>dfs.datanode.address</name>
- <value>0.0.0.0:50010 </value>
+ <value>0.0.0.0:9866 </value>
<description>
The datanode server address and port for data transfer.
</description>
@@ -54,7 +54,7 @@ JIRA - HDFS 7684
<property>
<name>dfs.datanode.http.address</name>
- <value>0.0.0.0:50075 </value>
+ <value>0.0.0.0:9864 </value>
<description>
The datanode http server address and port.
</description>
@@ -62,7 +62,7 @@ JIRA - HDFS 7684
<property>
<name>dfs.datanode.ipc.address</name>
- <value>0.0.0.0:50020 </value>
+ <value>0.0.0.0:9867 </value>
<description>
The datanode ipc server address and port.
</description>
@@ -76,7 +76,7 @@ JIRA - HDFS 7684
<property>
<name>dfs.namenode.http-address</name>
- <value>0.0.0.0:50070 </value>
+ <value>0.0.0.0:9870 </value>
<description>
The address and the base port where the dfs namenode web ui will listen on.
</description>
@@ -84,13 +84,13 @@ JIRA - HDFS 7684
<property>
<name>dfs.datanode.https.address</name>
- <value>0.0.0.0:50475 </value>
+ <value>0.0.0.0:9865 </value>
<description>The datanode secure http server address and port.</description>
</property>
<property>
<name>dfs.namenode.https-address</name>
- <value>0.0.0.0:50470 </value>
+ <value>0.0.0.0:9871 </value>
<description>The namenode secure http server address and port.</description>
</property>

View File

@@ -246,7 +246,7 @@ public class TestFileInputFormat {
public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len)
throws IOException {
return new BlockLocation[] {
- new BlockLocation(new String[] { "localhost:50010", "otherhost:50010" },
+ new BlockLocation(new String[] { "localhost:9866", "otherhost:9866" },
new String[] { "localhost", "otherhost" }, new String[] { "localhost" },
new String[0], 0, len, false) };
}

View File

@@ -425,7 +425,7 @@ public class TestFileInputFormat {
public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len)
throws IOException {
return new BlockLocation[] {
- new BlockLocation(new String[] { "localhost:50010", "otherhost:50010" },
+ new BlockLocation(new String[] { "localhost:9866", "otherhost:9866" },
new String[] { "localhost", "otherhost" }, new String[] { "localhost" },
new String[0], 0, len, false) }; }

View File

@@ -28,7 +28,7 @@
<property><!--Loaded from job.xml--><name>yarn.nodemanager.resource.memory-mb</name><value>8192</value></property>
<property><!--Loaded from job.xml--><name>io.map.index.interval</name><value>128</value></property>
<property><!--Loaded from job.xml--><name>s3.client-write-packet-size</name><value>65536</value></property>
- <property><!--Loaded from job.xml--><name>dfs.namenode.http-address</name><value>0.0.0.0:50070</value></property>
+ <property><!--Loaded from job.xml--><name>dfs.namenode.http-address</name><value>0.0.0.0:9870</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.task.files.preserve.failedtasks</name><value>false</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.job.reduce.class</name><value>org.apache.hadoop.mapreduce.SleepJob$SleepReducer</value></property>
<property><!--Loaded from job.xml--><name>hadoop.hdfs.configuration.version</name><value>1</value></property>
@@ -48,7 +48,7 @@
<property><!--Loaded from job.xml--><name>s3native.blocksize</name><value>67108864</value></property>
<property><!--Loaded from job.xml--><name>dfs.namenode.edits.dir</name><value>${dfs.namenode.name.dir}</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.job.map.class</name><value>org.apache.hadoop.mapreduce.SleepJob$SleepMapper</value></property>
- <property><!--Loaded from job.xml--><name>dfs.datanode.http.address</name><value>0.0.0.0:50075</value></property>
+ <property><!--Loaded from job.xml--><name>dfs.datanode.http.address</name><value>0.0.0.0:9864</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.jobhistory.task.numberprogresssplits</name><value>12</value></property>
<property><!--Loaded from job.xml--><name>yarn.acl.enable</name><value>true</value></property>
<property><!--Loaded from job.xml--><name>yarn.nodemanager.localizer.fetch.thread-count</name><value>4</value></property>
@@ -156,7 +156,7 @@
<property><!--Loaded from job.xml--><name>dfs.datanode.data.dir</name><value>file:///home/user/hadoop-user/dfs/data</value></property>
<property><!--Loaded from job.xml--><name>dfs.namenode.replication.interval</name><value>3</value></property>
<property><!--Loaded from job.xml--><name>fs.file.impl</name><value>org.apache.hadoop.fs.LocalFileSystem</value></property>
- <property><!--Loaded from job.xml--><name>dfs.namenode.https-address</name><value>0.0.0.0:50470</value></property>
+ <property><!--Loaded from job.xml--><name>dfs.namenode.https-address</name><value>0.0.0.0:9871</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.task.skip.start.attempts</name><value>2</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.persist.jobstatus.dir</name><value>/jobtracker/jobsInfo</value></property>
<property><!--Loaded from job.xml--><name>ipc.client.kill.max</name><value>10</value></property>
@@ -255,7 +255,7 @@
<property><!--Loaded from job.xml--><name>kfs.blocksize</name><value>67108864</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.job.ubertask.maxmaps</name><value>9</value></property>
<property><!--Loaded from job.xml--><name>yarn.nodemanager.heartbeat.interval-ms</name><value>1000</value></property>
- <property><!--Loaded from job.xml--><name>dfs.namenode.secondary.http-address</name><value>0.0.0.0:50090</value></property>
+ <property><!--Loaded from job.xml--><name>dfs.namenode.secondary.http-address</name><value>0.0.0.0:9868</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.job.userlog.retain.hours</name><value>24</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.task.timeout</name><value>600000</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.jobhistory.loadedjobs.cache.size</name><value>1</value></property>
@@ -318,9 +318,9 @@
<property><!--Loaded from job.xml--><name>fs.s3n.block.size</name><value>67108864</value></property>
<property><!--Loaded from job.xml--><name>fs.ftp.host</name><value>0.0.0.0</value></property>
<property><!--Loaded from job.xml--><name>hadoop.security.group.mapping</name><value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value></property>
- <property><!--Loaded from job.xml--><name>dfs.datanode.address</name><value>0.0.0.0:50010</value></property>
+ <property><!--Loaded from job.xml--><name>dfs.datanode.address</name><value>0.0.0.0:9866</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.map.skip.maxrecords</name><value>0</value></property>
- <property><!--Loaded from job.xml--><name>dfs.datanode.https.address</name><value>0.0.0.0:50475</value></property>
+ <property><!--Loaded from job.xml--><name>dfs.datanode.https.address</name><value>0.0.0.0:9865</value></property>
<property><!--Loaded from job.xml--><name>fs.s3.impl</name><value>org.apache.hadoop.fs.s3.S3FileSystem</value></property>
<property><!--Loaded from job.xml--><name>file.replication</name><value>1</value></property>
<property><!--Loaded from job.xml--><name>yarn.resourcemanager.resource-tracker.address</name><value>0.0.0.0:8025</value></property>
@@ -379,7 +379,7 @@
<property><!--Loaded from job.xml--><name>mapreduce.tasktracker.dns.nameserver</name><value>default</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.map.output.compress</name><value>false</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.job.counters.limit</name><value>120</value></property>
- <property><!--Loaded from job.xml--><name>dfs.datanode.ipc.address</name><value>0.0.0.0:50020</value></property>
+ <property><!--Loaded from job.xml--><name>dfs.datanode.ipc.address</name><value>0.0.0.0:9867</value></property>
<property><!--Loaded from job.xml--><name>fs.webhdfs.impl</name><value>org.apache.hadoop.hdfs.web.WebHdfsFileSystem</value></property>
<property><!--Loaded from job.xml--><name>fs.swebhdfs.impl</name><value>org.apache.hadoop.hdfs.web.SWebHdfsFileSystem</value></property>
<property><!--Loaded from job.xml--><name>yarn.nodemanager.delete.debug-delay-sec</name><value>0</value></property>

View File

@@ -63,8 +63,8 @@ $H3 Basic Usage
The most common invocation of DistCp is an inter-cluster copy:
- bash$ hadoop distcp hdfs://nn1:8020/foo/bar \
- hdfs://nn2:8020/bar/foo
+ bash$ hadoop distcp hdfs://nn1:9820/foo/bar \
+ hdfs://nn2:9820/bar/foo
This will expand the namespace under `/foo/bar` on nn1 into a temporary file,
partition its contents among a set of map tasks, and start a copy on each
@@ -72,19 +72,19 @@ $H3 Basic Usage
One can also specify multiple source directories on the command line:
- bash$ hadoop distcp hdfs://nn1:8020/foo/a \
- hdfs://nn1:8020/foo/b \
- hdfs://nn2:8020/bar/foo
+ bash$ hadoop distcp hdfs://nn1:9820/foo/a \
+ hdfs://nn1:9820/foo/b \
+ hdfs://nn2:9820/bar/foo
Or, equivalently, from a file using the -f option:
- bash$ hadoop distcp -f hdfs://nn1:8020/srclist \
- hdfs://nn2:8020/bar/foo
+ bash$ hadoop distcp -f hdfs://nn1:9820/srclist \
+ hdfs://nn2:9820/bar/foo
Where `srclist` contains
- hdfs://nn1:8020/foo/a
- hdfs://nn1:8020/foo/b
+ hdfs://nn1:9820/foo/a
+ hdfs://nn1:9820/foo/b
When copying from multiple sources, DistCp will abort the copy with an error
message if two sources collide, but collisions at the destination are
@@ -126,35 +126,35 @@ $H3 Update and Overwrite
Consider a copy from `/source/first/` and `/source/second/` to `/target/`,
where the source paths have the following contents:
- hdfs://nn1:8020/source/first/1
- hdfs://nn1:8020/source/first/2
- hdfs://nn1:8020/source/second/10
- hdfs://nn1:8020/source/second/20
+ hdfs://nn1:9820/source/first/1
+ hdfs://nn1:9820/source/first/2
+ hdfs://nn1:9820/source/second/10
+ hdfs://nn1:9820/source/second/20
When DistCp is invoked without `-update` or `-overwrite`, the DistCp defaults
would create directories `first/` and `second/`, under `/target`. Thus:
- distcp hdfs://nn1:8020/source/first hdfs://nn1:8020/source/second hdfs://nn2:8020/target
+ distcp hdfs://nn1:9820/source/first hdfs://nn1:9820/source/second hdfs://nn2:9820/target
would yield the following contents in `/target`:
- hdfs://nn2:8020/target/first/1
- hdfs://nn2:8020/target/first/2
- hdfs://nn2:8020/target/second/10
- hdfs://nn2:8020/target/second/20
+ hdfs://nn2:9820/target/first/1
+ hdfs://nn2:9820/target/first/2
+ hdfs://nn2:9820/target/second/10
+ hdfs://nn2:9820/target/second/20
When either `-update` or `-overwrite` is specified, the **contents** of the
source-directories are copied to target, and not the source directories
themselves. Thus:
- distcp -update hdfs://nn1:8020/source/first hdfs://nn1:8020/source/second hdfs://nn2:8020/target
+ distcp -update hdfs://nn1:9820/source/first hdfs://nn1:9820/source/second hdfs://nn2:9820/target
would yield the following contents in `/target`:
- hdfs://nn2:8020/target/1
- hdfs://nn2:8020/target/2
- hdfs://nn2:8020/target/10
- hdfs://nn2:8020/target/20
+ hdfs://nn2:9820/target/1
+ hdfs://nn2:9820/target/2
+ hdfs://nn2:9820/target/10
+ hdfs://nn2:9820/target/20
By extension, if both source folders contained a file with the same name
(say, `0`), then both sources would map an entry to `/target/0` at the
@@ -162,27 +162,27 @@ $H3 Update and Overwrite
Now, consider the following copy operation:
- distcp hdfs://nn1:8020/source/first hdfs://nn1:8020/source/second hdfs://nn2:8020/target
+ distcp hdfs://nn1:9820/source/first hdfs://nn1:9820/source/second hdfs://nn2:9820/target
With sources/sizes:
- hdfs://nn1:8020/source/first/1 32
- hdfs://nn1:8020/source/first/2 32
- hdfs://nn1:8020/source/second/10 64
- hdfs://nn1:8020/source/second/20 32
+ hdfs://nn1:9820/source/first/1 32
+ hdfs://nn1:9820/source/first/2 32
+ hdfs://nn1:9820/source/second/10 64
+ hdfs://nn1:9820/source/second/20 32
And destination/sizes:
- hdfs://nn2:8020/target/1 32
- hdfs://nn2:8020/target/10 32
- hdfs://nn2:8020/target/20 64
+ hdfs://nn2:9820/target/1 32
+ hdfs://nn2:9820/target/10 32
+ hdfs://nn2:9820/target/20 64
Will effect:
- hdfs://nn2:8020/target/1 32
- hdfs://nn2:8020/target/2 32
- hdfs://nn2:8020/target/10 64
- hdfs://nn2:8020/target/20 32
+ hdfs://nn2:9820/target/1 32
+ hdfs://nn2:9820/target/2 32
+ hdfs://nn2:9820/target/10 64
+ hdfs://nn2:9820/target/20 32
`1` is skipped because the file-length and contents match. `2` is copied
because it doesn't exist at the target. `10` and `20` are overwritten since

View File

@@ -37,36 +37,36 @@ public class TestOptionsParser {
@Test
public void testParseIgnoreFailure() {
DistCpOptions options = OptionsParser.parse(new String[] {
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertFalse(options.shouldIgnoreFailures());
options = OptionsParser.parse(new String[] {
"-i",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertTrue(options.shouldIgnoreFailures());
}
@Test
public void testParseOverwrite() {
DistCpOptions options = OptionsParser.parse(new String[] {
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertFalse(options.shouldOverwrite());
options = OptionsParser.parse(new String[] {
"-overwrite",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertTrue(options.shouldOverwrite());
try {
OptionsParser.parse(new String[] {
"-update",
"-overwrite",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.fail("Update and overwrite aren't allowed together");
} catch (IllegalArgumentException ignore) {
}
@@ -75,44 +75,44 @@ public class TestOptionsParser {
@Test
public void testLogPath() {
DistCpOptions options = OptionsParser.parse(new String[] {
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertNull(options.getLogPath());
options = OptionsParser.parse(new String[] {
"-log",
- "hdfs://localhost:8020/logs",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
- Assert.assertEquals(options.getLogPath(), new Path("hdfs://localhost:8020/logs"));
+ "hdfs://localhost:9820/logs",
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
+ Assert.assertEquals(options.getLogPath(), new Path("hdfs://localhost:9820/logs"));
}
@Test
public void testParseBlokcing() {
DistCpOptions options = OptionsParser.parse(new String[] {
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertTrue(options.shouldBlock());
options = OptionsParser.parse(new String[] {
"-async",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertFalse(options.shouldBlock());
}
@Test
public void testParsebandwidth() {
DistCpOptions options = OptionsParser.parse(new String[] {
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertEquals(options.getMapBandwidth(), DistCpConstants.DEFAULT_BANDWIDTH_MB, DELTA);
options = OptionsParser.parse(new String[] {
"-bandwidth",
"11.2",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertEquals(options.getMapBandwidth(), 11.2, DELTA);
}
@@ -121,8 +121,8 @@ public class TestOptionsParser {
OptionsParser.parse(new String[] {
"-bandwidth",
"-11",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
}
@Test(expected=IllegalArgumentException.class)
@@ -130,22 +130,22 @@ public class TestOptionsParser {
OptionsParser.parse(new String[] {
"-bandwidth",
"0",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
}
@Test
public void testParseSkipCRC() {
DistCpOptions options = OptionsParser.parse(new String[] {
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertFalse(options.shouldSkipCRC());
options = OptionsParser.parse(new String[] {
"-update",
"-skipcrccheck",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertTrue(options.shouldSyncFolder());
Assert.assertTrue(options.shouldSkipCRC());
}
@@ -153,22 +153,22 @@ public class TestOptionsParser {
@Test
public void testParseAtomicCommit() {
DistCpOptions options = OptionsParser.parse(new String[] {
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertFalse(options.shouldAtomicCommit());
options = OptionsParser.parse(new String[] {
"-atomic",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertTrue(options.shouldAtomicCommit());
try {
OptionsParser.parse(new String[] {
"-atomic",
"-update",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.fail("Atomic and sync folders were allowed");
} catch (IllegalArgumentException ignore) { }
}
@@ -176,30 +176,30 @@ public class TestOptionsParser {
@Test
public void testParseWorkPath() {
DistCpOptions options = OptionsParser.parse(new String[] {
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertNull(options.getAtomicWorkPath());
options = OptionsParser.parse(new String[] {
"-atomic",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertNull(options.getAtomicWorkPath());
options = OptionsParser.parse(new String[] {
"-atomic",
"-tmp",
- "hdfs://localhost:8020/work",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
- Assert.assertEquals(options.getAtomicWorkPath(), new Path("hdfs://localhost:8020/work"));
+ "hdfs://localhost:9820/work",
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
+ Assert.assertEquals(options.getAtomicWorkPath(), new Path("hdfs://localhost:9820/work"));
try {
OptionsParser.parse(new String[] {
"-tmp",
- "hdfs://localhost:8020/work",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/work",
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.fail("work path was allowed without -atomic switch");
} catch (IllegalArgumentException ignore) {}
}
@@ -207,37 +207,37 @@ public class TestOptionsParser {
@Test
public void testParseSyncFolders() {
DistCpOptions options = OptionsParser.parse(new String[] {
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertFalse(options.shouldSyncFolder());
options = OptionsParser.parse(new String[] {
"-update",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertTrue(options.shouldSyncFolder());
}
@Test
public void testParseDeleteMissing() {
DistCpOptions options = OptionsParser.parse(new String[] {
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertFalse(options.shouldDeleteMissing());
options = OptionsParser.parse(new String[] {
"-update",
"-delete",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertTrue(options.shouldSyncFolder());
Assert.assertTrue(options.shouldDeleteMissing());
options = OptionsParser.parse(new String[] {
"-overwrite",
"-delete",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertTrue(options.shouldOverwrite());
Assert.assertTrue(options.shouldDeleteMissing());
@@ -245,8 +245,8 @@ public class TestOptionsParser {
OptionsParser.parse(new String[] {
"-atomic",
"-delete",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.fail("Atomic and delete folders were allowed");
} catch (IllegalArgumentException ignore) { }
}
@@ -254,38 +254,38 @@ public class TestOptionsParser {
@Test
public void testParseMaps() {
DistCpOptions options = OptionsParser.parse(new String[] {
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertEquals(options.getMaxMaps(), DistCpConstants.DEFAULT_MAPS);
options = OptionsParser.parse(new String[] {
"-m",
"1",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertEquals(options.getMaxMaps(), 1);
options = OptionsParser.parse(new String[] {
"-m",
"0",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertEquals(options.getMaxMaps(), 1);
try {
OptionsParser.parse(new String[] {
"-m",
"hello",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.fail("Non numberic map parsed");
} catch (IllegalArgumentException ignore) { }
try {
OptionsParser.parse(new String[] {
"-mapredXslConf",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.fail("Non numberic map parsed");
} catch (IllegalArgumentException ignore) { }
}
@@ -293,8 +293,8 @@ public class TestOptionsParser {
@Test
public void testParseNumListstatusThreads() {
DistCpOptions options = OptionsParser.parse(new String[] {
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
// If command line argument isn't set, we expect .getNumListstatusThreads
// option to be zero (so that we know when to override conf properties).
Assert.assertEquals(0, options.getNumListstatusThreads());
@@ -302,23 +302,23 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"--numListstatusThreads",
"12",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertEquals(12, options.getNumListstatusThreads());
options = OptionsParser.parse(new String[] {
"--numListstatusThreads",
"0",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertEquals(0, options.getNumListstatusThreads());
try {
OptionsParser.parse(new String[] {
"--numListstatusThreads",
"hello",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.fail("Non numberic numListstatusThreads parsed");
} catch (IllegalArgumentException ignore) { }
@@ -326,8 +326,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"--numListstatusThreads",
"100",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertEquals(DistCpOptions.maxNumListstatusThreads,
options.getNumListstatusThreads());
}
@@ -336,10 +336,10 @@ public class TestOptionsParser {
public void testSourceListing() {
DistCpOptions options = OptionsParser.parse(new String[] {
"-f",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertEquals(options.getSourceFileListing(),
- new Path("hdfs://localhost:8020/source/first"));
+ new Path("hdfs://localhost:9820/source/first"));
}
@Test
@@ -347,9 +347,9 @@ public class TestOptionsParser {
try {
OptionsParser.parse(new String[] {
"-f",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.fail("Both source listing & source paths allowed");
} catch (IllegalArgumentException ignore) {}
}
@@ -358,7 +358,7 @@ public class TestOptionsParser {
public void testMissingSourceInfo() {
try {
OptionsParser.parse(new String[] {
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/target/"});
Assert.fail("Neither source listing not source paths present");
} catch (IllegalArgumentException ignore) {}
}
@@ -367,7 +367,7 @@ public class TestOptionsParser {
public void testMissingTarget() {
try {
OptionsParser.parse(new String[] {
- "-f", "hdfs://localhost:8020/source"});
+ "-f", "hdfs://localhost:9820/source"});
Assert.fail("Missing target allowed");
} catch (IllegalArgumentException ignore) {}
}
@@ -376,7 +376,7 @@ public class TestOptionsParser {
public void testInvalidArgs() {
try {
OptionsParser.parse(new String[] {
- "-m", "-f", "hdfs://localhost:8020/source"});
+ "-m", "-f", "hdfs://localhost:9820/source"});
Assert.fail("Missing map value");
} catch (IllegalArgumentException ignore) {}
}
@@ -404,14 +404,14 @@ public class TestOptionsParser {
"-strategy",
"dynamic",
"-f",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertEquals(options.getCopyStrategy(), "dynamic");
options = OptionsParser.parse(new String[] {
"-f",
- "hdfs://localhost:8020/source/first",
- "hdfs://localhost:8020/target/"});
+ "hdfs://localhost:9820/source/first",
+ "hdfs://localhost:9820/target/"});
Assert.assertEquals(options.getCopyStrategy(), DistCpConstants.UNIFORMSIZE);
}
@ -419,17 +419,17 @@ public class TestOptionsParser {
public void testTargetPath() {
DistCpOptions options = OptionsParser.parse(new String[] {
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getTargetPath(), new Path("hdfs://localhost:8020/target/"));
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
Assert.assertEquals(options.getTargetPath(), new Path("hdfs://localhost:9820/target/"));
}
@Test
public void testPreserve() {
DistCpOptions options = OptionsParser.parse(new String[] {
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
@ -440,8 +440,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"-p",
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
@ -453,8 +453,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"-p",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
@ -467,8 +467,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"-pbr",
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
@ -481,8 +481,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"-pbrgup",
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
@ -495,8 +495,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"-pbrgupcaxt",
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
@ -510,8 +510,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"-pc",
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
@ -524,8 +524,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"-p",
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
int i = 0;
Iterator<FileAttribute> attribIterator = options.preserveAttributes();
while (attribIterator.hasNext()) {
@ -538,8 +538,8 @@ public class TestOptionsParser {
OptionsParser.parse(new String[] {
"-pabcd",
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target"});
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target"});
Assert.fail("Invalid preserve attribute");
}
catch (IllegalArgumentException ignore) {}
@ -547,8 +547,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
options.preserve(FileAttribute.PERMISSION);
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
@ -573,8 +573,8 @@ public class TestOptionsParser {
DistCpOptions options = OptionsParser.parse(new String[] {
"-atomic",
"-i",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
options.appendToConf(conf);
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false));
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(), false));
@ -591,8 +591,8 @@ public class TestOptionsParser {
"-pu",
"-bandwidth",
"11.2",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
options.appendToConf(conf);
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(), false));
@ -609,8 +609,8 @@ public class TestOptionsParser {
DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
DistCpOptions options = OptionsParser.parse(new String[] { "-update",
"-append", "hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
"-append", "hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
options.appendToConf(conf);
Assert.assertTrue(conf.getBoolean(
DistCpOptionSwitch.APPEND.getConfigLabel(), false));
@ -620,8 +620,8 @@ public class TestOptionsParser {
// make sure -append is only valid when -update is specified
try {
OptionsParser.parse(new String[] { "-append",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
fail("Append should fail if update option is not specified");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
@ -632,8 +632,8 @@ public class TestOptionsParser {
try {
OptionsParser.parse(new String[] {
"-append", "-update", "-skipcrccheck",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
fail("Append should fail if skipCrc option is specified");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
@ -649,8 +649,8 @@ public class TestOptionsParser {
DistCpOptions options = OptionsParser.parse(new String[] { "-update",
"-diff", "s1", "s2",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
options.appendToConf(conf);
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.DIFF.getConfigLabel(), false));
Assert.assertTrue(options.shouldUseDiff());
@ -659,8 +659,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"-diff", "s1", ".", "-update",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
options.appendToConf(conf);
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.DIFF.getConfigLabel(),
false));
@ -671,8 +671,8 @@ public class TestOptionsParser {
// -diff requires two option values
try {
OptionsParser.parse(new String[] {"-diff", "s1", "-update",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
fail("-diff should fail with only one snapshot name");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
@ -682,8 +682,8 @@ public class TestOptionsParser {
// make sure -diff is only valid when -update is specified
try {
OptionsParser.parse(new String[] { "-diff", "s1", "s2",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
fail("-diff should fail if -update option is not specified");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
@ -692,8 +692,8 @@ public class TestOptionsParser {
try {
OptionsParser.parse(new String[] { "-diff", "s1", "s2", "-update", "-delete",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
fail("-diff should fail if -delete option is specified");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
@ -703,8 +703,8 @@ public class TestOptionsParser {
try {
OptionsParser.parse(new String[] { "-diff", "s1", "s2",
"-delete", "-overwrite",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
fail("-diff should fail if -update option is not specified");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
@ -715,15 +715,15 @@ public class TestOptionsParser {
@Test
public void testExclusionsOption() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
Assert.assertNull(options.getFiltersFile());
options = OptionsParser.parse(new String[] {
"-filters",
"/tmp/filters.txt",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
Assert.assertEquals(options.getFiltersFile(), "/tmp/filters.txt");
}
}

View File

@ -165,7 +165,7 @@ Hadoop uses URIs to refer to files within a filesystem. Some common examples are
local://etc/hosts
hdfs://cluster1/users/example/data/set1
hdfs://cluster2.example.org:8020/users/example/data/set1
hdfs://cluster2.example.org:9820/users/example/data/set1
The Swift Filesystem Client adds a new URL type `swift`. In a Swift Filesystem URL, the hostname part of a URL identifies the container and the service to work with; the path, the name of the object. Here are some examples:
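
As a minimal sketch (not part of the patch) of how an hdfs:// URI like those above is consumed through the FileSystem API — the host cluster2.example.org and the path are placeholders, and 9820 is the new non-ephemeral NameNode RPC default:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsUriExample {
  public static void main(String[] args) throws Exception {
    // Resolve the filesystem behind an hdfs:// URI; the authority
    // (host:port) selects the NameNode to talk to.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(
        URI.create("hdfs://cluster2.example.org:9820/"), conf);
    // A path without scheme/authority is resolved against that filesystem.
    System.out.println(fs.exists(new Path("/users/example/data/set1")));
  }
}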

View File

@ -4549,7 +4549,7 @@
"s3.blocksize" : "67108864",
"mapreduce.task.io.sort.factor" : "10",
"yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
"mapreduce.job.working.dir" : "hdfs://a2115.smile.com:8020/user/jenkins",
"mapreduce.job.working.dir" : "hdfs://a2115.smile.com:9820/user/jenkins",
"yarn.admin.acl" : "*",
"mapreduce.job.speculative.speculativecap" : "0.1",
"dfs.namenode.num.checkpoints.retained" : "2",
@ -4585,7 +4585,7 @@
"dfs.namenode.edits.dir" : "${dfs.namenode.name.dir}",
"ha.health-monitor.sleep-after-disconnect.ms" : "1000",
"dfs.encrypt.data.transfer" : "false",
"dfs.datanode.http.address" : "0.0.0.0:50075",
"dfs.datanode.http.address" : "0.0.0.0:9864",
"mapreduce.terasort.num-rows" : "400000000",
"mapreduce.job.map.class" : "org.apache.hadoop.examples.terasort.TeraGen$SortGenMapper",
"mapreduce.jobtracker.jobhistory.task.numberprogresssplits" : "12",
@ -4811,7 +4811,7 @@
"ftp.stream-buffer-size" : "4096",
"dfs.namenode.avoid.write.stale.datanode" : "false",
"hadoop.security.group.mapping.ldap.search.attr.member" : "member",
"mapreduce.output.fileoutputformat.outputdir" : "hdfs://a2115.smile.com:8020/user/jenkins/tera-gen-1",
"mapreduce.output.fileoutputformat.outputdir" : "hdfs://a2115.smile.com:9820/user/jenkins/tera-gen-1",
"dfs.blockreport.initialDelay" : "0",
"yarn.nm.liveness-monitor.expiry-interval-ms" : "600000",
"hadoop.http.authentication.token.validity" : "36000",
@ -4837,7 +4837,7 @@
"yarn.scheduler.maximum-allocation-mb" : "8192",
"yarn.nodemanager.heartbeat.interval-ms" : "1000",
"mapreduce.job.userlog.retain.hours" : "24",
"dfs.namenode.secondary.http-address" : "0.0.0.0:50090",
"dfs.namenode.secondary.http-address" : "0.0.0.0:9868",
"mapreduce.task.timeout" : "600000",
"mapreduce.framework.name" : "yarn",
"ipc.client.idlethreshold" : "4000",
@ -4857,7 +4857,7 @@
"hadoop.security.auth_to_local" : "DEFAULT",
"dfs.secondary.namenode.kerberos.internal.spnego.principal" : "${dfs.web.authentication.kerberos.principal}",
"ftp.client-write-packet-size" : "65536",
"fs.defaultFS" : "hdfs://a2115.smile.com:8020",
"fs.defaultFS" : "hdfs://a2115.smile.com:9820",
"yarn.nodemanager.address" : "0.0.0.0:0",
"yarn.scheduler.fair.assignmultiple" : "true",
"yarn.resourcemanager.scheduler.client.thread-count" : "50",
@ -4901,9 +4901,9 @@
"fs.s3n.block.size" : "67108864",
"fs.ftp.host" : "0.0.0.0",
"hadoop.security.group.mapping" : "org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback",
"dfs.datanode.address" : "0.0.0.0:50010",
"dfs.datanode.address" : "0.0.0.0:9866",
"mapreduce.map.skip.maxrecords" : "0",
"dfs.datanode.https.address" : "0.0.0.0:50475",
"dfs.datanode.https.address" : "0.0.0.0:9865",
"file.replication" : "1",
"yarn.resourcemanager.resource-tracker.address" : "a2115.smile.com:8031",
"dfs.datanode.drop.cache.behind.reads" : "false",
@ -4970,7 +4970,7 @@
"yarn.resourcemanager.resource-tracker.client.thread-count" : "50",
"mapreduce.tasktracker.dns.nameserver" : "default",
"mapreduce.map.output.compress" : "true",
"dfs.datanode.ipc.address" : "0.0.0.0:50020",
"dfs.datanode.ipc.address" : "0.0.0.0:9867",
"hadoop.ssl.require.client.cert" : "false",
"yarn.nodemanager.delete.debug-delay-sec" : "0",
"dfs.datanode.max.transfer.threads" : "4096"
@ -9652,7 +9652,7 @@
"s3.blocksize" : "67108864",
"mapreduce.task.io.sort.factor" : "10",
"yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
"mapreduce.job.working.dir" : "hdfs://a2115.smile.com:8020/user/jenkins",
"mapreduce.job.working.dir" : "hdfs://a2115.smile.com:9820/user/jenkins",
"yarn.admin.acl" : "*",
"mapreduce.job.speculative.speculativecap" : "0.1",
"dfs.namenode.num.checkpoints.retained" : "2",
@ -9688,7 +9688,7 @@
"dfs.namenode.edits.dir" : "${dfs.namenode.name.dir}",
"ha.health-monitor.sleep-after-disconnect.ms" : "1000",
"dfs.encrypt.data.transfer" : "false",
"dfs.datanode.http.address" : "0.0.0.0:50075",
"dfs.datanode.http.address" : "0.0.0.0:9864",
"mapreduce.terasort.num-rows" : "400000000",
"mapreduce.job.map.class" : "org.apache.hadoop.examples.terasort.TeraGen$SortGenMapper",
"mapreduce.jobtracker.jobhistory.task.numberprogresssplits" : "12",
@ -9914,7 +9914,7 @@
"ftp.stream-buffer-size" : "4096",
"dfs.namenode.avoid.write.stale.datanode" : "false",
"hadoop.security.group.mapping.ldap.search.attr.member" : "member",
"mapreduce.output.fileoutputformat.outputdir" : "hdfs://a2115.smile.com:8020/user/jenkins/tera-gen-2",
"mapreduce.output.fileoutputformat.outputdir" : "hdfs://a2115.smile.com:9820/user/jenkins/tera-gen-2",
"dfs.blockreport.initialDelay" : "0",
"yarn.nm.liveness-monitor.expiry-interval-ms" : "600000",
"hadoop.http.authentication.token.validity" : "36000",
@ -9940,7 +9940,7 @@
"yarn.scheduler.maximum-allocation-mb" : "8192",
"yarn.nodemanager.heartbeat.interval-ms" : "1000",
"mapreduce.job.userlog.retain.hours" : "24",
"dfs.namenode.secondary.http-address" : "0.0.0.0:50090",
"dfs.namenode.secondary.http-address" : "0.0.0.0:9868",
"mapreduce.task.timeout" : "600000",
"mapreduce.framework.name" : "yarn",
"ipc.client.idlethreshold" : "4000",
@ -9960,7 +9960,7 @@
"hadoop.security.auth_to_local" : "DEFAULT",
"dfs.secondary.namenode.kerberos.internal.spnego.principal" : "${dfs.web.authentication.kerberos.principal}",
"ftp.client-write-packet-size" : "65536",
"fs.defaultFS" : "hdfs://a2115.smile.com:8020",
"fs.defaultFS" : "hdfs://a2115.smile.com:9820",
"yarn.nodemanager.address" : "0.0.0.0:0",
"yarn.scheduler.fair.assignmultiple" : "true",
"yarn.resourcemanager.scheduler.client.thread-count" : "50",
@ -10004,9 +10004,9 @@
"fs.s3n.block.size" : "67108864",
"fs.ftp.host" : "0.0.0.0",
"hadoop.security.group.mapping" : "org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback",
"dfs.datanode.address" : "0.0.0.0:50010",
"dfs.datanode.address" : "0.0.0.0:9866",
"mapreduce.map.skip.maxrecords" : "0",
"dfs.datanode.https.address" : "0.0.0.0:50475",
"dfs.datanode.https.address" : "0.0.0.0:9865",
"file.replication" : "1",
"yarn.resourcemanager.resource-tracker.address" : "a2115.smile.com:8031",
"dfs.datanode.drop.cache.behind.reads" : "false",
@ -10073,7 +10073,7 @@
"yarn.resourcemanager.resource-tracker.client.thread-count" : "50",
"mapreduce.tasktracker.dns.nameserver" : "default",
"mapreduce.map.output.compress" : "true",
"dfs.datanode.ipc.address" : "0.0.0.0:50020",
"dfs.datanode.ipc.address" : "0.0.0.0:9867",
"hadoop.ssl.require.client.cert" : "false",
"yarn.nodemanager.delete.debug-delay-sec" : "0",
"dfs.datanode.max.transfer.threads" : "4096"
@ -10255,7 +10255,7 @@
"s3.blocksize" : "67108864",
"mapreduce.task.io.sort.factor" : "10",
"yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
"mapreduce.job.working.dir" : "hdfs://a2115.smile.com:8020/user/jenkins",
"mapreduce.job.working.dir" : "hdfs://a2115.smile.com:9820/user/jenkins",
"yarn.admin.acl" : "*",
"mapreduce.job.speculative.speculativecap" : "0.1",
"dfs.namenode.num.checkpoints.retained" : "2",
@ -10291,7 +10291,7 @@
"dfs.namenode.edits.dir" : "${dfs.namenode.name.dir}",
"ha.health-monitor.sleep-after-disconnect.ms" : "1000",
"dfs.encrypt.data.transfer" : "false",
"dfs.datanode.http.address" : "0.0.0.0:50075",
"dfs.datanode.http.address" : "0.0.0.0:9864",
"mapreduce.terasort.num-rows" : "400000000",
"mapreduce.job.map.class" : "org.apache.hadoop.examples.terasort.TeraGen$SortGenMapper",
"mapreduce.jobtracker.jobhistory.task.numberprogresssplits" : "12",
@ -10518,7 +10518,7 @@
"ftp.stream-buffer-size" : "4096",
"dfs.namenode.avoid.write.stale.datanode" : "false",
"hadoop.security.group.mapping.ldap.search.attr.member" : "member",
"mapreduce.output.fileoutputformat.outputdir" : "hdfs://a2115.smile.com:8020/user/jenkins/tera-gen-1",
"mapreduce.output.fileoutputformat.outputdir" : "hdfs://a2115.smile.com:9820/user/jenkins/tera-gen-1",
"dfs.blockreport.initialDelay" : "0",
"yarn.nm.liveness-monitor.expiry-interval-ms" : "600000",
"hadoop.http.authentication.token.validity" : "36000",
@ -10544,7 +10544,7 @@
"yarn.scheduler.maximum-allocation-mb" : "8192",
"yarn.nodemanager.heartbeat.interval-ms" : "1000",
"mapreduce.job.userlog.retain.hours" : "24",
"dfs.namenode.secondary.http-address" : "0.0.0.0:50090",
"dfs.namenode.secondary.http-address" : "0.0.0.0:9868",
"mapreduce.task.timeout" : "600000",
"mapreduce.framework.name" : "yarn",
"ipc.client.idlethreshold" : "4000",
@ -10564,7 +10564,7 @@
"hadoop.security.auth_to_local" : "DEFAULT",
"dfs.secondary.namenode.kerberos.internal.spnego.principal" : "${dfs.web.authentication.kerberos.principal}",
"ftp.client-write-packet-size" : "65536",
"fs.defaultFS" : "hdfs://a2115.smile.com:8020",
"fs.defaultFS" : "hdfs://a2115.smile.com:9820",
"yarn.nodemanager.address" : "0.0.0.0:0",
"yarn.scheduler.fair.assignmultiple" : "true",
"yarn.resourcemanager.scheduler.client.thread-count" : "50",
@ -10608,9 +10608,9 @@
"fs.s3n.block.size" : "67108864",
"fs.ftp.host" : "0.0.0.0",
"hadoop.security.group.mapping" : "org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback",
"dfs.datanode.address" : "0.0.0.0:50010",
"dfs.datanode.address" : "0.0.0.0:9866",
"mapreduce.map.skip.maxrecords" : "0",
"dfs.datanode.https.address" : "0.0.0.0:50475",
"dfs.datanode.https.address" : "0.0.0.0:9865",
"file.replication" : "1",
"yarn.resourcemanager.resource-tracker.address" : "a2115.smile.com:8031",
"dfs.datanode.drop.cache.behind.reads" : "false",
@ -10677,7 +10677,7 @@
"yarn.resourcemanager.resource-tracker.client.thread-count" : "50",
"mapreduce.tasktracker.dns.nameserver" : "default",
"mapreduce.map.output.compress" : "true",
"dfs.datanode.ipc.address" : "0.0.0.0:50020",
"dfs.datanode.ipc.address" : "0.0.0.0:9867",
"hadoop.ssl.require.client.cert" : "false",
"yarn.nodemanager.delete.debug-delay-sec" : "0",
"dfs.datanode.max.transfer.threads" : "4096"

View File

@ -34,7 +34,7 @@ public interface AddressTypes {
* The host/domain name and port are set as separate strings in the address
* list, e.g.
* <pre>
* ["namenode.example.org", "50070"]
* ["namenode.example.org", "9870"]
* </pre>
*/
public static final String ADDRESS_HOSTNAME_AND_PORT = "host/port";
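
A sketch of how such a two-part address is produced in practice, assuming the RegistryTypeUtils.hostnamePortPair helper also used later in this patch; the hostname and the 9870 NameNode HTTP default are illustrative values:

import java.util.Map;
import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;

public class AddressExample {
  public static void main(String[] args) {
    // Build a host/port address map of type ADDRESS_HOSTNAME_AND_PORT;
    // namenode.example.org and 9870 are placeholders for this sketch.
    Map<String, String> addr =
        RegistryTypeUtils.hostnamePortPair("namenode.example.org", 9870);
    System.out.println(addr);
  }
}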

View File

@ -147,7 +147,7 @@ public class RegistryTestHelper extends Assert {
Map<String, String> url = addressList.get(0);
String addr = url.get("uri");
assertTrue(addr.contains("http"));
assertTrue(addr.contains(":8020"));
assertTrue(addr.contains(":9820"));
Endpoint nnipc = findEndpoint(record, NNIPC, false, 1,2);
assertEquals("wrong protocol in " + nnipc, ProtocolTypes.PROTOCOL_THRIFT,
@ -275,7 +275,7 @@ public class RegistryTestHelper extends Assert {
new URI("http", hostname + ":80", "/")));
entry.addExternalEndpoint(
restEndpoint(API_WEBHDFS,
new URI("http", hostname + ":8020", "/")));
new URI("http", hostname + ":9820", "/")));
Endpoint endpoint = ipcEndpoint(API_HDFS, null);
endpoint.addresses.add(RegistryTypeUtils.hostnamePortPair(hostname, 8030));

View File

@ -63,7 +63,7 @@ public class TestPBRecordImpl {
LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
assertTrue(ret instanceof LocalResourcePBImpl);
ret.setResource(ConverterUtils.getYarnUrlFromPath(new Path(
"hdfs://y.ak:8020/foo/bar")));
"hdfs://y.ak:9820/foo/bar")));
ret.setSize(4344L);
ret.setTimestamp(3141592653589793L);
ret.setVisibility(LocalResourceVisibility.PUBLIC);