diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java index ed15fa43eab..ba6e4e2d45d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java @@ -98,7 +98,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic { /** * CallQueue related settings. These are not used directly, but rather * combined with a namespace and port. For instance: - * IPC_NAMESPACE + ".9820." + IPC_CALLQUEUE_IMPL_KEY + * IPC_NAMESPACE + ".8020." + IPC_CALLQUEUE_IMPL_KEY */ public static final String IPC_NAMESPACE = "ipc"; public static final String IPC_CALLQUEUE_IMPL_KEY = "callqueue.impl"; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java index ac9776fcfa1..7b0a25cf4b2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java @@ -83,11 +83,11 @@ import org.slf4j.LoggerFactory; * *
Examples:
*- * $ bin/hadoop dfs -fs darwin:9820 -ls /data - * list /data directory in dfs with namenode darwin:9820 + * $ bin/hadoop dfs -fs darwin:8020 -ls /data + * list /data directory in dfs with namenode darwin:8020 * - * $ bin/hadoop dfs -D fs.default.name=darwin:9820 -ls /data - * list /data directory in dfs with namenode darwin:9820 + * $ bin/hadoop dfs -D fs.default.name=darwin:8020 -ls /data + * list /data directory in dfs with namenode darwin:8020 * * $ bin/hadoop dfs -conf core-site.xml -conf hdfs-site.xml -ls /data * list /data directory in dfs with multiple conf files specified. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegateToFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegateToFileSystem.java index fefcf0ecfea..5de32861db6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegateToFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegateToFileSystem.java @@ -47,6 +47,6 @@ public class TestDelegateToFileSystem { @Test public void testDefaultURIwithPort() throws Exception { - testDefaultUriInternal("hdfs://dummyhost:9820"); + testDefaultUriInternal("hdfs://dummyhost:8020"); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java index f0ebc1ec846..b07da8da5a8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java @@ -38,7 +38,7 @@ public class TestSshFenceByTcpPort { private static String TEST_FENCING_HOST = System.getProperty( "test.TestSshFenceByTcpPort.host", "localhost"); private static final String TEST_FENCING_PORT = System.getProperty( - "test.TestSshFenceByTcpPort.port", "9820"); + "test.TestSshFenceByTcpPort.port", "8020"); private static final String TEST_KEYFILE = System.getProperty( "test.TestSshFenceByTcpPort.key"); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java index 38c2435aa11..52a7cd0c080 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java @@ -73,7 +73,7 @@ public interface HdfsClientConfigKeys { int DFS_NAMENODE_HTTPS_PORT_DEFAULT = 9871; String DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address"; String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes"; - int DFS_NAMENODE_RPC_PORT_DEFAULT = 9820; + int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020; String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY = "dfs.namenode.kerberos.principal"; String DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size"; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java index 8feba964ec9..65fbbf8948e 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java @@ -70,10 +70,10 @@ public class TestRequestHedgingProxyProvider { HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns, "nn1,nn2"); conf.set( HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn1", - "machine1.foo.bar:9820"); + "machine1.foo.bar:8020"); conf.set( HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn2", - "machine2.foo.bar:9820"); + "machine2.foo.bar:8020"); } @Test @@ -294,7 +294,7 @@ public class TestRequestHedgingProxyProvider { conf.set(HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns, "nn1,nn2,nn3"); conf.set(HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn3", - "machine3.foo.bar:9820"); + "machine3.foo.bar:8020"); final AtomicInteger counter = new AtomicInteger(0); final int[] isGood = {1}; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index f30644ccc26..9b77509a752 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -37,7 +37,7 @@ RPC address that handles all clients requests. In the case of HA/Federation where multiple namenodes exist, the name service id is added to the name e.g. dfs.namenode.rpc-address.ns1 dfs.namenode.rpc-address.EXAMPLENAMESERVICE - The value of this property will take the form of nn-host1:rpc-port. The NameNode's default RPC port is 9820. + The value of this property will take the form of nn-host1:rpc-port. The NameNode's default RPC port is 8020. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md index 7db9b62c0cf..d607561b6dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md @@ -119,15 +119,15 @@ The order in which you set these configurations is unimportant, but the values ydfs.namenode.rpc-address.mycluster.nn1 -machine1.example.com:9820 +machine1.example.com:8020 dfs.namenode.rpc-address.mycluster.nn2 -machine2.example.com:9820 +machine2.example.com:8020 **Note:** You may similarly configure the "**servicerpc-address**" setting if diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md index 8df3209ea1b..2ac77676cd7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md @@ -132,15 +132,15 @@ The order in which you set these configurations is unimportant, but the values y dfs.namenode.rpc-address.mycluster.nn3 -machine3.example.com:9820 +machine3.example.com:8020 dfs.namenode.rpc-address.mycluster.nn1 -machine1.example.com:9820 +machine1.example.com:8020 dfs.namenode.rpc-address.mycluster.nn2 -machine2.example.com:9820 +machine2.example.com:8020 **Note:** You may similarly configure the "**servicerpc-address**" setting if you so desire. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md index 76cd2bff8e5..471a27f0fbb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md @@ -291,7 +291,7 @@ We are going to remove the file test1. The comment below shows that the file has been moved to Trash directory. $ hadoop fs -rm -r delete/test1 - Moved: hdfs://localhost:9820/user/hadoop/delete/test1 to trash at: hdfs://localhost:9820/user/hadoop/.Trash/Current + Moved: hdfs://localhost:8020/user/hadoop/delete/test1 to trash at: hdfs://localhost:8020/user/hadoop/.Trash/Current now we are going to remove the file with skipTrash option, which will not send the file to Trash.It will be completely removed from HDFS. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md index 94c90ff7ac5..10085832566 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md @@ -104,40 +104,40 @@ The authority following the `viewfs://` scheme in the URI is the mount table nam The mount points of a mount table are specified in the standard Hadoop configuration files. All the mount table config entries for `viewfs` are prefixed by `fs.viewfs.mounttable.`. The mount points that are linking other filesystems are specified using `link` tags. The recommendation is to have mount points name same as in the linked filesystem target locations. For all namespaces that are not configured in the mount table, we can have them fallback to a default filesystem via `linkFallback`. -In the below mount table configuration, namespace `/data` is linked to the filesystem `hdfs://nn1-clusterx.example.com:9820/data`, `/project` is linked to the filesystem `hdfs://nn2-clusterx.example.com:9820/project`. All namespaces that are not configured in the mount table, like `/logs` are linked to the filesystem `hdfs://nn5-clusterx.example.com:9820/home`. +In the below mount table configuration, namespace `/data` is linked to the filesystem `hdfs://nn1-clusterx.example.com:8020/data`, `/project` is linked to the filesystem `hdfs://nn2-clusterx.example.com:8020/project`. All namespaces that are not configured in the mount table, like `/logs` are linked to the filesystem `hdfs://nn5-clusterx.example.com:8020/home`. ```xml dfs.namenode.rpc-address.mycluster.nn3 -machine3.example.com:9820 +machine3.example.com:8020 ``` -Alternatively we can have the mount table's root merged with the root of another filesystem via `linkMergeSlash`. In the below mount table configuration, ClusterY's root is merged with the root filesystem at `hdfs://nn1-clustery.example.com:9820`. +Alternatively we can have the mount table's root merged with the root of another filesystem via `linkMergeSlash`. In the below mount table configuration, ClusterY's root is merged with the root filesystem at `hdfs://nn1-clustery.example.com:8020`. 
```xml fs.viewfs.mounttable.ClusterX.link./data -hdfs://nn1-clusterx.example.com:9820/data +hdfs://nn1-clusterx.example.com:8020/data fs.viewfs.mounttable.ClusterX.link./project -hdfs://nn2-clusterx.example.com:9820/project +hdfs://nn2-clusterx.example.com:8020/project fs.viewfs.mounttable.ClusterX.link./user -hdfs://nn3-clusterx.example.com:9820/user +hdfs://nn3-clusterx.example.com:8020/user fs.viewfs.mounttable.ClusterX.link./tmp -hdfs://nn4-clusterx.example.com:9820/tmp +hdfs://nn4-clusterx.example.com:8020/tmp fs.viewfs.mounttable.ClusterX.linkFallback -hdfs://nn5-clusterx.example.com:9820/home +hdfs://nn5-clusterx.example.com:8020/home ``` @@ -237,11 +237,11 @@ The mount tables can be described in `core-site.xml` but it is better to use ind In the file `mountTable.xml`, there is a definition of the mount table "ClusterX" for the hypothetical cluster that is a federation of the three namespace volumes managed by the three namenodes -1. nn1-clusterx.example.com:9820, -2. nn2-clusterx.example.com:9820, and -3. nn3-clusterx.example.com:9820. +1. nn1-clusterx.example.com:8020, +2. nn2-clusterx.example.com:8020, and +3. nn3-clusterx.example.com:8020. -Here `/home` and `/tmp` are in the namespace managed by namenode nn1-clusterx.example.com:9820, and projects `/foo` and `/bar` are hosted on the other namenodes of the federated cluster. The home directory base path is set to `/home` so that each user can access its home directory using the getHomeDirectory() method defined in [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html)/[FileContext](../../api/org/apache/hadoop/fs/FileContext.html). +Here `/home` and `/tmp` are in the namespace managed by namenode nn1-clusterx.example.com:8020, and projects `/foo` and `/bar` are hosted on the other namenodes of the federated cluster. The home directory base path is set to `/home` so that each user can access its home directory using the getHomeDirectory() method defined in [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html)/[FileContext](../../api/org/apache/hadoop/fs/FileContext.html). ```xml fs.viewfs.mounttable.ClusterY.linkMergeSlash -hdfs://nn1-clustery.example.com:9820/ +hdfs://nn1-clustery.example.com:8020/ @@ -251,19 +251,19 @@ Here `/home` and `/tmp` are in the namespace managed by namenode nn1-clusterx.ex ``` diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java index 6265f44aa20..c14ebb41dfa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java @@ -312,7 +312,7 @@ public class TestDFSClientFailover { conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + service, namenode); conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + service + "." 
- + namenode, "localhost:9820"); + + namenode, "localhost:8020"); // call createProxy implicitly and explicitly Path p = new Path("/"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java index 39f76a57207..afe34ad228d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java @@ -84,9 +84,9 @@ import com.google.common.collect.Sets; public class TestDFSUtil { - static final String NS1_NN_ADDR = "ns1-nn.example.com:9820"; - static final String NS1_NN1_ADDR = "ns1-nn1.example.com:9820"; - static final String NS1_NN2_ADDR = "ns1-nn2.example.com:9820"; + static final String NS1_NN_ADDR = "ns1-nn.example.com:8020"; + static final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020"; + static final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020"; /** * Reset to default UGI settings since some tests change them. @@ -478,7 +478,7 @@ public class TestDFSUtil { DFS_NAMENODE_HTTP_PORT_DEFAULT, null, null, null), httpport); URI httpAddress = DFSUtil.getInfoServer(new InetSocketAddress( - "localhost", 9820), conf, "http"); + "localhost", 8020), conf, "http"); assertEquals( URI.create("http://localhost:" + DFS_NAMENODE_HTTP_PORT_DEFAULT), httpAddress); @@ -488,10 +488,10 @@ public class TestDFSUtil { public void testHANameNodesWithFederation() throws URISyntaxException { HdfsConfiguration conf = new HdfsConfiguration(); - final String NS1_NN1_HOST = "ns1-nn1.example.com:9820"; - final String NS1_NN2_HOST = "ns1-nn2.example.com:9820"; - final String NS2_NN1_HOST = "ns2-nn1.example.com:9820"; - final String NS2_NN2_HOST = "ns2-nn2.example.com:9820"; + final String NS1_NN1_HOST = "ns1-nn1.example.com:8020"; + final String NS1_NN2_HOST = "ns1-nn2.example.com:8020"; + final String NS2_NN1_HOST = "ns2-nn1.example.com:8020"; + final String NS2_NN2_HOST = "ns2-nn2.example.com:8020"; conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1"); // Two nameservices, each with two NNs. 
@@ -555,9 +555,9 @@ public class TestDFSUtil { HdfsConfiguration conf = new HdfsConfiguration(); // One nameservice with two NNs - final String NS1_NN1_HOST = "ns1-nn1.example.com:9820"; + final String NS1_NN1_HOST = "ns1-nn1.example.com:8020"; final String NS1_NN1_HOST_SVC = "ns1-nn2.example.com:9821"; - final String NS1_NN2_HOST = "ns1-nn1.example.com:9820"; + final String NS1_NN2_HOST = "ns1-nn1.example.com:8020"; final String NS1_NN2_HOST_SVC = "ns1-nn2.example.com:9821"; conf.set(DFS_NAMESERVICES, "ns1"); @@ -641,10 +641,10 @@ public class TestDFSUtil { public void testGetNNUris() throws Exception { HdfsConfiguration conf = new HdfsConfiguration(); - final String NS2_NN_ADDR = "ns2-nn.example.com:9820"; - final String NN1_ADDR = "nn.example.com:9820"; + final String NS2_NN_ADDR = "ns2-nn.example.com:8020"; + final String NN1_ADDR = "nn.example.com:8020"; final String NN1_SRVC_ADDR = "nn.example.com:9821"; - final String NN2_ADDR = "nn2.example.com:9820"; + final String NN2_ADDR = "nn2.example.com:8020"; conf.set(DFS_NAMESERVICES, "ns1"); conf.set(DFSUtil.addKeySuffixes( @@ -822,7 +822,7 @@ public class TestDFSUtil { // Make sure when config FS_DEFAULT_NAME_KEY using IP address, // it will automatically convert it to hostname HdfsConfiguration conf = new HdfsConfiguration(); - conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:9820"); + conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020"); Collectionfs.viewfs.mounttable.ClusterX.link./home -hdfs://nn1-clusterx.example.com:9820/home +hdfs://nn1-clusterx.example.com:8020/home fs.viewfs.mounttable.ClusterX.link./tmp -hdfs://nn1-clusterx.example.com:9820/tmp +hdfs://nn1-clusterx.example.com:8020/tmp fs.viewfs.mounttable.ClusterX.link./projects/foo -hdfs://nn2-clusterx.example.com:9820/projects/foo +hdfs://nn2-clusterx.example.com:8020/projects/foo fs.viewfs.mounttable.ClusterX.link./projects/bar -hdfs://nn3-clusterx.example.com:9820/projects/bar +hdfs://nn3-clusterx.example.com:8020/projects/bar uris = getInternalNameServiceUris(conf); assertEquals(1, uris.size()); for (URI uri : uris) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java index dd1d538f05d..88889f3c6a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java @@ -1148,7 +1148,7 @@ public class TestQuota { @Test public void testSetSpaceQuotaWhenStorageTypeIsWrong() throws Exception { Configuration conf = new HdfsConfiguration(); - conf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:9820"); + conf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020"); DFSAdmin admin = new DFSAdmin(conf); ByteArrayOutputStream err = new ByteArrayOutputStream(); PrintStream oldErr = System.err; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java index 5a8a39a89e6..3fbcd26aed1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java @@ -353,7 +353,7 @@ public class TestBlockTokenWithDFS { try { // prefer 
non-ephemeral port to avoid port collision on restartNameNode cluster = new MiniDFSCluster.Builder(conf) - .nameNodePort(ServerSocketUtil.getPort(19820, 100)) + .nameNodePort(ServerSocketUtil.getPort(18020, 100)) .nameNodeHttpPort(ServerSocketUtil.getPort(19870, 100)) .numDataNodes(numDataNodes) .build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java index 7627cf5c6a5..0b39456bf8f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java @@ -79,7 +79,7 @@ public class TestBlockTokenWithDFSStriped extends TestBlockTokenWithDFS { } cluster = new MiniDFSCluster.Builder(conf) - .nameNodePort(ServerSocketUtil.getPort(19820, 100)) + .nameNodePort(ServerSocketUtil.getPort(18020, 100)) .nameNodeHttpPort(ServerSocketUtil.getPort(19870, 100)) .numDataNodes(numDNs) .build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java index 560b32e0ee3..951adbdc545 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java @@ -100,7 +100,7 @@ public class TestBlockPoolManager { public void testSimpleSingleNS() throws Exception { Configuration conf = new Configuration(); conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, - "hdfs://mock1:9820"); + "hdfs://mock1:8020"); bpm.refreshNamenodes(conf); assertEquals("create #1\n", log.toString()); } @@ -110,8 +110,8 @@ public class TestBlockPoolManager { Configuration conf = new Configuration(); conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1,ns2"); - addNN(conf, "ns1", "mock1:9820"); - addNN(conf, "ns2", "mock1:9820"); + addNN(conf, "ns1", "mock1:8020"); + addNN(conf, "ns2", "mock1:8020"); bpm.refreshNamenodes(conf); assertEquals( "create #1\n" + @@ -141,9 +141,9 @@ public class TestBlockPoolManager { public void testInternalNameService() throws Exception { Configuration conf = new Configuration(); conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1,ns2,ns3"); - addNN(conf, "ns1", "mock1:9820"); - addNN(conf, "ns2", "mock1:9820"); - addNN(conf, "ns3", "mock1:9820"); + addNN(conf, "ns1", "mock1:8020"); + addNN(conf, "ns2", "mock1:8020"); + addNN(conf, "ns3", "mock1:8020"); conf.set(DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY, "ns1"); bpm.refreshNamenodes(conf); assertEquals("create #1\n", log.toString()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java index f3ffebe85e8..7e3a030b60d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java @@ -163,8 +163,8 @@ public class TestAllowFormat { // is configured in HA, then only 
DFS_NAMENODE_SHARED_EDITS_DIR_KEY // is considered. String localhost = "127.0.0.1"; - InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 9820); - InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9820); + InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020); + InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 8020); HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2); conf.set(DFS_NAMENODE_NAME_DIR_KEY, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java index 09d854493a3..15c20b4cb64 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java @@ -367,7 +367,7 @@ public class TestGetConf { public void testGetJournalNodes() throws Exception { final int nsCount = 3; - final String journalsBaseUri = "qjournal://jn0:9820;jn1:9820;jn2:9820"; + final String journalsBaseUri = "qjournal://jn0:8020;jn1:8020;jn2:8020"; setupStaticHostResolution(nsCount, "jn"); // With out Name service Id @@ -490,7 +490,7 @@ public class TestGetConf { @Test(expected = UnknownHostException.class, timeout = 10000) public void testUnknownJournalNodeHost() throws URISyntaxException, IOException { - String journalsBaseUri = "qjournal://jn1:9820;jn2:9820;jn3:9820"; + String journalsBaseUri = "qjournal://jn1:8020;jn2:8020;jn3:8020"; HdfsConfiguration conf = new HdfsConfiguration(false); conf.set(DFS_NAMENODE_SHARED_EDITS_DIR_KEY, journalsBaseUri + "/jndata"); @@ -504,7 +504,7 @@ public class TestGetConf { public void testJournalNodeUriError() throws URISyntaxException, IOException { final int nsCount = 3; - String journalsBaseUri = "qjournal://jn0 :9820;jn1:9820;jn2:9820"; + String journalsBaseUri = "qjournal://jn0 :8020;jn1:8020;jn2:8020"; setupStaticHostResolution(nsCount, "jn"); HdfsConfiguration conf = new HdfsConfiguration(false); conf.set(DFS_NAMENODE_SHARED_EDITS_DIR_KEY, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_0.23.9-FAILED.jhist b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_0.23.9-FAILED.jhist index b081fd837a7..b2f407a6f03 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_0.23.9-FAILED.jhist +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_0.23.9-FAILED.jhist @@ -1,7 +1,7 @@ Avro-Json 
{"type":"record","name":"Event","namespace":"org.apache.hadoop.mapreduce.jobhistory","fields":[{"name":"type","type":{"type":"enum","name":"EventType","symbols":["JOB_SUBMITTED","JOB_INITED","JOB_FINISHED","JOB_PRIORITY_CHANGED","JOB_STATUS_CHANGED","JOB_FAILED","JOB_KILLED","JOB_INFO_CHANGED","TASK_STARTED","TASK_FINISHED","TASK_FAILED","TASK_UPDATED","NORMALIZED_RESOURCE","MAP_ATTEMPT_STARTED","MAP_ATTEMPT_FINISHED","MAP_ATTEMPT_FAILED","MAP_ATTEMPT_KILLED","REDUCE_ATTEMPT_STARTED","REDUCE_ATTEMPT_FINISHED","REDUCE_ATTEMPT_FAILED","REDUCE_ATTEMPT_KILLED","SETUP_ATTEMPT_STARTED","SETUP_ATTEMPT_FINISHED","SETUP_ATTEMPT_FAILED","SETUP_ATTEMPT_KILLED","CLEANUP_ATTEMPT_STARTED","CLEANUP_ATTEMPT_FINISHED","CLEANUP_ATTEMPT_FAILED","CLEANUP_ATTEMPT_KILLED","AM_STARTED"]}},{"name":"event","type":[{"type":"record","name":"JobFinished","fields":[{"name":"jobid","type":"string"},{"name":"finishTime","type":"long"},{"name":"finishedMaps","type":"int"},{"name":"finishedReduces","type":"int"},{"name":"failedMaps","type":"int"},{"name":"failedReduces","type":"int"},{"name":"totalCounters","type":{"type":"record","name":"JhCounters","fields":[{"name":"name","type":"string"},{"name":"groups","type":{"type":"array","items":{"type":"record","name":"JhCounterGroup","fields":[{"name":"name","type":"string"},{"name":"displayName","type":"string"},{"name":"counts","type":{"type":"array","items":{"type":"record","name":"JhCounter","fields":[{"name":"name","type":"string"},{"name":"displayName","type":"string"},{"name":"value","type":"long"}]}}}]}}}]}},{"name":"mapCounters","type":"JhCounters"},{"name":"reduceCounters","type":"JhCounters"}]},{"type":"record","name":"JobInfoChange","fields":[{"name":"jobid","type":"string"},{"name":"submitTime","type":"long"},{"name":"launchTime","type":"long"}]},{"type":"record","name":"JobInited","fields":[{"name":"jobid","type":"string"},{"name":"launchTime","type":"long"},{"name":"totalMaps","type":"int"},{"name":"totalReduces","type":"int"},{"name":"jobStatus","type":"string"},{"name":"uberized","type":"boolean"}]},{"type":"record","name":"AMStarted","fields":[{"name":"applicationAttemptId","type":"string"},{"name":"startTime","type":"long"},{"name":"containerId","type":"string"},{"name":"nodeManagerHost","type":"string"},{"name":"nodeManagerPort","type":"int"},{"name":"nodeManagerHttpPort","type":"int"}]},{"type":"record","name":"JobPriorityChange","fields":[{"name":"jobid","type":"string"},{"name":"priority","type":"string"}]},{"type":"record","name":"JobStatusChanged","fields":[{"name":"jobid","type":"string"},{"name":"jobStatus","type":"string"}]},{"type":"record","name":"JobSubmitted","fields":[{"name":"jobid","type":"string"},{"name":"jobName","type":"string"},{"name":"userName","type":"string"},{"name":"submitTime","type":"long"},{"name":"jobConfPath","type":"string"},{"name":"acls","type":{"type":"map","values":"string"}},{"name":"jobQueueName","type":"string"}]},{"type":"record","name":"JobUnsuccessfulCompletion","fields":[{"name":"jobid","type":"string"},{"name":"finishTime","type":"long"},{"name":"finishedMaps","type":"int"},{"name":"finishedReduces","type":"int"},{"name":"jobStatus","type":"string"}]},{"type":"record","name":"MapAttemptFinished","fields":[{"name":"taskid","type":"string"},{"name":"attemptId","type":"string"},{"name":"taskType","type":"string"},{"name":"taskStatus","type":"string"},{"name":"mapFinishTime","type":"long"},{"name":"finishTime","type":"long"},{"name":"hostname","type":"string"},{"name":"port","type":"int"},{"name":"rackname","type":"str
ing"},{"name":"state","type":"string"},{"name":"counters","type":"JhCounters"},{"name":"clockSplits","type":{"type":"array","items":"int"}},{"name":"cpuUsages","type":{"type":"array","items":"int"}},{"name":"vMemKbytes","type":{"type":"array","items":"int"}},{"name":"physMemKbytes","type":{"type":"array","items":"int"}}]},{"type":"record","name":"ReduceAttemptFinished","fields":[{"name":"taskid","type":"string"},{"name":"attemptId","type":"string"},{"name":"taskType","type":"string"},{"name":"taskStatus","type":"string"},{"name":"shuffleFinishTime","type":"long"},{"name":"sortFinishTime","type":"long"},{"name":"finishTime","type":"long"},{"name":"hostname","type":"string"},{"name":"port","type":"int"},{"name":"rackname","type":"string"},{"name":"state","type":"string"},{"name":"counters","type":"JhCounters"},{"name":"clockSplits","type":{"type":"array","items":"int"}},{"name":"cpuUsages","type":{"type":"array","items":"int"}},{"name":"vMemKbytes","type":{"type":"array","items":"int"}},{"name":"physMemKbytes","type":{"type":"array","items":"int"}}]},{"type":"record","name":"TaskAttemptFinished","fields":[{"name":"taskid","type":"string"},{"name":"attemptId","type":"string"},{"name":"taskType","type":"string"},{"name":"taskStatus","type":"string"},{"name":"finishTime","type":"long"},{"name":"rackname","type":"string"},{"name":"hostname","type":"string"},{"name":"state","type":"string"},{"name":"counters","type":"JhCounters"}]},{"type":"record","name":"TaskAttemptStarted","fields":[{"name":"taskid","type":"string"},{"name":"taskType","type":"string"},{"name":"attemptId","type":"string"},{"name":"startTime","type":"long"},{"name":"trackerName","type":"string"},{"name":"httpPort","type":"int"},{"name":"shufflePort","type":"int"},{"name":"containerId","type":"string"}]},{"type":"record","name":"TaskAttemptUnsuccessfulCompletion","fields":[{"name":"taskid","type":"string"},{"name":"taskType","type":"string"},{"name":"attemptId","type":"string"},{"name":"finishTime","type":"long"},{"name":"hostname","type":"string"},{"name":"port","type":"int"},{"name":"rackname","type":"string"},{"name":"status","type":"string"},{"name":"error","type":"string"},{"name":"clockSplits","type":{"type":"array","items":"int"}},{"name":"cpuUsages","type":{"type":"array","items":"int"}},{"name":"vMemKbytes","type":{"type":"array","items":"int"}},{"name":"physMemKbytes","type":{"type":"array","items":"int"}}]},{"type":"record","name":"TaskFailed","fields":[{"name":"taskid","type":"string"},{"name":"taskType","type":"string"},{"name":"finishTime","type":"long"},{"name":"error","type":"string"},{"name":"failedDueToAttempt","type":["null","string"]},{"name":"status","type":"string"}]},{"type":"record","name":"TaskFinished","fields":[{"name":"taskid","type":"string"},{"name":"taskType","type":"string"},{"name":"finishTime","type":"long"},{"name":"status","type":"string"},{"name":"counters","type":"JhCounters"},{"name":"successfulAttemptId","type":"string"}]},{"type":"record","name":"TaskStarted","fields":[{"name":"taskid","type":"string"},{"name":"taskType","type":"string"},{"name":"startTime","type":"long"},{"name":"splitLocations","type":"string"}]},{"type":"record","name":"TaskUpdated","fields":[{"name":"taskid","type":"string"},{"name":"finishTime","type":"long"}]}]}]} 
{"type":"AM_STARTED","event":{"org.apache.hadoop.mapreduce.jobhistory.AMStarted":{"applicationAttemptId":"appattempt_1399356417814_19732_000001","startTime":1400251473264,"containerId":"container_1399356417814_19732_01_000001","nodeManagerHost":"localhost","nodeManagerPort":8041,"nodeManagerHttpPort":8042}}} - {"type":"JOB_SUBMITTED","event":{"org.apache.hadoop.mapreduce.jobhistory.JobSubmitted":{"jobid":"job_1399356417814_19732","jobName":"Fail job","userName":"rushabhs","submitTime":1400251470231,"jobConfPath":"hdfs://localhost:9820/user/rushabhs/.staging/job_1399356417814_19732/job.xml","acls":{"mapreduce.job.acl-view-job":" ","mapreduce.job.acl-modify-job":" "},"jobQueueName":"unfunded"}}} + {"type":"JOB_SUBMITTED","event":{"org.apache.hadoop.mapreduce.jobhistory.JobSubmitted":{"jobid":"job_1399356417814_19732","jobName":"Fail job","userName":"rushabhs","submitTime":1400251470231,"jobConfPath":"hdfs://localhost:8020/user/rushabhs/.staging/job_1399356417814_19732/job.xml","acls":{"mapreduce.job.acl-view-job":" ","mapreduce.job.acl-modify-job":" "},"jobQueueName":"unfunded"}}} {"type":"JOB_INITED","event":{"org.apache.hadoop.mapreduce.jobhistory.JobInited":{"jobid":"job_1399356417814_19732","launchTime":1400251475763,"totalMaps":2,"totalReduces":1,"jobStatus":"INITED","uberized":false}}} {"type":"JOB_INFO_CHANGED","event":{"org.apache.hadoop.mapreduce.jobhistory.JobInfoChange":{"jobid":"job_1399356417814_19732","submitTime":1400251470231,"launchTime":1400251475763}}} {"type":"TASK_STARTED","event":{"org.apache.hadoop.mapreduce.jobhistory.TaskStarted":{"taskid":"task_1399356417814_19732_m_000000","taskType":"MAP","startTime":1400251475786,"splitLocations":"localhost,localhost,localhost"}}} diff --git a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm index 8aa89f01d34..f2b3deeca18 100644 --- a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm +++ b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm @@ -63,8 +63,8 @@ $H3 Basic Usage The most common invocation of DistCp is an inter-cluster copy: - bash$ hadoop distcp hdfs://nn1:9820/foo/bar \ - hdfs://nn2:9820/bar/foo + bash$ hadoop distcp hdfs://nn1:8020/foo/bar \ + hdfs://nn2:8020/bar/foo This will expand the namespace under `/foo/bar` on nn1 into a temporary file, partition its contents among a set of map tasks, and start a copy on each @@ -72,19 +72,19 @@ $H3 Basic Usage One can also specify multiple source directories on the command line: - bash$ hadoop distcp hdfs://nn1:9820/foo/a \ - hdfs://nn1:9820/foo/b \ - hdfs://nn2:9820/bar/foo + bash$ hadoop distcp hdfs://nn1:8020/foo/a \ + hdfs://nn1:8020/foo/b \ + hdfs://nn2:8020/bar/foo Or, equivalently, from a file using the -f option: - bash$ hadoop distcp -f hdfs://nn1:9820/srclist \ - hdfs://nn2:9820/bar/foo + bash$ hadoop distcp -f hdfs://nn1:8020/srclist \ + hdfs://nn2:8020/bar/foo Where `srclist` contains - hdfs://nn1:9820/foo/a - hdfs://nn1:9820/foo/b + hdfs://nn1:8020/foo/a + hdfs://nn1:8020/foo/b When copying from multiple sources, DistCp will abort the copy with an error message if two sources collide, but collisions at the destination are @@ -126,35 +126,35 @@ $H3 Update and Overwrite Consider a copy from `/source/first/` and `/source/second/` to `/target/`, where the source paths have the following contents: - hdfs://nn1:9820/source/first/1 - hdfs://nn1:9820/source/first/2 - hdfs://nn1:9820/source/second/10 - hdfs://nn1:9820/source/second/20 + hdfs://nn1:8020/source/first/1 + 
hdfs://nn1:8020/source/first/2 + hdfs://nn1:8020/source/second/10 + hdfs://nn1:8020/source/second/20 When DistCp is invoked without `-update` or `-overwrite`, the DistCp defaults would create directories `first/` and `second/`, under `/target`. Thus: - distcp hdfs://nn1:9820/source/first hdfs://nn1:9820/source/second hdfs://nn2:9820/target + distcp hdfs://nn1:8020/source/first hdfs://nn1:8020/source/second hdfs://nn2:8020/target would yield the following contents in `/target`: - hdfs://nn2:9820/target/first/1 - hdfs://nn2:9820/target/first/2 - hdfs://nn2:9820/target/second/10 - hdfs://nn2:9820/target/second/20 + hdfs://nn2:8020/target/first/1 + hdfs://nn2:8020/target/first/2 + hdfs://nn2:8020/target/second/10 + hdfs://nn2:8020/target/second/20 When either `-update` or `-overwrite` is specified, the **contents** of the source-directories are copied to target, and not the source directories themselves. Thus: - distcp -update hdfs://nn1:9820/source/first hdfs://nn1:9820/source/second hdfs://nn2:9820/target + distcp -update hdfs://nn1:8020/source/first hdfs://nn1:8020/source/second hdfs://nn2:8020/target would yield the following contents in `/target`: - hdfs://nn2:9820/target/1 - hdfs://nn2:9820/target/2 - hdfs://nn2:9820/target/10 - hdfs://nn2:9820/target/20 + hdfs://nn2:8020/target/1 + hdfs://nn2:8020/target/2 + hdfs://nn2:8020/target/10 + hdfs://nn2:8020/target/20 By extension, if both source folders contained a file with the same name (say, `0`), then both sources would map an entry to `/target/0` at the @@ -162,27 +162,27 @@ $H3 Update and Overwrite Now, consider the following copy operation: - distcp hdfs://nn1:9820/source/first hdfs://nn1:9820/source/second hdfs://nn2:9820/target + distcp hdfs://nn1:8020/source/first hdfs://nn1:8020/source/second hdfs://nn2:8020/target With sources/sizes: - hdfs://nn1:9820/source/first/1 32 - hdfs://nn1:9820/source/first/2 32 - hdfs://nn1:9820/source/second/10 64 - hdfs://nn1:9820/source/second/20 32 + hdfs://nn1:8020/source/first/1 32 + hdfs://nn1:8020/source/first/2 32 + hdfs://nn1:8020/source/second/10 64 + hdfs://nn1:8020/source/second/20 32 And destination/sizes: - hdfs://nn2:9820/target/1 32 - hdfs://nn2:9820/target/10 32 - hdfs://nn2:9820/target/20 64 + hdfs://nn2:8020/target/1 32 + hdfs://nn2:8020/target/10 32 + hdfs://nn2:8020/target/20 64 Will effect: - hdfs://nn2:9820/target/1 32 - hdfs://nn2:9820/target/2 32 - hdfs://nn2:9820/target/10 64 - hdfs://nn2:9820/target/20 32 + hdfs://nn2:8020/target/1 32 + hdfs://nn2:8020/target/2 32 + hdfs://nn2:8020/target/10 64 + hdfs://nn2:8020/target/20 32 `1` is skipped because the file-length and contents match. `2` is copied because it doesn't exist at the target. 
`10` and `20` are overwritten since diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java index 6928cdf045b..9361fc18f3c 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java @@ -37,36 +37,36 @@ public class TestOptionsParser { @Test public void testParseIgnoreFailure() { DistCpOptions options = OptionsParser.parse(new String[] { - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertFalse(options.shouldIgnoreFailures()); options = OptionsParser.parse(new String[] { "-i", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldIgnoreFailures()); } @Test public void testParseOverwrite() { DistCpOptions options = OptionsParser.parse(new String[] { - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertFalse(options.shouldOverwrite()); options = OptionsParser.parse(new String[] { "-overwrite", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldOverwrite()); try { OptionsParser.parse(new String[] { "-update", "-overwrite", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.fail("Update and overwrite aren't allowed together"); } catch (IllegalArgumentException ignore) { } @@ -75,44 +75,44 @@ public class TestOptionsParser { @Test public void testLogPath() { DistCpOptions options = OptionsParser.parse(new String[] { - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertNull(options.getLogPath()); options = OptionsParser.parse(new String[] { "-log", - "hdfs://localhost:9820/logs", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); - Assert.assertEquals(options.getLogPath(), new Path("hdfs://localhost:9820/logs")); + "hdfs://localhost:8020/logs", + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); + Assert.assertEquals(options.getLogPath(), new Path("hdfs://localhost:8020/logs")); } @Test public void testParseBlokcing() { DistCpOptions options = OptionsParser.parse(new String[] { - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldBlock()); options = OptionsParser.parse(new String[] { "-async", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertFalse(options.shouldBlock()); } @Test public void testParsebandwidth() { DistCpOptions options = OptionsParser.parse(new String[] { - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); 
Assert.assertEquals(options.getMapBandwidth(), 0, DELTA); options = OptionsParser.parse(new String[] { "-bandwidth", "11.2", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertEquals(options.getMapBandwidth(), 11.2, DELTA); } @@ -121,8 +121,8 @@ public class TestOptionsParser { OptionsParser.parse(new String[] { "-bandwidth", "-11", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); } @Test(expected=IllegalArgumentException.class) @@ -130,22 +130,22 @@ public class TestOptionsParser { OptionsParser.parse(new String[] { "-bandwidth", "0", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); } @Test public void testParseSkipCRC() { DistCpOptions options = OptionsParser.parse(new String[] { - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertFalse(options.shouldSkipCRC()); options = OptionsParser.parse(new String[] { "-update", "-skipcrccheck", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldSyncFolder()); Assert.assertTrue(options.shouldSkipCRC()); } @@ -153,22 +153,22 @@ public class TestOptionsParser { @Test public void testParseAtomicCommit() { DistCpOptions options = OptionsParser.parse(new String[] { - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertFalse(options.shouldAtomicCommit()); options = OptionsParser.parse(new String[] { "-atomic", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldAtomicCommit()); try { OptionsParser.parse(new String[] { "-atomic", "-update", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.fail("Atomic and sync folders were allowed"); } catch (IllegalArgumentException ignore) { } } @@ -176,30 +176,30 @@ public class TestOptionsParser { @Test public void testParseWorkPath() { DistCpOptions options = OptionsParser.parse(new String[] { - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertNull(options.getAtomicWorkPath()); options = OptionsParser.parse(new String[] { "-atomic", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertNull(options.getAtomicWorkPath()); options = OptionsParser.parse(new String[] { "-atomic", "-tmp", - "hdfs://localhost:9820/work", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); - Assert.assertEquals(options.getAtomicWorkPath(), new Path("hdfs://localhost:9820/work")); + "hdfs://localhost:8020/work", + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); + Assert.assertEquals(options.getAtomicWorkPath(), new Path("hdfs://localhost:8020/work")); 
try { OptionsParser.parse(new String[] { "-tmp", - "hdfs://localhost:9820/work", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/work", + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.fail("work path was allowed without -atomic switch"); } catch (IllegalArgumentException ignore) {} } @@ -207,37 +207,37 @@ public class TestOptionsParser { @Test public void testParseSyncFolders() { DistCpOptions options = OptionsParser.parse(new String[] { - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertFalse(options.shouldSyncFolder()); options = OptionsParser.parse(new String[] { "-update", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldSyncFolder()); } @Test public void testParseDeleteMissing() { DistCpOptions options = OptionsParser.parse(new String[] { - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertFalse(options.shouldDeleteMissing()); options = OptionsParser.parse(new String[] { "-update", "-delete", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldSyncFolder()); Assert.assertTrue(options.shouldDeleteMissing()); options = OptionsParser.parse(new String[] { "-overwrite", "-delete", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldOverwrite()); Assert.assertTrue(options.shouldDeleteMissing()); @@ -245,8 +245,8 @@ public class TestOptionsParser { OptionsParser.parse(new String[] { "-atomic", "-delete", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.fail("Atomic and delete folders were allowed"); } catch (IllegalArgumentException ignore) { } } @@ -254,38 +254,38 @@ public class TestOptionsParser { @Test public void testParseMaps() { DistCpOptions options = OptionsParser.parse(new String[] { - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertEquals(options.getMaxMaps(), DistCpConstants.DEFAULT_MAPS); options = OptionsParser.parse(new String[] { "-m", "1", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertEquals(options.getMaxMaps(), 1); options = OptionsParser.parse(new String[] { "-m", "0", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertEquals(options.getMaxMaps(), 1); try { OptionsParser.parse(new String[] { "-m", "hello", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.fail("Non numberic map parsed"); } catch (IllegalArgumentException ignore) { } try { OptionsParser.parse(new String[] { "-mapredXslConf", - 
"hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.fail("Non numberic map parsed"); } catch (IllegalArgumentException ignore) { } } @@ -293,8 +293,8 @@ public class TestOptionsParser { @Test public void testParseNumListstatusThreads() { DistCpOptions options = OptionsParser.parse(new String[] { - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); // If command line argument isn't set, we expect .getNumListstatusThreads // option to be zero (so that we know when to override conf properties). Assert.assertEquals(0, options.getNumListstatusThreads()); @@ -302,23 +302,23 @@ public class TestOptionsParser { options = OptionsParser.parse(new String[] { "--numListstatusThreads", "12", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertEquals(12, options.getNumListstatusThreads()); options = OptionsParser.parse(new String[] { "--numListstatusThreads", "0", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertEquals(0, options.getNumListstatusThreads()); try { OptionsParser.parse(new String[] { "--numListstatusThreads", "hello", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.fail("Non numberic numListstatusThreads parsed"); } catch (IllegalArgumentException ignore) { } @@ -326,8 +326,8 @@ public class TestOptionsParser { options = OptionsParser.parse(new String[] { "--numListstatusThreads", "100", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertEquals(DistCpOptions.MAX_NUM_LISTSTATUS_THREADS, options.getNumListstatusThreads()); } @@ -336,10 +336,10 @@ public class TestOptionsParser { public void testSourceListing() { DistCpOptions options = OptionsParser.parse(new String[] { "-f", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertEquals(options.getSourceFileListing(), - new Path("hdfs://localhost:9820/source/first")); + new Path("hdfs://localhost:8020/source/first")); } @Test @@ -347,9 +347,9 @@ public class TestOptionsParser { try { OptionsParser.parse(new String[] { "-f", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.fail("Both source listing & source paths allowed"); } catch (IllegalArgumentException ignore) {} } @@ -358,7 +358,7 @@ public class TestOptionsParser { public void testMissingSourceInfo() { try { OptionsParser.parse(new String[] { - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/target/"}); Assert.fail("Neither source listing not source paths present"); } catch (IllegalArgumentException ignore) {} } @@ -367,7 +367,7 @@ public class TestOptionsParser { public void testMissingTarget() { try { OptionsParser.parse(new String[] { - "-f", "hdfs://localhost:9820/source"}); + "-f", 
"hdfs://localhost:8020/source"}); Assert.fail("Missing target allowed"); } catch (IllegalArgumentException ignore) {} } @@ -376,7 +376,7 @@ public class TestOptionsParser { public void testInvalidArgs() { try { OptionsParser.parse(new String[] { - "-m", "-f", "hdfs://localhost:9820/source"}); + "-m", "-f", "hdfs://localhost:8020/source"}); Assert.fail("Missing map value"); } catch (IllegalArgumentException ignore) {} } @@ -387,14 +387,14 @@ public class TestOptionsParser { "-strategy", "dynamic", "-f", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertEquals(options.getCopyStrategy(), "dynamic"); options = OptionsParser.parse(new String[] { "-f", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertEquals(options.getCopyStrategy(), DistCpConstants.UNIFORMSIZE); } @@ -402,17 +402,17 @@ public class TestOptionsParser { public void testTargetPath() { DistCpOptions options = OptionsParser.parse(new String[] { "-f", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); - Assert.assertEquals(options.getTargetPath(), new Path("hdfs://localhost:9820/target/")); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); + Assert.assertEquals(options.getTargetPath(), new Path("hdfs://localhost:8020/target/")); } @Test public void testPreserve() { DistCpOptions options = OptionsParser.parse(new String[] { "-f", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION)); @@ -423,8 +423,8 @@ public class TestOptionsParser { options = OptionsParser.parse(new String[] { "-p", "-f", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION)); @@ -436,8 +436,8 @@ public class TestOptionsParser { options = OptionsParser.parse(new String[] { "-p", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION)); @@ -450,8 +450,8 @@ public class TestOptionsParser { options = OptionsParser.parse(new String[] { "-pbr", "-f", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION)); @@ -464,8 +464,8 @@ public class TestOptionsParser { options = OptionsParser.parse(new String[] { "-pbrgup", "-f", - "hdfs://localhost:9820/source/first", - 
"hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION)); @@ -478,8 +478,8 @@ public class TestOptionsParser { options = OptionsParser.parse(new String[] { "-pbrgupcaxt", "-f", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION)); @@ -493,8 +493,8 @@ public class TestOptionsParser { options = OptionsParser.parse(new String[] { "-pc", "-f", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION)); @@ -507,8 +507,8 @@ public class TestOptionsParser { options = OptionsParser.parse(new String[] { "-p", "-f", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); Assert.assertEquals(DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT.length() - 2, options.getPreserveAttributes().size()); @@ -516,15 +516,15 @@ public class TestOptionsParser { OptionsParser.parse(new String[] { "-pabcd", "-f", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target"}); Assert.fail("Invalid preserve attribute"); } catch (NoSuchElementException ignore) {} Builder builder = new DistCpOptions.Builder( - new Path("hdfs://localhost:9820/source/first"), - new Path("hdfs://localhost:9820/target/")); + new Path("hdfs://localhost:8020/source/first"), + new Path("hdfs://localhost:8020/target/")); Assert.assertFalse( builder.build().shouldPreserve(FileAttribute.PERMISSION)); builder.preserve(FileAttribute.PERMISSION); @@ -552,8 +552,8 @@ public class TestOptionsParser { DistCpOptions options = OptionsParser.parse(new String[] { "-atomic", "-i", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); options.appendToConf(conf); Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false)); Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(), false)); @@ -570,8 +570,8 @@ public class TestOptionsParser { "-pu", "-bandwidth", "11.2", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/"}); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/"}); options.appendToConf(conf); Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false)); Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(), false)); @@ -644,8 +644,8 @@ public class TestOptionsParser { DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false)); DistCpOptions options = OptionsParser.parse(new String[] { "-update", - "-append", "hdfs://localhost:9820/source/first", - 
"hdfs://localhost:9820/target/" }); + "-append", "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/" }); options.appendToConf(conf); Assert.assertTrue(conf.getBoolean( DistCpOptionSwitch.APPEND.getConfigLabel(), false)); @@ -655,8 +655,8 @@ public class TestOptionsParser { // make sure -append is only valid when -update is specified try { OptionsParser.parse(new String[] { "-append", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/" }); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/" }); fail("Append should fail if update option is not specified"); } catch (IllegalArgumentException e) { GenericTestUtils.assertExceptionContains( @@ -667,8 +667,8 @@ public class TestOptionsParser { try { OptionsParser.parse(new String[] { "-append", "-update", "-skipcrccheck", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/" }); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/" }); fail("Append should fail if skipCrc option is specified"); } catch (IllegalArgumentException e) { GenericTestUtils.assertExceptionContains( @@ -687,8 +687,8 @@ public class TestOptionsParser { DistCpOptions options = OptionsParser.parse(new String[] { "-update", optionStr, "s1", "s2", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/" }); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/" }); options.appendToConf(conf); Assert.assertTrue(conf.getBoolean(optionLabel, false)); Assert.assertTrue(isDiff? @@ -698,8 +698,8 @@ public class TestOptionsParser { options = OptionsParser.parse(new String[] { optionStr, "s1", ".", "-update", - "hdfs://localhost:9820/source/first", - "hdfs://localhost:9820/target/" }); + "hdfs://localhost:8020/source/first", + "hdfs://localhost:8020/target/" }); options.appendToConf(conf); Assert.assertTrue(conf.getBoolean(optionLabel, false)); Assert.assertTrue(isDiff? 
@@ -710,8 +710,8 @@ public class TestOptionsParser {
     // -diff/-rdiff requires two option values
     try {
       OptionsParser.parse(new String[] {optionStr, "s1", "-update",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/" });
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/" });
       fail(optionStr + " should fail with only one snapshot name");
     } catch (IllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains(
@@ -721,8 +721,8 @@
     // make sure -diff/-rdiff is only valid when -update is specified
     try {
       OptionsParser.parse(new String[] {optionStr, "s1", "s2",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/" });
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/" });
       fail(optionStr + " should fail if -update option is not specified");
     } catch (IllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains(
@@ -732,8 +732,8 @@
     try {
       OptionsParser.parse(new String[] {
           "-diff", "s1", "s2", "-update", "-delete",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/" });
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/" });
       fail("Should fail as -delete and -diff/-rdiff are mutually exclusive");
     } catch (IllegalArgumentException e) {
       assertExceptionContains(
@@ -743,8 +743,8 @@
     try {
       OptionsParser.parse(new String[] {
           "-diff", "s1", "s2", "-delete",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/" });
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/" });
       fail("Should fail as -delete and -diff/-rdiff are mutually exclusive");
     } catch (IllegalArgumentException e) {
       assertExceptionContains(
@@ -754,8 +754,8 @@
     try {
       OptionsParser.parse(new String[] {optionStr, "s1", "s2",
           "-delete", "-overwrite",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/" });
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/" });
       fail("Should fail as -delete and -diff are mutually exclusive");
     } catch (IllegalArgumentException e) {
       assertExceptionContains(
@@ -768,8 +768,8 @@
           optionStr, "s1", "s2",
           optionStrOther, "s2", "s1",
           "-update",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/" });
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/" });
       fail(optionStr + " should fail if " + optionStrOther
           + " is also specified");
     } catch (IllegalArgumentException e) {
@@ -791,15 +791,15 @@ public class TestOptionsParser {
   @Test
   public void testExclusionsOption() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertNull(options.getFiltersFile());
 
     options = OptionsParser.parse(new String[] {
         "-filters",
         "/tmp/filters.txt",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertEquals(options.getFiltersFile(), "/tmp/filters.txt");
   }
 }
diff --git a/hadoop-tools/hadoop-openstack/src/site/markdown/index.md b/hadoop-tools/hadoop-openstack/src/site/markdown/index.md
index 7c5e783dbfb..1815f60c613 100644
--- a/hadoop-tools/hadoop-openstack/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-openstack/src/site/markdown/index.md
@@ -165,7 +165,7 @@ Hadoop uses URIs to refer to files within a filesystem. Some common examples
 are
     local://etc/hosts
     hdfs://cluster1/users/example/data/set1
-    hdfs://cluster2.example.org:9820/users/example/data/set1
+    hdfs://cluster2.example.org:8020/users/example/data/set1
 The Swift Filesystem Client adds a new URL type `swift`. In a Swift Filesystem
 URL, the hostname part of a URL identifies the container and the service to
 work with; the path the name of the object. Here are some examples
diff --git a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
index c096229c6f0..c537195f6a6 100644
--- a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
+++ b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
@@ -4547,7 +4547,7 @@
     "yarn.nodemanager.keytab" : "/etc/krb5.keytab",
     "mapreduce.task.io.sort.factor" : "10",
     "yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
-    "mapreduce.job.working.dir" : "hdfs://a2115.smile.com:9820/user/jenkins",
+    "mapreduce.job.working.dir" : "hdfs://a2115.smile.com:8020/user/jenkins",
     "yarn.admin.acl" : "*",
     "mapreduce.job.speculative.speculativecap" : "0.1",
     "dfs.namenode.num.checkpoints.retained" : "2",
@@ -4795,7 +4795,7 @@
     "ftp.stream-buffer-size" : "4096",
     "dfs.namenode.avoid.write.stale.datanode" : "false",
     "hadoop.security.group.mapping.ldap.search.attr.member" : "member",
-    "mapreduce.output.fileoutputformat.outputdir" : "hdfs://a2115.smile.com:9820/user/jenkins/tera-gen-1",
+    "mapreduce.output.fileoutputformat.outputdir" : "hdfs://a2115.smile.com:8020/user/jenkins/tera-gen-1",
     "dfs.blockreport.initialDelay" : "0",
     "yarn.nm.liveness-monitor.expiry-interval-ms" : "600000",
     "hadoop.http.authentication.token.validity" : "36000",
@@ -4839,7 +4839,7 @@
     "hadoop.security.auth_to_local" : "DEFAULT",
     "dfs.secondary.namenode.kerberos.internal.spnego.principal" : "${dfs.web.authentication.kerberos.principal}",
     "ftp.client-write-packet-size" : "65536",
-    "fs.defaultFS" : "hdfs://a2115.smile.com:9820",
+    "fs.defaultFS" : "hdfs://a2115.smile.com:8020",
     "yarn.nodemanager.address" : "0.0.0.0:0",
     "yarn.scheduler.fair.assignmultiple" : "true",
     "yarn.resourcemanager.scheduler.client.thread-count" : "50",
@@ -9628,7 +9628,7 @@
     "yarn.nodemanager.keytab" : "/etc/krb5.keytab",
     "mapreduce.task.io.sort.factor" : "10",
     "yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
-    "mapreduce.job.working.dir" : "hdfs://a2115.smile.com:9820/user/jenkins",
+    "mapreduce.job.working.dir" : "hdfs://a2115.smile.com:8020/user/jenkins",
     "yarn.admin.acl" : "*",
     "mapreduce.job.speculative.speculativecap" : "0.1",
     "dfs.namenode.num.checkpoints.retained" : "2",
@@ -9876,7 +9876,7 @@
     "ftp.stream-buffer-size" : "4096",
     "dfs.namenode.avoid.write.stale.datanode" : "false",
     "hadoop.security.group.mapping.ldap.search.attr.member" : "member",
-    "mapreduce.output.fileoutputformat.outputdir" : "hdfs://a2115.smile.com:9820/user/jenkins/tera-gen-2",
+    "mapreduce.output.fileoutputformat.outputdir" : "hdfs://a2115.smile.com:8020/user/jenkins/tera-gen-2",
     "dfs.blockreport.initialDelay" : "0",
     "yarn.nm.liveness-monitor.expiry-interval-ms" : "600000",
     "hadoop.http.authentication.token.validity" : "36000",
@@ -9920,7 +9920,7 @@
     "hadoop.security.auth_to_local" : "DEFAULT",
     "dfs.secondary.namenode.kerberos.internal.spnego.principal" : "${dfs.web.authentication.kerberos.principal}",
     "ftp.client-write-packet-size" : "65536",
-    "fs.defaultFS" : "hdfs://a2115.smile.com:9820",
+    "fs.defaultFS" : "hdfs://a2115.smile.com:8020",
     "yarn.nodemanager.address" : "0.0.0.0:0",
     "yarn.scheduler.fair.assignmultiple" : "true",
     "yarn.resourcemanager.scheduler.client.thread-count" : "50",
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java
index cd877b27383..91602e1d3b3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java
@@ -147,7 +147,7 @@ public class RegistryTestHelper extends Assert {
     Map url = addressList.get(0);
     String addr = url.get("uri");
     assertTrue(addr.contains("http"));
-    assertTrue(addr.contains(":9820"));
+    assertTrue(addr.contains(":8020"));
 
     Endpoint nnipc = findEndpoint(record, NNIPC, false, 1,2);
     assertEquals("wrong protocol in " + nnipc, ProtocolTypes.PROTOCOL_THRIFT,
@@ -275,7 +275,7 @@ public class RegistryTestHelper extends Assert {
             new URI("http", hostname + ":80", "/")));
     entry.addExternalEndpoint(
         restEndpoint(API_WEBHDFS,
-            new URI("http", hostname + ":9820", "/")));
+            new URI("http", hostname + ":8020", "/")));
 
     Endpoint endpoint = ipcEndpoint(API_HDFS, null);
     endpoint.addresses.add(RegistryTypeUtils.hostnamePortPair(hostname, 8030));
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/TestPBRecordImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/TestPBRecordImpl.java
index 6ceaa750f39..05ead9df4ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/TestPBRecordImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/TestPBRecordImpl.java
@@ -64,7 +64,7 @@ public class TestPBRecordImpl {
     LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
     assertTrue(ret instanceof LocalResourcePBImpl);
     ret.setResource(URL.fromPath(new Path(
-        "hdfs://y.ak:9820/foo/bar")));
+        "hdfs://y.ak:8020/foo/bar")));
     ret.setSize(4344L);
     ret.setTimestamp(3141592653589793L);
     ret.setVisibility(LocalResourceVisibility.PUBLIC);