HDFS-12990. Change default NameNode RPC port back to 8020. Contributed by Xiao Chen.

Author: Anu Engineer
Date:   2018-02-06 13:43:45 -08:00
Parent: 2dd960de98
Commit: 4304fcd5bd

26 changed files with 257 additions and 257 deletions


@@ -98,7 +98,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
/**
* CallQueue related settings. These are not used directly, but rather
* combined with a namespace and port. For instance:
- * IPC_NAMESPACE + ".9820." + IPC_CALLQUEUE_IMPL_KEY
+ * IPC_NAMESPACE + ".8020." + IPC_CALLQUEUE_IMPL_KEY
*/
public static final String IPC_NAMESPACE = "ipc";
public static final String IPC_CALLQUEUE_IMPL_KEY = "callqueue.impl";
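As a side note on the comment above: combined with the new default port, the fully composed call-queue key becomes ipc.8020.callqueue.impl. A minimal sketch of that composition (using FairCallQueue as the value is only an illustration, not part of this patch):

```java
import org.apache.hadoop.conf.Configuration;

public class CallQueueKeySketch {
  public static void main(String[] args) {
    // IPC_NAMESPACE + ".8020." + IPC_CALLQUEUE_IMPL_KEY, as the comment describes
    String key = "ipc" + "." + "8020" + "." + "callqueue.impl";
    Configuration conf = new Configuration();
    // FairCallQueue is one available implementation; choosing it here is illustrative.
    conf.set(key, "org.apache.hadoop.ipc.FairCallQueue");
    System.out.println(key + " = " + conf.get(key));  // ipc.8020.callqueue.impl = ...
  }
}
```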


@@ -83,11 +83,11 @@ import org.slf4j.LoggerFactory;
*
* <p>Examples:</p>
* <p><blockquote><pre>
- * $ bin/hadoop dfs -fs darwin:9820 -ls /data
- * list /data directory in dfs with namenode darwin:9820
+ * $ bin/hadoop dfs -fs darwin:8020 -ls /data
+ * list /data directory in dfs with namenode darwin:8020
*
- * $ bin/hadoop dfs -D fs.default.name=darwin:9820 -ls /data
- * list /data directory in dfs with namenode darwin:9820
+ * $ bin/hadoop dfs -D fs.default.name=darwin:8020 -ls /data
+ * list /data directory in dfs with namenode darwin:8020
*
* $ bin/hadoop dfs -conf core-site.xml -conf hdfs-site.xml -ls /data
* list /data directory in dfs with multiple conf files specified.


@@ -47,6 +47,6 @@ public class TestDelegateToFileSystem {
@Test
public void testDefaultURIwithPort() throws Exception {
- testDefaultUriInternal("hdfs://dummyhost:9820");
+ testDefaultUriInternal("hdfs://dummyhost:8020");
}
}


@@ -38,7 +38,7 @@ public class TestSshFenceByTcpPort {
private static String TEST_FENCING_HOST = System.getProperty(
"test.TestSshFenceByTcpPort.host", "localhost");
private static final String TEST_FENCING_PORT = System.getProperty(
"test.TestSshFenceByTcpPort.port", "9820");
"test.TestSshFenceByTcpPort.port", "8020");
private static final String TEST_KEYFILE = System.getProperty(
"test.TestSshFenceByTcpPort.key");


@@ -73,7 +73,7 @@ public interface HdfsClientConfigKeys {
int DFS_NAMENODE_HTTPS_PORT_DEFAULT = 9871;
String DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
- int DFS_NAMENODE_RPC_PORT_DEFAULT = 9820;
+ int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020;
String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY =
"dfs.namenode.kerberos.principal";
String DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
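For context, DFS_NAMENODE_RPC_PORT_DEFAULT is the value that fills in when a NameNode URI omits the port. A hedged sketch of that fallback (illustrative logic only, not the actual HDFS client code path):

```java
import java.net.InetSocketAddress;
import java.net.URI;

public class DefaultRpcPortSketch {
  // Mirrors HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT after this change.
  static final int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020;

  public static void main(String[] args) {
    URI nn = URI.create("hdfs://namenode.example.com");  // no port in the URI
    int port = nn.getPort() == -1 ? DFS_NAMENODE_RPC_PORT_DEFAULT : nn.getPort();
    InetSocketAddress addr = InetSocketAddress.createUnresolved(nn.getHost(), port);
    System.out.println(addr);  // namenode.example.com:8020
  }
}
```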


@@ -70,10 +70,10 @@ public class TestRequestHedgingProxyProvider {
HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns, "nn1,nn2");
conf.set(
HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn1",
"machine1.foo.bar:9820");
"machine1.foo.bar:8020");
conf.set(
HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn2",
"machine2.foo.bar:9820");
"machine2.foo.bar:8020");
}
@Test
@@ -294,7 +294,7 @@ public class TestRequestHedgingProxyProvider {
conf.set(HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns,
"nn1,nn2,nn3");
conf.set(HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn3",
"machine3.foo.bar:9820");
"machine3.foo.bar:8020");
final AtomicInteger counter = new AtomicInteger(0);
final int[] isGood = {1};


@@ -37,7 +37,7 @@
RPC address that handles all clients requests. In the case of HA/Federation where multiple namenodes exist,
the name service id is added to the name e.g. dfs.namenode.rpc-address.ns1
dfs.namenode.rpc-address.EXAMPLENAMESERVICE
- The value of this property will take the form of nn-host1:rpc-port. The NameNode's default RPC port is 9820.
+ The value of this property will take the form of nn-host1:rpc-port. The NameNode's default RPC port is 8020.
</description>
</property>
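A sketch of the suffixing rule the description above spells out (key names follow the text; hosts are placeholders):

```java
import org.apache.hadoop.conf.Configuration;

public class RpcAddressKeysSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Single NameNode: the base key is used as-is.
    conf.set("dfs.namenode.rpc-address", "nn-host1:8020");
    // Federation: the name service id is appended.
    conf.set("dfs.namenode.rpc-address.EXAMPLENAMESERVICE", "nn-host1:8020");
    // HA within a name service also appends the NameNode id (e.g. nn1).
    conf.set("dfs.namenode.rpc-address.EXAMPLENAMESERVICE.nn1", "nn-host1:8020");
  }
}
```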


@@ -119,15 +119,15 @@ The order in which you set these configurations is unimportant, but the values y
<property>
<name>dfs.namenode.rpc-address.mycluster.nn1</name>
- <value>machine1.example.com:9820</value>
+ <value>machine1.example.com:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn2</name>
- <value>machine2.example.com:9820</value>
+ <value>machine2.example.com:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn3</name>
- <value>machine3.example.com:9820</value>
+ <value>machine3.example.com:8020</value>
</property>
**Note:** You may similarly configure the "**servicerpc-address**" setting if


@@ -132,15 +132,15 @@ The order in which you set these configurations is unimportant, but the values y
<property>
<name>dfs.namenode.rpc-address.mycluster.nn1</name>
- <value>machine1.example.com:9820</value>
+ <value>machine1.example.com:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn2</name>
- <value>machine2.example.com:9820</value>
+ <value>machine2.example.com:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn3</name>
- <value>machine3.example.com:9820</value>
+ <value>machine3.example.com:8020</value>
</property>
**Note:** You may similarly configure the "**servicerpc-address**" setting if you so desire.


@@ -291,7 +291,7 @@ We are going to remove the file test1.
The comment below shows that the file has been moved to Trash directory.
$ hadoop fs -rm -r delete/test1
- Moved: hdfs://localhost:9820/user/hadoop/delete/test1 to trash at: hdfs://localhost:9820/user/hadoop/.Trash/Current
+ Moved: hdfs://localhost:8020/user/hadoop/delete/test1 to trash at: hdfs://localhost:8020/user/hadoop/.Trash/Current
now we are going to remove the file with skipTrash option,
which will not send the file to Trash.It will be completely removed from HDFS.


@@ -104,40 +104,40 @@ The authority following the `viewfs://` scheme in the URI is the mount table nam
The mount points of a mount table are specified in the standard Hadoop configuration files. All the mount table config entries for `viewfs` are prefixed by `fs.viewfs.mounttable.`. The mount points that are linking other filesystems are specified using `link` tags. The recommendation is to have mount points name same as in the linked filesystem target locations. For all namespaces that are not configured in the mount table, we can have them fallback to a default filesystem via `linkFallback`.
- In the below mount table configuration, namespace `/data` is linked to the filesystem `hdfs://nn1-clusterx.example.com:9820/data`, `/project` is linked to the filesystem `hdfs://nn2-clusterx.example.com:9820/project`. All namespaces that are not configured in the mount table, like `/logs` are linked to the filesystem `hdfs://nn5-clusterx.example.com:9820/home`.
+ In the below mount table configuration, namespace `/data` is linked to the filesystem `hdfs://nn1-clusterx.example.com:8020/data`, `/project` is linked to the filesystem `hdfs://nn2-clusterx.example.com:8020/project`. All namespaces that are not configured in the mount table, like `/logs` are linked to the filesystem `hdfs://nn5-clusterx.example.com:8020/home`.
```xml
<configuration>
<property>
<name>fs.viewfs.mounttable.ClusterX.link./data</name>
- <value>hdfs://nn1-clusterx.example.com:9820/data</value>
+ <value>hdfs://nn1-clusterx.example.com:8020/data</value>
</property>
<property>
<name>fs.viewfs.mounttable.ClusterX.link./project</name>
- <value>hdfs://nn2-clusterx.example.com:9820/project</value>
+ <value>hdfs://nn2-clusterx.example.com:8020/project</value>
</property>
<property>
<name>fs.viewfs.mounttable.ClusterX.link./user</name>
- <value>hdfs://nn3-clusterx.example.com:9820/user</value>
+ <value>hdfs://nn3-clusterx.example.com:8020/user</value>
</property>
<property>
<name>fs.viewfs.mounttable.ClusterX.link./tmp</name>
- <value>hdfs://nn4-clusterx.example.com:9820/tmp</value>
+ <value>hdfs://nn4-clusterx.example.com:8020/tmp</value>
</property>
<property>
<name>fs.viewfs.mounttable.ClusterX.linkFallback</name>
- <value>hdfs://nn5-clusterx.example.com:9820/home</value>
+ <value>hdfs://nn5-clusterx.example.com:8020/home</value>
</property>
</configuration>
```
- Alternatively we can have the mount table's root merged with the root of another filesystem via `linkMergeSlash`. In the below mount table configuration, ClusterY's root is merged with the root filesystem at `hdfs://nn1-clustery.example.com:9820`.
+ Alternatively we can have the mount table's root merged with the root of another filesystem via `linkMergeSlash`. In the below mount table configuration, ClusterY's root is merged with the root filesystem at `hdfs://nn1-clustery.example.com:8020`.
```xml
<configuration>
<property>
<name>fs.viewfs.mounttable.ClusterY.linkMergeSlash</name>
- <value>hdfs://nn1-clustery.example.com:9820/</value>
+ <value>hdfs://nn1-clustery.example.com:8020/</value>
</property>
</configuration>
```
@@ -237,11 +237,11 @@ The mount tables can be described in `core-site.xml` but it is better to use ind
In the file `mountTable.xml`, there is a definition of the mount table "ClusterX" for the hypothetical cluster that is a federation of the three namespace volumes managed by the three namenodes
- 1. nn1-clusterx.example.com:9820,
- 2. nn2-clusterx.example.com:9820, and
- 3. nn3-clusterx.example.com:9820.
+ 1. nn1-clusterx.example.com:8020,
+ 2. nn2-clusterx.example.com:8020, and
+ 3. nn3-clusterx.example.com:8020.
- Here `/home` and `/tmp` are in the namespace managed by namenode nn1-clusterx.example.com:9820, and projects `/foo` and `/bar` are hosted on the other namenodes of the federated cluster. The home directory base path is set to `/home` so that each user can access its home directory using the getHomeDirectory() method defined in [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html)/[FileContext](../../api/org/apache/hadoop/fs/FileContext.html).
+ Here `/home` and `/tmp` are in the namespace managed by namenode nn1-clusterx.example.com:8020, and projects `/foo` and `/bar` are hosted on the other namenodes of the federated cluster. The home directory base path is set to `/home` so that each user can access its home directory using the getHomeDirectory() method defined in [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html)/[FileContext](../../api/org/apache/hadoop/fs/FileContext.html).
```xml
<configuration>
@@ -251,19 +251,19 @@ Here `/home` and `/tmp` are in the namespace managed by namenode nn1-clusterx.ex
</property>
<property>
<name>fs.viewfs.mounttable.ClusterX.link./home</name>
- <value>hdfs://nn1-clusterx.example.com:9820/home</value>
+ <value>hdfs://nn1-clusterx.example.com:8020/home</value>
</property>
<property>
<name>fs.viewfs.mounttable.ClusterX.link./tmp</name>
- <value>hdfs://nn1-clusterx.example.com:9820/tmp</value>
+ <value>hdfs://nn1-clusterx.example.com:8020/tmp</value>
</property>
<property>
<name>fs.viewfs.mounttable.ClusterX.link./projects/foo</name>
- <value>hdfs://nn2-clusterx.example.com:9820/projects/foo</value>
+ <value>hdfs://nn2-clusterx.example.com:8020/projects/foo</value>
</property>
<property>
<name>fs.viewfs.mounttable.ClusterX.link./projects/bar</name>
- <value>hdfs://nn3-clusterx.example.com:9820/projects/bar</value>
+ <value>hdfs://nn3-clusterx.example.com:8020/projects/bar</value>
</property>
</configuration>
```
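As a usage sketch (not part of this patch), a client resolving a path through the ClusterX mount table above is routed to whichever namenode the matching link points at:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ViewFsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();  // assumes mountTable.xml is on the classpath
    FileSystem fs = FileSystem.get(URI.create("viewfs://ClusterX/"), conf);
    // /projects/foo matches the link above, so this listing is served by
    // hdfs://nn2-clusterx.example.com:8020/projects/foo.
    for (FileStatus st : fs.listStatus(new Path("/projects/foo"))) {
      System.out.println(st.getPath());
    }
  }
}
```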


@@ -312,7 +312,7 @@ public class TestDFSClientFailover {
conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + service,
namenode);
conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + service + "."
- + namenode, "localhost:9820");
+ + namenode, "localhost:8020");
// call createProxy implicitly and explicitly
Path p = new Path("/");


@@ -84,9 +84,9 @@ import com.google.common.collect.Sets;
public class TestDFSUtil {
- static final String NS1_NN_ADDR = "ns1-nn.example.com:9820";
- static final String NS1_NN1_ADDR = "ns1-nn1.example.com:9820";
- static final String NS1_NN2_ADDR = "ns1-nn2.example.com:9820";
+ static final String NS1_NN_ADDR = "ns1-nn.example.com:8020";
+ static final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
+ static final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
/**
* Reset to default UGI settings since some tests change them.
@@ -478,7 +478,7 @@ public class TestDFSUtil {
DFS_NAMENODE_HTTP_PORT_DEFAULT, null, null, null), httpport);
URI httpAddress = DFSUtil.getInfoServer(new InetSocketAddress(
"localhost", 9820), conf, "http");
"localhost", 8020), conf, "http");
assertEquals(
URI.create("http://localhost:" + DFS_NAMENODE_HTTP_PORT_DEFAULT),
httpAddress);
@@ -488,10 +488,10 @@ public class TestDFSUtil {
public void testHANameNodesWithFederation() throws URISyntaxException {
HdfsConfiguration conf = new HdfsConfiguration();
- final String NS1_NN1_HOST = "ns1-nn1.example.com:9820";
- final String NS1_NN2_HOST = "ns1-nn2.example.com:9820";
- final String NS2_NN1_HOST = "ns2-nn1.example.com:9820";
- final String NS2_NN2_HOST = "ns2-nn2.example.com:9820";
+ final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
+ final String NS1_NN2_HOST = "ns1-nn2.example.com:8020";
+ final String NS2_NN1_HOST = "ns2-nn1.example.com:8020";
+ final String NS2_NN2_HOST = "ns2-nn2.example.com:8020";
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
// Two nameservices, each with two NNs.
@@ -555,9 +555,9 @@ public class TestDFSUtil {
HdfsConfiguration conf = new HdfsConfiguration();
// One nameservice with two NNs
- final String NS1_NN1_HOST = "ns1-nn1.example.com:9820";
+ final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
final String NS1_NN1_HOST_SVC = "ns1-nn2.example.com:9821";
- final String NS1_NN2_HOST = "ns1-nn1.example.com:9820";
+ final String NS1_NN2_HOST = "ns1-nn1.example.com:8020";
final String NS1_NN2_HOST_SVC = "ns1-nn2.example.com:9821";
conf.set(DFS_NAMESERVICES, "ns1");
@@ -641,10 +641,10 @@ public class TestDFSUtil {
public void testGetNNUris() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
- final String NS2_NN_ADDR = "ns2-nn.example.com:9820";
- final String NN1_ADDR = "nn.example.com:9820";
+ final String NS2_NN_ADDR = "ns2-nn.example.com:8020";
+ final String NN1_ADDR = "nn.example.com:8020";
final String NN1_SRVC_ADDR = "nn.example.com:9821";
- final String NN2_ADDR = "nn2.example.com:9820";
+ final String NN2_ADDR = "nn2.example.com:8020";
conf.set(DFS_NAMESERVICES, "ns1");
conf.set(DFSUtil.addKeySuffixes(
@@ -822,7 +822,7 @@ public class TestDFSUtil {
// Make sure when config FS_DEFAULT_NAME_KEY using IP address,
// it will automatically convert it to hostname
HdfsConfiguration conf = new HdfsConfiguration();
- conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:9820");
+ conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
Collection<URI> uris = getInternalNameServiceUris(conf);
assertEquals(1, uris.size());
for (URI uri : uris) {


@@ -1148,7 +1148,7 @@ public class TestQuota {
@Test
public void testSetSpaceQuotaWhenStorageTypeIsWrong() throws Exception {
Configuration conf = new HdfsConfiguration();
- conf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:9820");
+ conf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
DFSAdmin admin = new DFSAdmin(conf);
ByteArrayOutputStream err = new ByteArrayOutputStream();
PrintStream oldErr = System.err;


@@ -353,7 +353,7 @@ public class TestBlockTokenWithDFS {
try {
// prefer non-ephemeral port to avoid port collision on restartNameNode
cluster = new MiniDFSCluster.Builder(conf)
- .nameNodePort(ServerSocketUtil.getPort(19820, 100))
+ .nameNodePort(ServerSocketUtil.getPort(18020, 100))
.nameNodeHttpPort(ServerSocketUtil.getPort(19870, 100))
.numDataNodes(numDataNodes)
.build();
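ServerSocketUtil.getPort(18020, 100), used above, hands back a currently free port at or near the preferred one so a restarted NameNode can rebind predictably. A minimal sketch of the idea in plain java.net (illustrative only; the real utility's probing strategy differs):

```java
import java.io.IOException;
import java.net.ServerSocket;

public class FreePortSketch {
  // Try the preferred port first, then nearby ones, and return the first that binds.
  static int getPort(int preferred, int retries) throws IOException {
    for (int i = 0; i <= retries; i++) {
      try (ServerSocket probe = new ServerSocket(preferred + i)) {
        return probe.getLocalPort();  // free at probe time
      } catch (IOException inUse) {
        // occupied; try the next candidate
      }
    }
    throw new IOException("no free port near " + preferred);
  }

  public static void main(String[] args) throws IOException {
    System.out.println(getPort(18020, 100));
  }
}
```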


@@ -79,7 +79,7 @@ public class TestBlockTokenWithDFSStriped extends TestBlockTokenWithDFS {
}
cluster = new MiniDFSCluster.Builder(conf)
- .nameNodePort(ServerSocketUtil.getPort(19820, 100))
+ .nameNodePort(ServerSocketUtil.getPort(18020, 100))
.nameNodeHttpPort(ServerSocketUtil.getPort(19870, 100))
.numDataNodes(numDNs)
.build();


@@ -100,7 +100,7 @@ public class TestBlockPoolManager {
public void testSimpleSingleNS() throws Exception {
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY,
"hdfs://mock1:9820");
"hdfs://mock1:8020");
bpm.refreshNamenodes(conf);
assertEquals("create #1\n", log.toString());
}
@@ -110,8 +110,8 @@ public class TestBlockPoolManager {
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_NAMESERVICES,
"ns1,ns2");
addNN(conf, "ns1", "mock1:9820");
addNN(conf, "ns2", "mock1:9820");
addNN(conf, "ns1", "mock1:8020");
addNN(conf, "ns2", "mock1:8020");
bpm.refreshNamenodes(conf);
assertEquals(
"create #1\n" +
@@ -141,9 +141,9 @@ public class TestBlockPoolManager {
public void testInternalNameService() throws Exception {
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1,ns2,ns3");
addNN(conf, "ns1", "mock1:9820");
addNN(conf, "ns2", "mock1:9820");
addNN(conf, "ns3", "mock1:9820");
addNN(conf, "ns1", "mock1:8020");
addNN(conf, "ns2", "mock1:8020");
addNN(conf, "ns3", "mock1:8020");
conf.set(DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY, "ns1");
bpm.refreshNamenodes(conf);
assertEquals("create #1\n", log.toString());


@@ -163,8 +163,8 @@ public class TestAllowFormat {
// is configured in HA, then only DFS_NAMENODE_SHARED_EDITS_DIR_KEY
// is considered.
String localhost = "127.0.0.1";
- InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 9820);
- InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9820);
+ InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
+ InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 8020);
HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
conf.set(DFS_NAMENODE_NAME_DIR_KEY,


@@ -367,7 +367,7 @@ public class TestGetConf {
public void testGetJournalNodes() throws Exception {
final int nsCount = 3;
final String journalsBaseUri = "qjournal://jn0:9820;jn1:9820;jn2:9820";
final String journalsBaseUri = "qjournal://jn0:8020;jn1:8020;jn2:8020";
setupStaticHostResolution(nsCount, "jn");
// With out Name service Id
@@ -490,7 +490,7 @@
@Test(expected = UnknownHostException.class, timeout = 10000)
public void testUnknownJournalNodeHost()
throws URISyntaxException, IOException {
String journalsBaseUri = "qjournal://jn1:9820;jn2:9820;jn3:9820";
String journalsBaseUri = "qjournal://jn1:8020;jn2:8020;jn3:8020";
HdfsConfiguration conf = new HdfsConfiguration(false);
conf.set(DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
journalsBaseUri + "/jndata");
@@ -504,7 +504,7 @@
public void testJournalNodeUriError()
throws URISyntaxException, IOException {
final int nsCount = 3;
String journalsBaseUri = "qjournal://jn0 :9820;jn1:9820;jn2:9820";
String journalsBaseUri = "qjournal://jn0 :8020;jn1:8020;jn2:8020";
setupStaticHostResolution(nsCount, "jn");
HdfsConfiguration conf = new HdfsConfiguration(false);
conf.set(DFS_NAMENODE_SHARED_EDITS_DIR_KEY,


@@ -63,8 +63,8 @@ $H3 Basic Usage
The most common invocation of DistCp is an inter-cluster copy:
- bash$ hadoop distcp hdfs://nn1:9820/foo/bar \
- hdfs://nn2:9820/bar/foo
+ bash$ hadoop distcp hdfs://nn1:8020/foo/bar \
+ hdfs://nn2:8020/bar/foo
This will expand the namespace under `/foo/bar` on nn1 into a temporary file,
partition its contents among a set of map tasks, and start a copy on each
@@ -72,19 +72,19 @@ $H3 Basic Usage
One can also specify multiple source directories on the command line:
- bash$ hadoop distcp hdfs://nn1:9820/foo/a \
- hdfs://nn1:9820/foo/b \
- hdfs://nn2:9820/bar/foo
+ bash$ hadoop distcp hdfs://nn1:8020/foo/a \
+ hdfs://nn1:8020/foo/b \
+ hdfs://nn2:8020/bar/foo
Or, equivalently, from a file using the -f option:
- bash$ hadoop distcp -f hdfs://nn1:9820/srclist \
- hdfs://nn2:9820/bar/foo
+ bash$ hadoop distcp -f hdfs://nn1:8020/srclist \
+ hdfs://nn2:8020/bar/foo
Where `srclist` contains
- hdfs://nn1:9820/foo/a
- hdfs://nn1:9820/foo/b
+ hdfs://nn1:8020/foo/a
+ hdfs://nn1:8020/foo/b
When copying from multiple sources, DistCp will abort the copy with an error
message if two sources collide, but collisions at the destination are
@@ -126,35 +126,35 @@ $H3 Update and Overwrite
Consider a copy from `/source/first/` and `/source/second/` to `/target/`,
where the source paths have the following contents:
- hdfs://nn1:9820/source/first/1
- hdfs://nn1:9820/source/first/2
- hdfs://nn1:9820/source/second/10
- hdfs://nn1:9820/source/second/20
+ hdfs://nn1:8020/source/first/1
+ hdfs://nn1:8020/source/first/2
+ hdfs://nn1:8020/source/second/10
+ hdfs://nn1:8020/source/second/20
When DistCp is invoked without `-update` or `-overwrite`, the DistCp defaults
would create directories `first/` and `second/`, under `/target`. Thus:
- distcp hdfs://nn1:9820/source/first hdfs://nn1:9820/source/second hdfs://nn2:9820/target
+ distcp hdfs://nn1:8020/source/first hdfs://nn1:8020/source/second hdfs://nn2:8020/target
would yield the following contents in `/target`:
- hdfs://nn2:9820/target/first/1
- hdfs://nn2:9820/target/first/2
- hdfs://nn2:9820/target/second/10
- hdfs://nn2:9820/target/second/20
+ hdfs://nn2:8020/target/first/1
+ hdfs://nn2:8020/target/first/2
+ hdfs://nn2:8020/target/second/10
+ hdfs://nn2:8020/target/second/20
When either `-update` or `-overwrite` is specified, the **contents** of the
source-directories are copied to target, and not the source directories
themselves. Thus:
- distcp -update hdfs://nn1:9820/source/first hdfs://nn1:9820/source/second hdfs://nn2:9820/target
+ distcp -update hdfs://nn1:8020/source/first hdfs://nn1:8020/source/second hdfs://nn2:8020/target
would yield the following contents in `/target`:
- hdfs://nn2:9820/target/1
- hdfs://nn2:9820/target/2
- hdfs://nn2:9820/target/10
- hdfs://nn2:9820/target/20
+ hdfs://nn2:8020/target/1
+ hdfs://nn2:8020/target/2
+ hdfs://nn2:8020/target/10
+ hdfs://nn2:8020/target/20
By extension, if both source folders contained a file with the same name
(say, `0`), then both sources would map an entry to `/target/0` at the
@@ -162,27 +162,27 @@ $H3 Update and Overwrite
Now, consider the following copy operation:
- distcp hdfs://nn1:9820/source/first hdfs://nn1:9820/source/second hdfs://nn2:9820/target
+ distcp hdfs://nn1:8020/source/first hdfs://nn1:8020/source/second hdfs://nn2:8020/target
With sources/sizes:
- hdfs://nn1:9820/source/first/1 32
- hdfs://nn1:9820/source/first/2 32
- hdfs://nn1:9820/source/second/10 64
- hdfs://nn1:9820/source/second/20 32
+ hdfs://nn1:8020/source/first/1 32
+ hdfs://nn1:8020/source/first/2 32
+ hdfs://nn1:8020/source/second/10 64
+ hdfs://nn1:8020/source/second/20 32
And destination/sizes:
- hdfs://nn2:9820/target/1 32
- hdfs://nn2:9820/target/10 32
- hdfs://nn2:9820/target/20 64
+ hdfs://nn2:8020/target/1 32
+ hdfs://nn2:8020/target/10 32
+ hdfs://nn2:8020/target/20 64
Will effect:
- hdfs://nn2:9820/target/1 32
- hdfs://nn2:9820/target/2 32
- hdfs://nn2:9820/target/10 64
- hdfs://nn2:9820/target/20 32
+ hdfs://nn2:8020/target/1 32
+ hdfs://nn2:8020/target/2 32
+ hdfs://nn2:8020/target/10 64
+ hdfs://nn2:8020/target/20 32
`1` is skipped because the file-length and contents match. `2` is copied
because it doesn't exist at the target. `10` and `20` are overwritten since


@@ -37,36 +37,36 @@ public class TestOptionsParser {
@Test
public void testParseIgnoreFailure() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertFalse(options.shouldIgnoreFailures());
options = OptionsParser.parse(new String[] {
"-i",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldIgnoreFailures());
}
@Test
public void testParseOverwrite() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertFalse(options.shouldOverwrite());
options = OptionsParser.parse(new String[] {
"-overwrite",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldOverwrite());
try {
OptionsParser.parse(new String[] {
"-update",
"-overwrite",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.fail("Update and overwrite aren't allowed together");
} catch (IllegalArgumentException ignore) {
}
@@ -75,44 +75,44 @@ public class TestOptionsParser {
@Test
public void testLogPath() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertNull(options.getLogPath());
options = OptionsParser.parse(new String[] {
"-log",
"hdfs://localhost:9820/logs",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
Assert.assertEquals(options.getLogPath(), new Path("hdfs://localhost:9820/logs"));
"hdfs://localhost:8020/logs",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getLogPath(), new Path("hdfs://localhost:8020/logs"));
}
@Test
public void testParseBlokcing() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldBlock());
options = OptionsParser.parse(new String[] {
"-async",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertFalse(options.shouldBlock());
}
@Test
public void testParsebandwidth() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getMapBandwidth(), 0, DELTA);
options = OptionsParser.parse(new String[] {
"-bandwidth",
"11.2",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getMapBandwidth(), 11.2, DELTA);
}
@@ -121,8 +121,8 @@ public class TestOptionsParser {
OptionsParser.parse(new String[] {
"-bandwidth",
"-11",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
}
@Test(expected=IllegalArgumentException.class)
@@ -130,22 +130,22 @@ public class TestOptionsParser {
OptionsParser.parse(new String[] {
"-bandwidth",
"0",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
}
@Test
public void testParseSkipCRC() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertFalse(options.shouldSkipCRC());
options = OptionsParser.parse(new String[] {
"-update",
"-skipcrccheck",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldSyncFolder());
Assert.assertTrue(options.shouldSkipCRC());
}
@@ -153,22 +153,22 @@ public class TestOptionsParser {
@Test
public void testParseAtomicCommit() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertFalse(options.shouldAtomicCommit());
options = OptionsParser.parse(new String[] {
"-atomic",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldAtomicCommit());
try {
OptionsParser.parse(new String[] {
"-atomic",
"-update",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.fail("Atomic and sync folders were allowed");
} catch (IllegalArgumentException ignore) { }
}
@@ -176,30 +176,30 @@ public class TestOptionsParser {
@Test
public void testParseWorkPath() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertNull(options.getAtomicWorkPath());
options = OptionsParser.parse(new String[] {
"-atomic",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertNull(options.getAtomicWorkPath());
options = OptionsParser.parse(new String[] {
"-atomic",
"-tmp",
"hdfs://localhost:9820/work",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
Assert.assertEquals(options.getAtomicWorkPath(), new Path("hdfs://localhost:9820/work"));
"hdfs://localhost:8020/work",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getAtomicWorkPath(), new Path("hdfs://localhost:8020/work"));
try {
OptionsParser.parse(new String[] {
"-tmp",
"hdfs://localhost:9820/work",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/work",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.fail("work path was allowed without -atomic switch");
} catch (IllegalArgumentException ignore) {}
}
@@ -207,37 +207,37 @@ public class TestOptionsParser {
@Test
public void testParseSyncFolders() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertFalse(options.shouldSyncFolder());
options = OptionsParser.parse(new String[] {
"-update",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldSyncFolder());
}
@Test
public void testParseDeleteMissing() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertFalse(options.shouldDeleteMissing());
options = OptionsParser.parse(new String[] {
"-update",
"-delete",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldSyncFolder());
Assert.assertTrue(options.shouldDeleteMissing());
options = OptionsParser.parse(new String[] {
"-overwrite",
"-delete",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldOverwrite());
Assert.assertTrue(options.shouldDeleteMissing());
@@ -245,8 +245,8 @@ public class TestOptionsParser {
OptionsParser.parse(new String[] {
"-atomic",
"-delete",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.fail("Atomic and delete folders were allowed");
} catch (IllegalArgumentException ignore) { }
}
@@ -254,38 +254,38 @@ public class TestOptionsParser {
@Test
public void testParseMaps() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getMaxMaps(), DistCpConstants.DEFAULT_MAPS);
options = OptionsParser.parse(new String[] {
"-m",
"1",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getMaxMaps(), 1);
options = OptionsParser.parse(new String[] {
"-m",
"0",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getMaxMaps(), 1);
try {
OptionsParser.parse(new String[] {
"-m",
"hello",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.fail("Non numberic map parsed");
} catch (IllegalArgumentException ignore) { }
try {
OptionsParser.parse(new String[] {
"-mapredXslConf",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.fail("Non numberic map parsed");
} catch (IllegalArgumentException ignore) { }
}
@@ -293,8 +293,8 @@ public class TestOptionsParser {
@Test
public void testParseNumListstatusThreads() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
// If command line argument isn't set, we expect .getNumListstatusThreads
// option to be zero (so that we know when to override conf properties).
Assert.assertEquals(0, options.getNumListstatusThreads());
@@ -302,23 +302,23 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"--numListstatusThreads",
"12",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(12, options.getNumListstatusThreads());
options = OptionsParser.parse(new String[] {
"--numListstatusThreads",
"0",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(0, options.getNumListstatusThreads());
try {
OptionsParser.parse(new String[] {
"--numListstatusThreads",
"hello",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.fail("Non numberic numListstatusThreads parsed");
} catch (IllegalArgumentException ignore) { }
@@ -326,8 +326,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"--numListstatusThreads",
"100",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(DistCpOptions.MAX_NUM_LISTSTATUS_THREADS,
options.getNumListstatusThreads());
}
@@ -336,10 +336,10 @@ public class TestOptionsParser {
public void testSourceListing() {
DistCpOptions options = OptionsParser.parse(new String[] {
"-f",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getSourceFileListing(),
- new Path("hdfs://localhost:9820/source/first"));
+ new Path("hdfs://localhost:8020/source/first"));
}
@Test
@@ -347,9 +347,9 @@ public class TestOptionsParser {
try {
OptionsParser.parse(new String[] {
"-f",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.fail("Both source listing & source paths allowed");
} catch (IllegalArgumentException ignore) {}
}
@@ -358,7 +358,7 @@ public class TestOptionsParser {
public void testMissingSourceInfo() {
try {
OptionsParser.parse(new String[] {
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/target/"});
Assert.fail("Neither source listing not source paths present");
} catch (IllegalArgumentException ignore) {}
}
@@ -367,7 +367,7 @@ public class TestOptionsParser {
public void testMissingTarget() {
try {
OptionsParser.parse(new String[] {
"-f", "hdfs://localhost:9820/source"});
"-f", "hdfs://localhost:8020/source"});
Assert.fail("Missing target allowed");
} catch (IllegalArgumentException ignore) {}
}
@@ -376,7 +376,7 @@ public class TestOptionsParser {
public void testInvalidArgs() {
try {
OptionsParser.parse(new String[] {
"-m", "-f", "hdfs://localhost:9820/source"});
"-m", "-f", "hdfs://localhost:8020/source"});
Assert.fail("Missing map value");
} catch (IllegalArgumentException ignore) {}
}
@@ -387,14 +387,14 @@ public class TestOptionsParser {
"-strategy",
"dynamic",
"-f",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getCopyStrategy(), "dynamic");
options = OptionsParser.parse(new String[] {
"-f",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getCopyStrategy(), DistCpConstants.UNIFORMSIZE);
}
@@ -402,17 +402,17 @@ public class TestOptionsParser {
public void testTargetPath() {
DistCpOptions options = OptionsParser.parse(new String[] {
"-f",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
Assert.assertEquals(options.getTargetPath(), new Path("hdfs://localhost:9820/target/"));
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getTargetPath(), new Path("hdfs://localhost:8020/target/"));
}
@Test
public void testPreserve() {
DistCpOptions options = OptionsParser.parse(new String[] {
"-f",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
@@ -423,8 +423,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"-p",
"-f",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
@@ -436,8 +436,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"-p",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
@@ -450,8 +450,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"-pbr",
"-f",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
@@ -464,8 +464,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"-pbrgup",
"-f",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
@@ -478,8 +478,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"-pbrgupcaxt",
"-f",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
@@ -493,8 +493,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"-pc",
"-f",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE));
Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
@@ -507,8 +507,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
"-p",
"-f",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT.length() - 2,
options.getPreserveAttributes().size());
@@ -516,15 +516,15 @@ public class TestOptionsParser {
OptionsParser.parse(new String[] {
"-pabcd",
"-f",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target"});
Assert.fail("Invalid preserve attribute");
}
catch (NoSuchElementException ignore) {}
Builder builder = new DistCpOptions.Builder(
- new Path("hdfs://localhost:9820/source/first"),
- new Path("hdfs://localhost:9820/target/"));
+ new Path("hdfs://localhost:8020/source/first"),
+ new Path("hdfs://localhost:8020/target/"));
Assert.assertFalse(
builder.build().shouldPreserve(FileAttribute.PERMISSION));
builder.preserve(FileAttribute.PERMISSION);
@@ -552,8 +552,8 @@ public class TestOptionsParser {
DistCpOptions options = OptionsParser.parse(new String[] {
"-atomic",
"-i",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
options.appendToConf(conf);
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false));
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(), false));
@@ -570,8 +570,8 @@ public class TestOptionsParser {
"-pu",
"-bandwidth",
"11.2",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
options.appendToConf(conf);
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(), false));
@@ -644,8 +644,8 @@ public class TestOptionsParser {
DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
DistCpOptions options = OptionsParser.parse(new String[] { "-update",
"-append", "hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
"-append", "hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
options.appendToConf(conf);
Assert.assertTrue(conf.getBoolean(
DistCpOptionSwitch.APPEND.getConfigLabel(), false));
@@ -655,8 +655,8 @@ public class TestOptionsParser {
// make sure -append is only valid when -update is specified
try {
OptionsParser.parse(new String[] { "-append",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
fail("Append should fail if update option is not specified");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
@@ -667,8 +667,8 @@ public class TestOptionsParser {
try {
OptionsParser.parse(new String[] {
"-append", "-update", "-skipcrccheck",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
fail("Append should fail if skipCrc option is specified");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
@@ -687,8 +687,8 @@ public class TestOptionsParser {
DistCpOptions options = OptionsParser.parse(new String[] { "-update",
optionStr, "s1", "s2",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
options.appendToConf(conf);
Assert.assertTrue(conf.getBoolean(optionLabel, false));
Assert.assertTrue(isDiff?
@@ -698,8 +698,8 @@ public class TestOptionsParser {
options = OptionsParser.parse(new String[] {
optionStr, "s1", ".", "-update",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
options.appendToConf(conf);
Assert.assertTrue(conf.getBoolean(optionLabel, false));
Assert.assertTrue(isDiff?
@@ -710,8 +710,8 @@ public class TestOptionsParser {
// -diff/-rdiff requires two option values
try {
OptionsParser.parse(new String[] {optionStr, "s1", "-update",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
fail(optionStr + " should fail with only one snapshot name");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
@@ -721,8 +721,8 @@ public class TestOptionsParser {
// make sure -diff/-rdiff is only valid when -update is specified
try {
OptionsParser.parse(new String[] {optionStr, "s1", "s2",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
fail(optionStr + " should fail if -update option is not specified");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
@@ -732,8 +732,8 @@ public class TestOptionsParser {
try {
OptionsParser.parse(new String[] {
"-diff", "s1", "s2", "-update", "-delete",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
fail("Should fail as -delete and -diff/-rdiff are mutually exclusive");
} catch (IllegalArgumentException e) {
assertExceptionContains(
@@ -743,8 +743,8 @@ public class TestOptionsParser {
try {
OptionsParser.parse(new String[] {
"-diff", "s1", "s2", "-delete",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
fail("Should fail as -delete and -diff/-rdiff are mutually exclusive");
} catch (IllegalArgumentException e) {
assertExceptionContains(
@@ -754,8 +754,8 @@ public class TestOptionsParser {
try {
OptionsParser.parse(new String[] {optionStr, "s1", "s2",
"-delete", "-overwrite",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
fail("Should fail as -delete and -diff are mutually exclusive");
} catch (IllegalArgumentException e) {
assertExceptionContains(
@@ -768,8 +768,8 @@ public class TestOptionsParser {
optionStr, "s1", "s2",
optionStrOther, "s2", "s1",
"-update",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/" });
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/" });
fail(optionStr + " should fail if " + optionStrOther
+ " is also specified");
} catch (IllegalArgumentException e) {
@@ -791,15 +791,15 @@ public class TestOptionsParser {
@Test
public void testExclusionsOption() {
DistCpOptions options = OptionsParser.parse(new String[] {
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertNull(options.getFiltersFile());
options = OptionsParser.parse(new String[] {
"-filters",
"/tmp/filters.txt",
"hdfs://localhost:9820/source/first",
"hdfs://localhost:9820/target/"});
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getFiltersFile(), "/tmp/filters.txt");
}
}
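All of the parser tests above funnel into DistCpOptions; the same options can also be built directly with the DistCpOptions.Builder that appears in testPreserve. A sketch under that assumption (paths are placeholders; mapping -update to withSyncFolder follows the tests above):

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.DistCpOptions;

public class BuildDistCpOptions {
  public static void main(String[] args) {
    // Roughly: distcp -update -f hdfs://localhost:8020/srclist hdfs://localhost:8020/target/
    DistCpOptions options = new DistCpOptions.Builder(
        new Path("hdfs://localhost:8020/srclist"),   // source file listing (-f)
        new Path("hdfs://localhost:8020/target/"))
        .withSyncFolder(true)                        // -update
        .build();
    System.out.println(options.shouldSyncFolder());  // true
  }
}
```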


@@ -165,7 +165,7 @@ Hadoop uses URIs to refer to files within a filesystem. Some common examples are
local://etc/hosts
hdfs://cluster1/users/example/data/set1
- hdfs://cluster2.example.org:9820/users/example/data/set1
+ hdfs://cluster2.example.org:8020/users/example/data/set1
The Swift Filesystem Client adds a new URL type `swift`. In a Swift Filesystem URL, the hostname part of a URL identifies the container and the service to work with; the path the name of the object. Here are some examples
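The doc's example list is cut off by the hunk boundary; as a hedged illustration of the URL anatomy just described (container and service names here are invented):

```java
import org.apache.hadoop.fs.Path;

public class SwiftUrlSketch {
  public static void main(String[] args) {
    // hostname part = <container>.<service>; the path names the object
    Path object = new Path("swift://logs.rackspace/2018/02/06/namenode.log");
    System.out.println(object.toUri().getHost());  // logs.rackspace
    System.out.println(object.toUri().getPath());  // /2018/02/06/namenode.log
  }
}
```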


@@ -4547,7 +4547,7 @@
"yarn.nodemanager.keytab" : "/etc/krb5.keytab",
"mapreduce.task.io.sort.factor" : "10",
"yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
"mapreduce.job.working.dir" : "hdfs://a2115.smile.com:9820/user/jenkins",
"mapreduce.job.working.dir" : "hdfs://a2115.smile.com:8020/user/jenkins",
"yarn.admin.acl" : "*",
"mapreduce.job.speculative.speculativecap" : "0.1",
"dfs.namenode.num.checkpoints.retained" : "2",
@@ -4795,7 +4795,7 @@
"ftp.stream-buffer-size" : "4096",
"dfs.namenode.avoid.write.stale.datanode" : "false",
"hadoop.security.group.mapping.ldap.search.attr.member" : "member",
"mapreduce.output.fileoutputformat.outputdir" : "hdfs://a2115.smile.com:9820/user/jenkins/tera-gen-1",
"mapreduce.output.fileoutputformat.outputdir" : "hdfs://a2115.smile.com:8020/user/jenkins/tera-gen-1",
"dfs.blockreport.initialDelay" : "0",
"yarn.nm.liveness-monitor.expiry-interval-ms" : "600000",
"hadoop.http.authentication.token.validity" : "36000",
@@ -4839,7 +4839,7 @@
"hadoop.security.auth_to_local" : "DEFAULT",
"dfs.secondary.namenode.kerberos.internal.spnego.principal" : "${dfs.web.authentication.kerberos.principal}",
"ftp.client-write-packet-size" : "65536",
"fs.defaultFS" : "hdfs://a2115.smile.com:9820",
"fs.defaultFS" : "hdfs://a2115.smile.com:8020",
"yarn.nodemanager.address" : "0.0.0.0:0",
"yarn.scheduler.fair.assignmultiple" : "true",
"yarn.resourcemanager.scheduler.client.thread-count" : "50",
@@ -9628,7 +9628,7 @@
"yarn.nodemanager.keytab" : "/etc/krb5.keytab",
"mapreduce.task.io.sort.factor" : "10",
"yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
"mapreduce.job.working.dir" : "hdfs://a2115.smile.com:9820/user/jenkins",
"mapreduce.job.working.dir" : "hdfs://a2115.smile.com:8020/user/jenkins",
"yarn.admin.acl" : "*",
"mapreduce.job.speculative.speculativecap" : "0.1",
"dfs.namenode.num.checkpoints.retained" : "2",
@@ -9876,7 +9876,7 @@
"ftp.stream-buffer-size" : "4096",
"dfs.namenode.avoid.write.stale.datanode" : "false",
"hadoop.security.group.mapping.ldap.search.attr.member" : "member",
"mapreduce.output.fileoutputformat.outputdir" : "hdfs://a2115.smile.com:9820/user/jenkins/tera-gen-2",
"mapreduce.output.fileoutputformat.outputdir" : "hdfs://a2115.smile.com:8020/user/jenkins/tera-gen-2",
"dfs.blockreport.initialDelay" : "0",
"yarn.nm.liveness-monitor.expiry-interval-ms" : "600000",
"hadoop.http.authentication.token.validity" : "36000",
@@ -9920,7 +9920,7 @@
"hadoop.security.auth_to_local" : "DEFAULT",
"dfs.secondary.namenode.kerberos.internal.spnego.principal" : "${dfs.web.authentication.kerberos.principal}",
"ftp.client-write-packet-size" : "65536",
"fs.defaultFS" : "hdfs://a2115.smile.com:9820",
"fs.defaultFS" : "hdfs://a2115.smile.com:8020",
"yarn.nodemanager.address" : "0.0.0.0:0",
"yarn.scheduler.fair.assignmultiple" : "true",
"yarn.resourcemanager.scheduler.client.thread-count" : "50",


@@ -147,7 +147,7 @@ public class RegistryTestHelper extends Assert {
Map<String, String> url = addressList.get(0);
String addr = url.get("uri");
assertTrue(addr.contains("http"));
- assertTrue(addr.contains(":9820"));
+ assertTrue(addr.contains(":8020"));
Endpoint nnipc = findEndpoint(record, NNIPC, false, 1,2);
assertEquals("wrong protocol in " + nnipc, ProtocolTypes.PROTOCOL_THRIFT,
@@ -275,7 +275,7 @@
new URI("http", hostname + ":80", "/")));
entry.addExternalEndpoint(
restEndpoint(API_WEBHDFS,
new URI("http", hostname + ":9820", "/")));
new URI("http", hostname + ":8020", "/")));
Endpoint endpoint = ipcEndpoint(API_HDFS, null);
endpoint.addresses.add(RegistryTypeUtils.hostnamePortPair(hostname, 8030));


@@ -64,7 +64,7 @@ public class TestPBRecordImpl {
LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
assertTrue(ret instanceof LocalResourcePBImpl);
ret.setResource(URL.fromPath(new Path(
"hdfs://y.ak:9820/foo/bar")));
"hdfs://y.ak:8020/foo/bar")));
ret.setSize(4344L);
ret.setTimestamp(3141592653589793L);
ret.setVisibility(LocalResourceVisibility.PUBLIC);