HDFS-12990. Change default NameNode RPC port back to 8020. Contributed by Xiao Chen.
(cherry picked from commit 4304fcd5bd)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java

parent cea25ac63a
commit 9264f10bb3
@@ -98,7 +98,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   /**
    * CallQueue related settings. These are not used directly, but rather
    * combined with a namespace and port. For instance:
-   * IPC_NAMESPACE + ".9820." + IPC_CALLQUEUE_IMPL_KEY
+   * IPC_NAMESPACE + ".8020." + IPC_CALLQUEUE_IMPL_KEY
    */
   public static final String IPC_NAMESPACE = "ipc";
   public static final String IPC_CALLQUEUE_IMPL_KEY = "callqueue.impl";
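For context, these constants are combined with a server's RPC port to form the effective per-server call queue property, which is why the javadoc's example port had to change with the default. A minimal sketch of that key assembly (the helper method is illustrative, not Hadoop API):

```java
public class CallQueueKeyExample {
  // Values mirror CommonConfigurationKeys above.
  static final String IPC_NAMESPACE = "ipc";
  static final String IPC_CALLQUEUE_IMPL_KEY = "callqueue.impl";

  // Illustrative helper: combine namespace + port + setting name,
  // as the javadoc describes (IPC_NAMESPACE + ".8020." + IPC_CALLQUEUE_IMPL_KEY).
  static String callQueueImplKey(int port) {
    return IPC_NAMESPACE + "." + port + "." + IPC_CALLQUEUE_IMPL_KEY;
  }

  public static void main(String[] args) {
    // With the default NameNode RPC port back at 8020:
    System.out.println(callQueueImplKey(8020)); // prints "ipc.8020.callqueue.impl"
  }
}
```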
@@ -83,11 +83,11 @@ import org.slf4j.LoggerFactory;
  *
  * <p>Examples:</p>
  * <p><blockquote><pre>
- * $ bin/hadoop dfs -fs darwin:9820 -ls /data
- * list /data directory in dfs with namenode darwin:9820
+ * $ bin/hadoop dfs -fs darwin:8020 -ls /data
+ * list /data directory in dfs with namenode darwin:8020
  *
- * $ bin/hadoop dfs -D fs.default.name=darwin:9820 -ls /data
- * list /data directory in dfs with namenode darwin:9820
+ * $ bin/hadoop dfs -D fs.default.name=darwin:8020 -ls /data
+ * list /data directory in dfs with namenode darwin:8020
  *
  * $ bin/hadoop dfs -conf core-site.xml -conf hdfs-site.xml -ls /data
  * list /data directory in dfs with multiple conf files specified.
@@ -47,6 +47,6 @@ public class TestDelegateToFileSystem {
 
   @Test
   public void testDefaultURIwithPort() throws Exception {
-    testDefaultUriInternal("hdfs://dummyhost:9820");
+    testDefaultUriInternal("hdfs://dummyhost:8020");
   }
 }
@@ -38,7 +38,7 @@ public class TestSshFenceByTcpPort {
   private static String TEST_FENCING_HOST = System.getProperty(
       "test.TestSshFenceByTcpPort.host", "localhost");
   private static final String TEST_FENCING_PORT = System.getProperty(
-      "test.TestSshFenceByTcpPort.port", "9820");
+      "test.TestSshFenceByTcpPort.port", "8020");
   private static final String TEST_KEYFILE = System.getProperty(
       "test.TestSshFenceByTcpPort.key");
 
@@ -73,7 +73,7 @@ public interface HdfsClientConfigKeys {
   int DFS_NAMENODE_HTTPS_PORT_DEFAULT = 9871;
   String DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
   String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
-  int DFS_NAMENODE_RPC_PORT_DEFAULT = 9820;
+  int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020;
   String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY =
       "dfs.namenode.kerberos.principal";
   String DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
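DFS_NAMENODE_RPC_PORT_DEFAULT is the port clients assume when an `hdfs://` URI names a host but no port. A minimal sketch of that fallback, under the simplifying assumption of direct URI parsing (real clients resolve through the HDFS client utilities):

```java
import java.net.InetSocketAddress;
import java.net.URI;

public class DefaultRpcPortExample {
  // Mirrors HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT after this change.
  static final int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020;

  // Illustrative: use the URI's port if present, else the default RPC port.
  static InetSocketAddress nameNodeAddress(String uri) {
    URI u = URI.create(uri);
    int port = (u.getPort() == -1) ? DFS_NAMENODE_RPC_PORT_DEFAULT : u.getPort();
    return new InetSocketAddress(u.getHost(), port);
  }

  public static void main(String[] args) {
    System.out.println(nameNodeAddress("hdfs://nn.example.com"));      // falls back to :8020
    System.out.println(nameNodeAddress("hdfs://nn.example.com:9820")); // explicit port wins
  }
}
```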
@@ -70,10 +70,10 @@ public class TestRequestHedgingProxyProvider {
         HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns, "nn1,nn2");
     conf.set(
         HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn1",
-        "machine1.foo.bar:9820");
+        "machine1.foo.bar:8020");
     conf.set(
         HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn2",
-        "machine2.foo.bar:9820");
+        "machine2.foo.bar:8020");
   }
 
   @Test
@@ -236,7 +236,7 @@ public class TestRequestHedgingProxyProvider {
     conf.set(HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns,
         "nn1,nn2,nn3");
     conf.set(HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn3",
-        "machine3.foo.bar:9820");
+        "machine3.foo.bar:8020");
 
     final AtomicInteger counter = new AtomicInteger(0);
     final int[] isGood = {1};
@@ -37,7 +37,7 @@
     RPC address that handles all clients requests. In the case of HA/Federation where multiple namenodes exist,
     the name service id is added to the name e.g. dfs.namenode.rpc-address.ns1
     dfs.namenode.rpc-address.EXAMPLENAMESERVICE
-    The value of this property will take the form of nn-host1:rpc-port. The NameNode's default RPC port is 9820.
+    The value of this property will take the form of nn-host1:rpc-port. The NameNode's default RPC port is 8020.
   </description>
 </property>
 
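The same keys can be set programmatically; a short sketch of the two forms the description covers, using the placeholder names from hdfs-default.xml above:

```java
import org.apache.hadoop.conf.Configuration;

public class RpcAddressConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Single-NameNode case: clients reach the NameNode on the default 8020.
    conf.set("fs.defaultFS", "hdfs://nn-host1:8020");
    // HA/Federation case: the nameservice id is appended to the key,
    // e.g. dfs.namenode.rpc-address.EXAMPLENAMESERVICE, valued nn-host1:rpc-port.
    conf.set("dfs.namenode.rpc-address.EXAMPLENAMESERVICE", "nn-host1:8020");
  }
}
```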
@@ -119,15 +119,15 @@ The order in which you set these configurations is unimportant, but the values y
 
     <property>
       <name>dfs.namenode.rpc-address.mycluster.nn1</name>
-      <value>machine1.example.com:9820</value>
+      <value>machine1.example.com:8020</value>
     </property>
     <property>
       <name>dfs.namenode.rpc-address.mycluster.nn2</name>
-      <value>machine2.example.com:9820</value>
+      <value>machine2.example.com:8020</value>
     </property>
     <property>
       <name>dfs.namenode.rpc-address.mycluster.nn3</name>
-      <value>machine3.example.com:9820</value>
+      <value>machine3.example.com:8020</value>
     </property>
 
 **Note:** You may similarly configure the "**servicerpc-address**" setting if
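The `mycluster` layout above, expressed as a configuration sketch (logical names and hosts are the documentation's placeholders):

```java
import org.apache.hadoop.conf.Configuration;

public class HaRpcAddressExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("dfs.nameservices", "mycluster");
    conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2,nn3");
    // One fully-qualified RPC address per NameNode, all on the restored default 8020.
    conf.set("dfs.namenode.rpc-address.mycluster.nn1", "machine1.example.com:8020");
    conf.set("dfs.namenode.rpc-address.mycluster.nn2", "machine2.example.com:8020");
    conf.set("dfs.namenode.rpc-address.mycluster.nn3", "machine3.example.com:8020");
  }
}
```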
@@ -132,15 +132,15 @@ The order in which you set these configurations is unimportant, but the values y
 
     <property>
       <name>dfs.namenode.rpc-address.mycluster.nn1</name>
-      <value>machine1.example.com:9820</value>
+      <value>machine1.example.com:8020</value>
     </property>
     <property>
       <name>dfs.namenode.rpc-address.mycluster.nn2</name>
-      <value>machine2.example.com:9820</value>
+      <value>machine2.example.com:8020</value>
     </property>
     <property>
       <name>dfs.namenode.rpc-address.mycluster.nn3</name>
-      <value>machine3.example.com:9820</value>
+      <value>machine3.example.com:8020</value>
     </property>
 
 **Note:** You may similarly configure the "**servicerpc-address**" setting if you so desire.
@@ -291,7 +291,7 @@ We are going to remove the file test1.
 The comment below shows that the file has been moved to Trash directory.
 
     $ hadoop fs -rm -r delete/test1
-    Moved: hdfs://localhost:9820/user/hadoop/delete/test1 to trash at: hdfs://localhost:9820/user/hadoop/.Trash/Current
+    Moved: hdfs://localhost:8020/user/hadoop/delete/test1 to trash at: hdfs://localhost:8020/user/hadoop/.Trash/Current
 
 now we are going to remove the file with skipTrash option,
 which will not send the file to Trash.It will be completely removed from HDFS.
@@ -100,7 +100,51 @@ The mount points of a mount table are specified in the standard Hadoop configura
 </property>
 ```
 
+<<<<<<< HEAD
 The authority following the `viewfs://` scheme in the URI is the mount table name. It is recommanded that the mount table of a cluster should be named by the cluster name. Then Hadoop system will look for a mount table with the name "clusterX" in the Hadoop configuration files. Operations arrange all gateways and service machines to contain the mount tables for ALL clusters such that, for each cluster, the default file system is set to the ViewFs mount table for that cluster as described above.
+=======
+The authority following the `viewfs://` scheme in the URI is the mount table name. It is recommended that the mount table of a cluster should be named by the cluster name. Then Hadoop system will look for a mount table with the name "clusterX" in the Hadoop configuration files. Operations arrange all gateways and service machines to contain the mount tables for ALL clusters such that, for each cluster, the default file system is set to the ViewFs mount table for that cluster as described above.
+
+The mount points of a mount table are specified in the standard Hadoop configuration files. All the mount table config entries for `viewfs` are prefixed by `fs.viewfs.mounttable.`. The mount points that are linking other filesystems are specified using `link` tags. The recommendation is to have mount points name same as in the linked filesystem target locations. For all namespaces that are not configured in the mount table, we can have them fallback to a default filesystem via `linkFallback`.
+
+In the below mount table configuration, namespace `/data` is linked to the filesystem `hdfs://nn1-clusterx.example.com:8020/data`, `/project` is linked to the filesystem `hdfs://nn2-clusterx.example.com:8020/project`. All namespaces that are not configured in the mount table, like `/logs` are linked to the filesystem `hdfs://nn5-clusterx.example.com:8020/home`.
+
+```xml
+<configuration>
+  <property>
+    <name>fs.viewfs.mounttable.ClusterX.link./data</name>
+    <value>hdfs://nn1-clusterx.example.com:8020/data</value>
+  </property>
+  <property>
+    <name>fs.viewfs.mounttable.ClusterX.link./project</name>
+    <value>hdfs://nn2-clusterx.example.com:8020/project</value>
+  </property>
+  <property>
+    <name>fs.viewfs.mounttable.ClusterX.link./user</name>
+    <value>hdfs://nn3-clusterx.example.com:8020/user</value>
+  </property>
+  <property>
+    <name>fs.viewfs.mounttable.ClusterX.link./tmp</name>
+    <value>hdfs://nn4-clusterx.example.com:8020/tmp</value>
+  </property>
+  <property>
+    <name>fs.viewfs.mounttable.ClusterX.linkFallback</name>
+    <value>hdfs://nn5-clusterx.example.com:8020/home</value>
+  </property>
+</configuration>
+```
+
+Alternatively we can have the mount table's root merged with the root of another filesystem via `linkMergeSlash`. In the below mount table configuration, ClusterY's root is merged with the root filesystem at `hdfs://nn1-clustery.example.com:8020`.
+
+```xml
+<configuration>
+  <property>
+    <name>fs.viewfs.mounttable.ClusterY.linkMergeSlash</name>
+    <value>hdfs://nn1-clustery.example.com:8020/</value>
+  </property>
+</configuration>
+```
+>>>>>>> 4304fcd5bdf... HDFS-12990. Change default NameNode RPC port back to 8020. Contributed by Xiao Chen.
 
 ### Pathname Usage Patterns
 
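A client-side sketch of the ClusterX mount table added on the upstream side of the conflict, assuming those NameNodes are reachable (viewfs routes each path to its linked namespace, and unmatched paths to the `linkFallback` target):

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ViewFsMountExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Two of the mount entries from the ClusterX example above.
    conf.set("fs.viewfs.mounttable.ClusterX.link./data",
        "hdfs://nn1-clusterx.example.com:8020/data");
    conf.set("fs.viewfs.mounttable.ClusterX.linkFallback",
        "hdfs://nn5-clusterx.example.com:8020/home");

    // /data resolves to nn1; an unconfigured path such as /logs
    // falls back to nn5 via linkFallback.
    FileSystem viewFs = FileSystem.get(URI.create("viewfs://ClusterX/"), conf);
    System.out.println(viewFs.resolvePath(new Path("/data")));
  }
}
```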
@@ -197,11 +241,11 @@ The mount tables can be described in `core-site.xml` but it is better to use ind
 
 In the file `mountTable.xml`, there is a definition of the mount table "ClusterX" for the hypothetical cluster that is a federation of the three namespace volumes managed by the three namenodes
 
-1. nn1-clusterx.example.com:9820,
-2. nn2-clusterx.example.com:9820, and
-3. nn3-clusterx.example.com:9820.
+1. nn1-clusterx.example.com:8020,
+2. nn2-clusterx.example.com:8020, and
+3. nn3-clusterx.example.com:8020.
 
-Here `/home` and `/tmp` are in the namespace managed by namenode nn1-clusterx.example.com:9820, and projects `/foo` and `/bar` are hosted on the other namenodes of the federated cluster. The home directory base path is set to `/home` so that each user can access its home directory using the getHomeDirectory() method defined in [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html)/[FileContext](../../api/org/apache/hadoop/fs/FileContext.html).
+Here `/home` and `/tmp` are in the namespace managed by namenode nn1-clusterx.example.com:8020, and projects `/foo` and `/bar` are hosted on the other namenodes of the federated cluster. The home directory base path is set to `/home` so that each user can access its home directory using the getHomeDirectory() method defined in [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html)/[FileContext](../../api/org/apache/hadoop/fs/FileContext.html).
 
 ```xml
 <configuration>
@@ -211,19 +255,19 @@ Here `/home` and `/tmp` are in the namespace managed by namenode nn1-clusterx.ex
   </property>
   <property>
     <name>fs.viewfs.mounttable.ClusterX.link./home</name>
-    <value>hdfs://nn1-clusterx.example.com:9820/home</value>
+    <value>hdfs://nn1-clusterx.example.com:8020/home</value>
   </property>
   <property>
     <name>fs.viewfs.mounttable.ClusterX.link./tmp</name>
-    <value>hdfs://nn1-clusterx.example.com:9820/tmp</value>
+    <value>hdfs://nn1-clusterx.example.com:8020/tmp</value>
   </property>
   <property>
     <name>fs.viewfs.mounttable.ClusterX.link./projects/foo</name>
-    <value>hdfs://nn2-clusterx.example.com:9820/projects/foo</value>
+    <value>hdfs://nn2-clusterx.example.com:8020/projects/foo</value>
   </property>
   <property>
     <name>fs.viewfs.mounttable.ClusterX.link./projects/bar</name>
-    <value>hdfs://nn3-clusterx.example.com:9820/projects/bar</value>
+    <value>hdfs://nn3-clusterx.example.com:8020/projects/bar</value>
   </property>
 </configuration>
 ```
@@ -312,7 +312,7 @@ public class TestDFSClientFailover {
     conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + service,
         namenode);
     conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + service + "."
-        + namenode, "localhost:9820");
+        + namenode, "localhost:8020");
 
     // call createProxy implicitly and explicitly
     Path p = new Path("/");
@@ -83,9 +83,9 @@ import com.google.common.collect.Sets;
 
 public class TestDFSUtil {
 
-  static final String NS1_NN_ADDR = "ns1-nn.example.com:9820";
-  static final String NS1_NN1_ADDR = "ns1-nn1.example.com:9820";
-  static final String NS1_NN2_ADDR = "ns1-nn2.example.com:9820";
+  static final String NS1_NN_ADDR = "ns1-nn.example.com:8020";
+  static final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
+  static final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
 
   /**
    * Reset to default UGI settings since some tests change them.
@@ -477,7 +477,7 @@ public class TestDFSUtil {
         DFS_NAMENODE_HTTP_PORT_DEFAULT, null, null, null), httpport);
 
     URI httpAddress = DFSUtil.getInfoServer(new InetSocketAddress(
-        "localhost", 9820), conf, "http");
+        "localhost", 8020), conf, "http");
     assertEquals(
         URI.create("http://localhost:" + DFS_NAMENODE_HTTP_PORT_DEFAULT),
         httpAddress);
@@ -487,10 +487,10 @@ public class TestDFSUtil {
   public void testHANameNodesWithFederation() throws URISyntaxException {
     HdfsConfiguration conf = new HdfsConfiguration();
 
-    final String NS1_NN1_HOST = "ns1-nn1.example.com:9820";
-    final String NS1_NN2_HOST = "ns1-nn2.example.com:9820";
-    final String NS2_NN1_HOST = "ns2-nn1.example.com:9820";
-    final String NS2_NN2_HOST = "ns2-nn2.example.com:9820";
+    final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
+    final String NS1_NN2_HOST = "ns1-nn2.example.com:8020";
+    final String NS2_NN1_HOST = "ns2-nn1.example.com:8020";
+    final String NS2_NN2_HOST = "ns2-nn2.example.com:8020";
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
 
     // Two nameservices, each with two NNs.
@@ -554,9 +554,9 @@ public class TestDFSUtil {
     HdfsConfiguration conf = new HdfsConfiguration();
 
     // One nameservice with two NNs
-    final String NS1_NN1_HOST = "ns1-nn1.example.com:9820";
+    final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
     final String NS1_NN1_HOST_SVC = "ns1-nn2.example.com:9821";
-    final String NS1_NN2_HOST = "ns1-nn1.example.com:9820";
+    final String NS1_NN2_HOST = "ns1-nn1.example.com:8020";
     final String NS1_NN2_HOST_SVC = "ns1-nn2.example.com:9821";
 
     conf.set(DFS_NAMESERVICES, "ns1");
@@ -640,10 +640,10 @@ public class TestDFSUtil {
   public void testGetNNUris() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration();
 
-    final String NS2_NN_ADDR = "ns2-nn.example.com:9820";
-    final String NN1_ADDR = "nn.example.com:9820";
+    final String NS2_NN_ADDR = "ns2-nn.example.com:8020";
+    final String NN1_ADDR = "nn.example.com:8020";
     final String NN1_SRVC_ADDR = "nn.example.com:9821";
-    final String NN2_ADDR = "nn2.example.com:9820";
+    final String NN2_ADDR = "nn2.example.com:8020";
 
     conf.set(DFS_NAMESERVICES, "ns1");
     conf.set(DFSUtil.addKeySuffixes(
@@ -821,7 +821,7 @@ public class TestDFSUtil {
     // Make sure when config FS_DEFAULT_NAME_KEY using IP address,
     // it will automatically convert it to hostname
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:9820");
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
     Collection<URI> uris = getInternalNameServiceUris(conf);
     assertEquals(1, uris.size());
     for (URI uri : uris) {
@@ -1148,7 +1148,7 @@ public class TestQuota {
   @Test
   public void testSetSpaceQuotaWhenStorageTypeIsWrong() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    conf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:9820");
+    conf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
     DFSAdmin admin = new DFSAdmin(conf);
     ByteArrayOutputStream err = new ByteArrayOutputStream();
     PrintStream oldErr = System.err;
@@ -353,7 +353,7 @@ public class TestBlockTokenWithDFS {
     try {
       // prefer non-ephemeral port to avoid port collision on restartNameNode
       cluster = new MiniDFSCluster.Builder(conf)
-          .nameNodePort(ServerSocketUtil.getPort(19820, 100))
+          .nameNodePort(ServerSocketUtil.getPort(18020, 100))
          .nameNodeHttpPort(ServerSocketUtil.getPort(19870, 100))
          .numDataNodes(numDataNodes)
          .build();
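The test ports move in lockstep with the default (19820 shadowed 9820; 18020 shadows 8020). A minimal sketch of the non-ephemeral-port pattern these tests use, assuming a test classpath where MiniDFSCluster is available:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.net.ServerSocketUtil;

public class MiniClusterPortExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // ServerSocketUtil.getPort(port, retries) searches for a free port, so the
    // NameNode gets a stable, non-ephemeral port it can rebind after a restart.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .nameNodePort(ServerSocketUtil.getPort(18020, 100))
        .nameNodeHttpPort(ServerSocketUtil.getPort(19870, 100))
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();
    } finally {
      cluster.shutdown();
    }
  }
}
```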
@@ -79,7 +79,7 @@ public class TestBlockTokenWithDFSStriped extends TestBlockTokenWithDFS {
     }
 
     cluster = new MiniDFSCluster.Builder(conf)
-        .nameNodePort(ServerSocketUtil.getPort(19820, 100))
+        .nameNodePort(ServerSocketUtil.getPort(18020, 100))
         .nameNodeHttpPort(ServerSocketUtil.getPort(19870, 100))
         .numDataNodes(numDNs)
         .build();
@@ -100,7 +100,7 @@ public class TestBlockPoolManager {
   public void testSimpleSingleNS() throws Exception {
     Configuration conf = new Configuration();
     conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY,
-        "hdfs://mock1:9820");
+        "hdfs://mock1:8020");
     bpm.refreshNamenodes(conf);
     assertEquals("create #1\n", log.toString());
   }
@@ -110,8 +110,8 @@ public class TestBlockPoolManager {
     Configuration conf = new Configuration();
     conf.set(DFSConfigKeys.DFS_NAMESERVICES,
         "ns1,ns2");
-    addNN(conf, "ns1", "mock1:9820");
-    addNN(conf, "ns2", "mock1:9820");
+    addNN(conf, "ns1", "mock1:8020");
+    addNN(conf, "ns2", "mock1:8020");
     bpm.refreshNamenodes(conf);
     assertEquals(
         "create #1\n" +
@@ -141,9 +141,9 @@ public class TestBlockPoolManager {
   public void testInternalNameService() throws Exception {
     Configuration conf = new Configuration();
     conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1,ns2,ns3");
-    addNN(conf, "ns1", "mock1:9820");
-    addNN(conf, "ns2", "mock1:9820");
-    addNN(conf, "ns3", "mock1:9820");
+    addNN(conf, "ns1", "mock1:8020");
+    addNN(conf, "ns2", "mock1:8020");
+    addNN(conf, "ns3", "mock1:8020");
     conf.set(DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY, "ns1");
     bpm.refreshNamenodes(conf);
     assertEquals("create #1\n", log.toString());
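A configuration sketch of what this test exercises: in a federated deployment, `dfs.internal.nameservices` restricts which configured nameservices the DataNodes actually register with (the key names are the ones behind the DFSConfigKeys constants above; the hosts are the test's mock names):

```java
import org.apache.hadoop.conf.Configuration;

public class InternalNameserviceExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Three federated nameservices, each NameNode on the default RPC port.
    conf.set("dfs.nameservices", "ns1,ns2,ns3");
    conf.set("dfs.namenode.rpc-address.ns1", "mock1:8020");
    conf.set("dfs.namenode.rpc-address.ns2", "mock1:8020");
    conf.set("dfs.namenode.rpc-address.ns3", "mock1:8020");
    // Only ns1 is internal, so refreshNamenodes() creates a single block pool.
    conf.set("dfs.internal.nameservices", "ns1");
  }
}
```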
@@ -163,8 +163,8 @@ public class TestAllowFormat {
     // is configured in HA, then only DFS_NAMENODE_SHARED_EDITS_DIR_KEY
     // is considered.
     String localhost = "127.0.0.1";
-    InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 9820);
-    InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9820);
+    InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
+    InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 8020);
     HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
 
     conf.set(DFS_NAMENODE_NAME_DIR_KEY,
@@ -348,7 +348,6 @@ public class TestGetConf {
     verifyAddresses(conf, TestType.SECONDARY, false, secondaryAddresses);
     verifyAddresses(conf, TestType.NNRPCADDRESSES, true, nnAddresses);
   }
 
   @Test(timeout=10000)
   public void testGetSpecificKey() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration();

File diff suppressed because one or more lines are too long
@@ -63,8 +63,8 @@ $H3 Basic Usage
 
   The most common invocation of DistCp is an inter-cluster copy:
 
-    bash$ hadoop distcp hdfs://nn1:9820/foo/bar \
-    hdfs://nn2:9820/bar/foo
+    bash$ hadoop distcp hdfs://nn1:8020/foo/bar \
+    hdfs://nn2:8020/bar/foo
 
   This will expand the namespace under `/foo/bar` on nn1 into a temporary file,
   partition its contents among a set of map tasks, and start a copy on each
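The same invocation can be expressed against the options parser the CLI uses; a sketch based on the OptionsParser API exercised by TestOptionsParser below:

```java
import org.apache.hadoop.tools.DistCpOptions;
import org.apache.hadoop.tools.OptionsParser;

public class DistCpArgsExample {
  public static void main(String[] args) {
    // Equivalent of: hadoop distcp hdfs://nn1:8020/foo/bar hdfs://nn2:8020/bar/foo
    DistCpOptions options = OptionsParser.parse(new String[] {
        "hdfs://nn1:8020/foo/bar",
        "hdfs://nn2:8020/bar/foo"});
    System.out.println(options.getTargetPath()); // hdfs://nn2:8020/bar/foo
  }
}
```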
@@ -72,19 +72,19 @@ $H3 Basic Usage
 
   One can also specify multiple source directories on the command line:
 
-    bash$ hadoop distcp hdfs://nn1:9820/foo/a \
-    hdfs://nn1:9820/foo/b \
-    hdfs://nn2:9820/bar/foo
+    bash$ hadoop distcp hdfs://nn1:8020/foo/a \
+    hdfs://nn1:8020/foo/b \
+    hdfs://nn2:8020/bar/foo
 
   Or, equivalently, from a file using the -f option:
 
-    bash$ hadoop distcp -f hdfs://nn1:9820/srclist \
-    hdfs://nn2:9820/bar/foo
+    bash$ hadoop distcp -f hdfs://nn1:8020/srclist \
+    hdfs://nn2:8020/bar/foo
 
   Where `srclist` contains
 
-    hdfs://nn1:9820/foo/a
-    hdfs://nn1:9820/foo/b
+    hdfs://nn1:8020/foo/a
+    hdfs://nn1:8020/foo/b
 
   When copying from multiple sources, DistCp will abort the copy with an error
   message if two sources collide, but collisions at the destination are
@@ -126,35 +126,35 @@ $H3 Update and Overwrite
   Consider a copy from `/source/first/` and `/source/second/` to `/target/`,
   where the source paths have the following contents:
 
-    hdfs://nn1:9820/source/first/1
-    hdfs://nn1:9820/source/first/2
-    hdfs://nn1:9820/source/second/10
-    hdfs://nn1:9820/source/second/20
+    hdfs://nn1:8020/source/first/1
+    hdfs://nn1:8020/source/first/2
+    hdfs://nn1:8020/source/second/10
+    hdfs://nn1:8020/source/second/20
 
   When DistCp is invoked without `-update` or `-overwrite`, the DistCp defaults
   would create directories `first/` and `second/`, under `/target`. Thus:
 
-    distcp hdfs://nn1:9820/source/first hdfs://nn1:9820/source/second hdfs://nn2:9820/target
+    distcp hdfs://nn1:8020/source/first hdfs://nn1:8020/source/second hdfs://nn2:8020/target
 
   would yield the following contents in `/target`:
 
-    hdfs://nn2:9820/target/first/1
-    hdfs://nn2:9820/target/first/2
-    hdfs://nn2:9820/target/second/10
-    hdfs://nn2:9820/target/second/20
+    hdfs://nn2:8020/target/first/1
+    hdfs://nn2:8020/target/first/2
+    hdfs://nn2:8020/target/second/10
+    hdfs://nn2:8020/target/second/20
 
   When either `-update` or `-overwrite` is specified, the **contents** of the
   source-directories are copied to target, and not the source directories
   themselves. Thus:
 
-    distcp -update hdfs://nn1:9820/source/first hdfs://nn1:9820/source/second hdfs://nn2:9820/target
+    distcp -update hdfs://nn1:8020/source/first hdfs://nn1:8020/source/second hdfs://nn2:8020/target
 
   would yield the following contents in `/target`:
 
-    hdfs://nn2:9820/target/1
-    hdfs://nn2:9820/target/2
-    hdfs://nn2:9820/target/10
-    hdfs://nn2:9820/target/20
+    hdfs://nn2:8020/target/1
+    hdfs://nn2:8020/target/2
+    hdfs://nn2:8020/target/10
+    hdfs://nn2:8020/target/20
 
   By extension, if both source folders contained a file with the same name
   (say, `0`), then both sources would map an entry to `/target/0` at the
@@ -162,27 +162,27 @@ $H3 Update and Overwrite
 
   Now, consider the following copy operation:
 
-    distcp hdfs://nn1:9820/source/first hdfs://nn1:9820/source/second hdfs://nn2:9820/target
+    distcp hdfs://nn1:8020/source/first hdfs://nn1:8020/source/second hdfs://nn2:8020/target
 
   With sources/sizes:
 
-    hdfs://nn1:9820/source/first/1 32
-    hdfs://nn1:9820/source/first/2 32
-    hdfs://nn1:9820/source/second/10 64
-    hdfs://nn1:9820/source/second/20 32
+    hdfs://nn1:8020/source/first/1 32
+    hdfs://nn1:8020/source/first/2 32
+    hdfs://nn1:8020/source/second/10 64
+    hdfs://nn1:8020/source/second/20 32
 
   And destination/sizes:
 
-    hdfs://nn2:9820/target/1 32
-    hdfs://nn2:9820/target/10 32
-    hdfs://nn2:9820/target/20 64
+    hdfs://nn2:8020/target/1 32
+    hdfs://nn2:8020/target/10 32
+    hdfs://nn2:8020/target/20 64
 
   Will effect:
 
-    hdfs://nn2:9820/target/1 32
-    hdfs://nn2:9820/target/2 32
-    hdfs://nn2:9820/target/10 64
-    hdfs://nn2:9820/target/20 32
+    hdfs://nn2:8020/target/1 32
+    hdfs://nn2:8020/target/2 32
+    hdfs://nn2:8020/target/10 64
+    hdfs://nn2:8020/target/20 32
 
   `1` is skipped because the file-length and contents match. `2` is copied
   because it doesn't exist at the target. `10` and `20` are overwritten since
@@ -37,36 +37,36 @@ public class TestOptionsParser {
   @Test
   public void testParseIgnoreFailure() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertFalse(options.shouldIgnoreFailures());
 
     options = OptionsParser.parse(new String[] {
         "-i",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldIgnoreFailures());
   }
 
   @Test
   public void testParseOverwrite() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertFalse(options.shouldOverwrite());
 
     options = OptionsParser.parse(new String[] {
         "-overwrite",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldOverwrite());
 
     try {
       OptionsParser.parse(new String[] {
           "-update",
           "-overwrite",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/"});
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/"});
       Assert.fail("Update and overwrite aren't allowed together");
     } catch (IllegalArgumentException ignore) {
     }
@@ -75,44 +75,44 @@ public class TestOptionsParser {
   @Test
   public void testLogPath() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertNull(options.getLogPath());
 
     options = OptionsParser.parse(new String[] {
         "-log",
-        "hdfs://localhost:9820/logs",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
-    Assert.assertEquals(options.getLogPath(), new Path("hdfs://localhost:9820/logs"));
+        "hdfs://localhost:8020/logs",
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
+    Assert.assertEquals(options.getLogPath(), new Path("hdfs://localhost:8020/logs"));
   }
 
   @Test
   public void testParseBlokcing() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldBlock());
 
     options = OptionsParser.parse(new String[] {
         "-async",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertFalse(options.shouldBlock());
   }
 
   @Test
   public void testParsebandwidth() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertEquals(options.getMapBandwidth(), 0, DELTA);
 
     options = OptionsParser.parse(new String[] {
         "-bandwidth",
         "11.2",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertEquals(options.getMapBandwidth(), 11.2, DELTA);
   }
 
@@ -121,8 +121,8 @@ public class TestOptionsParser {
     OptionsParser.parse(new String[] {
         "-bandwidth",
         "-11",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
   }
 
   @Test(expected=IllegalArgumentException.class)
@@ -130,22 +130,22 @@ public class TestOptionsParser {
     OptionsParser.parse(new String[] {
         "-bandwidth",
         "0",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
   }
 
   @Test
   public void testParseSkipCRC() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertFalse(options.shouldSkipCRC());
 
     options = OptionsParser.parse(new String[] {
         "-update",
         "-skipcrccheck",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldSyncFolder());
     Assert.assertTrue(options.shouldSkipCRC());
   }
@@ -153,22 +153,22 @@ public class TestOptionsParser {
   @Test
   public void testParseAtomicCommit() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertFalse(options.shouldAtomicCommit());
 
     options = OptionsParser.parse(new String[] {
         "-atomic",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldAtomicCommit());
 
     try {
       OptionsParser.parse(new String[] {
           "-atomic",
           "-update",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/"});
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/"});
       Assert.fail("Atomic and sync folders were allowed");
     } catch (IllegalArgumentException ignore) { }
   }
@@ -176,30 +176,30 @@ public class TestOptionsParser {
   @Test
   public void testParseWorkPath() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertNull(options.getAtomicWorkPath());
 
     options = OptionsParser.parse(new String[] {
         "-atomic",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertNull(options.getAtomicWorkPath());
 
     options = OptionsParser.parse(new String[] {
         "-atomic",
         "-tmp",
-        "hdfs://localhost:9820/work",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
-    Assert.assertEquals(options.getAtomicWorkPath(), new Path("hdfs://localhost:9820/work"));
+        "hdfs://localhost:8020/work",
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
+    Assert.assertEquals(options.getAtomicWorkPath(), new Path("hdfs://localhost:8020/work"));
 
     try {
       OptionsParser.parse(new String[] {
           "-tmp",
-          "hdfs://localhost:9820/work",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/"});
+          "hdfs://localhost:8020/work",
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/"});
       Assert.fail("work path was allowed without -atomic switch");
     } catch (IllegalArgumentException ignore) {}
   }
@@ -207,37 +207,37 @@ public class TestOptionsParser {
   @Test
   public void testParseSyncFolders() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertFalse(options.shouldSyncFolder());
 
     options = OptionsParser.parse(new String[] {
         "-update",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldSyncFolder());
   }
 
   @Test
   public void testParseDeleteMissing() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertFalse(options.shouldDeleteMissing());
 
     options = OptionsParser.parse(new String[] {
         "-update",
         "-delete",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldSyncFolder());
     Assert.assertTrue(options.shouldDeleteMissing());
 
     options = OptionsParser.parse(new String[] {
         "-overwrite",
         "-delete",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldOverwrite());
     Assert.assertTrue(options.shouldDeleteMissing());
 
|
@ -245,8 +245,8 @@ public class TestOptionsParser {
|
||||||
OptionsParser.parse(new String[] {
|
OptionsParser.parse(new String[] {
|
||||||
"-atomic",
|
"-atomic",
|
||||||
"-delete",
|
"-delete",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.fail("Atomic and delete folders were allowed");
|
Assert.fail("Atomic and delete folders were allowed");
|
||||||
} catch (IllegalArgumentException ignore) { }
|
} catch (IllegalArgumentException ignore) { }
|
||||||
}
|
}
|
||||||
|
@ -254,38 +254,38 @@ public class TestOptionsParser {
|
||||||
@Test
|
@Test
|
||||||
public void testParseMaps() {
|
public void testParseMaps() {
|
||||||
DistCpOptions options = OptionsParser.parse(new String[] {
|
DistCpOptions options = OptionsParser.parse(new String[] {
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.assertEquals(options.getMaxMaps(), DistCpConstants.DEFAULT_MAPS);
|
Assert.assertEquals(options.getMaxMaps(), DistCpConstants.DEFAULT_MAPS);
|
||||||
|
|
||||||
options = OptionsParser.parse(new String[] {
|
options = OptionsParser.parse(new String[] {
|
||||||
"-m",
|
"-m",
|
||||||
"1",
|
"1",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.assertEquals(options.getMaxMaps(), 1);
|
Assert.assertEquals(options.getMaxMaps(), 1);
|
||||||
|
|
||||||
options = OptionsParser.parse(new String[] {
|
options = OptionsParser.parse(new String[] {
|
||||||
"-m",
|
"-m",
|
||||||
"0",
|
"0",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.assertEquals(options.getMaxMaps(), 1);
|
Assert.assertEquals(options.getMaxMaps(), 1);
|
||||||
|
|
||||||
try {
|
try {
|
||||||
OptionsParser.parse(new String[] {
|
OptionsParser.parse(new String[] {
|
||||||
"-m",
|
"-m",
|
||||||
"hello",
|
"hello",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.fail("Non numberic map parsed");
|
Assert.fail("Non numberic map parsed");
|
||||||
} catch (IllegalArgumentException ignore) { }
|
} catch (IllegalArgumentException ignore) { }
|
||||||
|
|
||||||
try {
|
try {
|
||||||
OptionsParser.parse(new String[] {
|
OptionsParser.parse(new String[] {
|
||||||
"-mapredXslConf",
|
"-mapredXslConf",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.fail("Non numberic map parsed");
|
Assert.fail("Non numberic map parsed");
|
||||||
} catch (IllegalArgumentException ignore) { }
|
} catch (IllegalArgumentException ignore) { }
|
||||||
}
|
}
|
||||||
|
@ -293,8 +293,8 @@ public class TestOptionsParser {
|
||||||
@Test
|
@Test
|
||||||
public void testParseNumListstatusThreads() {
|
public void testParseNumListstatusThreads() {
|
||||||
DistCpOptions options = OptionsParser.parse(new String[] {
|
DistCpOptions options = OptionsParser.parse(new String[] {
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
// If command line argument isn't set, we expect .getNumListstatusThreads
|
// If command line argument isn't set, we expect .getNumListstatusThreads
|
||||||
// option to be zero (so that we know when to override conf properties).
|
// option to be zero (so that we know when to override conf properties).
|
||||||
Assert.assertEquals(0, options.getNumListstatusThreads());
|
Assert.assertEquals(0, options.getNumListstatusThreads());
|
||||||
|
@ -302,23 +302,23 @@ public class TestOptionsParser {
|
||||||
options = OptionsParser.parse(new String[] {
|
options = OptionsParser.parse(new String[] {
|
||||||
"--numListstatusThreads",
|
"--numListstatusThreads",
|
||||||
"12",
|
"12",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.assertEquals(12, options.getNumListstatusThreads());
|
Assert.assertEquals(12, options.getNumListstatusThreads());
|
||||||
|
|
||||||
options = OptionsParser.parse(new String[] {
|
options = OptionsParser.parse(new String[] {
|
||||||
"--numListstatusThreads",
|
"--numListstatusThreads",
|
||||||
"0",
|
"0",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.assertEquals(0, options.getNumListstatusThreads());
|
Assert.assertEquals(0, options.getNumListstatusThreads());
|
||||||
|
|
||||||
try {
|
try {
|
||||||
OptionsParser.parse(new String[] {
|
OptionsParser.parse(new String[] {
|
||||||
"--numListstatusThreads",
|
"--numListstatusThreads",
|
||||||
"hello",
|
"hello",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.fail("Non numberic numListstatusThreads parsed");
|
Assert.fail("Non numberic numListstatusThreads parsed");
|
||||||
} catch (IllegalArgumentException ignore) { }
|
} catch (IllegalArgumentException ignore) { }
|
||||||
|
|
||||||
|
@@ -326,8 +326,8 @@ public class TestOptionsParser {
     options = OptionsParser.parse(new String[] {
         "--numListstatusThreads",
         "100",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertEquals(DistCpOptions.MAX_NUM_LISTSTATUS_THREADS,
         options.getNumListstatusThreads());
   }
@@ -336,10 +336,10 @@ public class TestOptionsParser {
   public void testSourceListing() {
     DistCpOptions options = OptionsParser.parse(new String[] {
         "-f",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertEquals(options.getSourceFileListing(),
-        new Path("hdfs://localhost:9820/source/first"));
+        new Path("hdfs://localhost:8020/source/first"));
   }
 
   @Test
@@ -347,9 +347,9 @@ public class TestOptionsParser {
     try {
       OptionsParser.parse(new String[] {
           "-f",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/"});
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/"});
       Assert.fail("Both source listing & source paths allowed");
     } catch (IllegalArgumentException ignore) {}
   }
@ -358,7 +358,7 @@ public class TestOptionsParser {
|
||||||
public void testMissingSourceInfo() {
|
public void testMissingSourceInfo() {
|
||||||
try {
|
try {
|
||||||
OptionsParser.parse(new String[] {
|
OptionsParser.parse(new String[] {
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.fail("Neither source listing not source paths present");
|
Assert.fail("Neither source listing not source paths present");
|
||||||
} catch (IllegalArgumentException ignore) {}
|
} catch (IllegalArgumentException ignore) {}
|
||||||
}
|
}
|
||||||
|
@ -367,7 +367,7 @@ public class TestOptionsParser {
|
||||||
public void testMissingTarget() {
|
public void testMissingTarget() {
|
||||||
try {
|
try {
|
||||||
OptionsParser.parse(new String[] {
|
OptionsParser.parse(new String[] {
|
||||||
"-f", "hdfs://localhost:9820/source"});
|
"-f", "hdfs://localhost:8020/source"});
|
||||||
Assert.fail("Missing target allowed");
|
Assert.fail("Missing target allowed");
|
||||||
} catch (IllegalArgumentException ignore) {}
|
} catch (IllegalArgumentException ignore) {}
|
||||||
}
|
}
|
||||||
|
@ -376,7 +376,7 @@ public class TestOptionsParser {
|
||||||
public void testInvalidArgs() {
|
public void testInvalidArgs() {
|
||||||
try {
|
try {
|
||||||
OptionsParser.parse(new String[] {
|
OptionsParser.parse(new String[] {
|
||||||
"-m", "-f", "hdfs://localhost:9820/source"});
|
"-m", "-f", "hdfs://localhost:8020/source"});
|
||||||
Assert.fail("Missing map value");
|
Assert.fail("Missing map value");
|
||||||
} catch (IllegalArgumentException ignore) {}
|
} catch (IllegalArgumentException ignore) {}
|
||||||
}
|
}
|
||||||
|
@ -387,14 +387,14 @@ public class TestOptionsParser {
|
||||||
"-strategy",
|
"-strategy",
|
||||||
"dynamic",
|
"dynamic",
|
||||||
"-f",
|
"-f",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.assertEquals(options.getCopyStrategy(), "dynamic");
|
Assert.assertEquals(options.getCopyStrategy(), "dynamic");
|
||||||
|
|
||||||
options = OptionsParser.parse(new String[] {
|
options = OptionsParser.parse(new String[] {
|
||||||
"-f",
|
"-f",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.assertEquals(options.getCopyStrategy(), DistCpConstants.UNIFORMSIZE);
|
Assert.assertEquals(options.getCopyStrategy(), DistCpConstants.UNIFORMSIZE);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -402,17 +402,17 @@ public class TestOptionsParser {
|
||||||
public void testTargetPath() {
|
public void testTargetPath() {
|
||||||
DistCpOptions options = OptionsParser.parse(new String[] {
|
DistCpOptions options = OptionsParser.parse(new String[] {
|
||||||
"-f",
|
"-f",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.assertEquals(options.getTargetPath(), new Path("hdfs://localhost:9820/target/"));
|
Assert.assertEquals(options.getTargetPath(), new Path("hdfs://localhost:8020/target/"));
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testPreserve() {
|
public void testPreserve() {
|
||||||
DistCpOptions options = OptionsParser.parse(new String[] {
|
DistCpOptions options = OptionsParser.parse(new String[] {
|
||||||
"-f",
|
"-f",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
|
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
|
||||||
Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
|
Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
|
||||||
Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
|
Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
|
||||||
|
@ -423,8 +423,8 @@ public class TestOptionsParser {
|
||||||
options = OptionsParser.parse(new String[] {
|
options = OptionsParser.parse(new String[] {
|
||||||
"-p",
|
"-p",
|
||||||
"-f",
|
"-f",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
|
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
|
||||||
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
|
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
|
||||||
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
|
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
|
||||||
|
@ -436,8 +436,8 @@ public class TestOptionsParser {
|
||||||
|
|
||||||
options = OptionsParser.parse(new String[] {
|
options = OptionsParser.parse(new String[] {
|
||||||
"-p",
|
"-p",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
|
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
|
||||||
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
|
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
|
||||||
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
|
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
|
||||||
|
@ -450,8 +450,8 @@ public class TestOptionsParser {
|
||||||
options = OptionsParser.parse(new String[] {
|
options = OptionsParser.parse(new String[] {
|
||||||
"-pbr",
|
"-pbr",
|
||||||
"-f",
|
"-f",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
|
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
|
||||||
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
|
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
|
||||||
Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
|
Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
|
||||||
|
@ -464,8 +464,8 @@ public class TestOptionsParser {
|
||||||
options = OptionsParser.parse(new String[] {
|
options = OptionsParser.parse(new String[] {
|
||||||
"-pbrgup",
|
"-pbrgup",
|
||||||
"-f",
|
"-f",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
|
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
|
||||||
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
|
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
|
||||||
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
|
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
|
||||||
|
@ -478,8 +478,8 @@ public class TestOptionsParser {
|
||||||
options = OptionsParser.parse(new String[] {
|
options = OptionsParser.parse(new String[] {
|
||||||
"-pbrgupcaxt",
|
"-pbrgupcaxt",
|
||||||
"-f",
|
"-f",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
|
Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
|
||||||
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
|
Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
|
||||||
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
|
Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
|
||||||
|
@ -493,8 +493,8 @@ public class TestOptionsParser {
|
||||||
options = OptionsParser.parse(new String[] {
|
options = OptionsParser.parse(new String[] {
|
||||||
"-pc",
|
"-pc",
|
||||||
"-f",
|
"-f",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE));
|
Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE));
|
||||||
Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
|
Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
|
||||||
Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
|
Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
|
||||||
|
@ -507,8 +507,8 @@ public class TestOptionsParser {
|
||||||
options = OptionsParser.parse(new String[] {
|
options = OptionsParser.parse(new String[] {
|
||||||
"-p",
|
"-p",
|
||||||
"-f",
|
"-f",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.assertEquals(DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT.length() - 2,
|
Assert.assertEquals(DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT.length() - 2,
|
||||||
options.getPreserveAttributes().size());
|
options.getPreserveAttributes().size());
|
||||||
|
|
||||||
|
@ -516,15 +516,15 @@ public class TestOptionsParser {
|
||||||
OptionsParser.parse(new String[] {
|
OptionsParser.parse(new String[] {
|
||||||
"-pabcd",
|
"-pabcd",
|
||||||
"-f",
|
"-f",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target"});
|
"hdfs://localhost:8020/target"});
|
||||||
Assert.fail("Invalid preserve attribute");
|
Assert.fail("Invalid preserve attribute");
|
||||||
}
|
}
|
||||||
catch (NoSuchElementException ignore) {}
|
catch (NoSuchElementException ignore) {}
|
||||||
|
|
||||||
Builder builder = new DistCpOptions.Builder(
|
Builder builder = new DistCpOptions.Builder(
|
||||||
new Path("hdfs://localhost:9820/source/first"),
|
new Path("hdfs://localhost:8020/source/first"),
|
||||||
new Path("hdfs://localhost:9820/target/"));
|
new Path("hdfs://localhost:8020/target/"));
|
||||||
Assert.assertFalse(
|
Assert.assertFalse(
|
||||||
builder.build().shouldPreserve(FileAttribute.PERMISSION));
|
builder.build().shouldPreserve(FileAttribute.PERMISSION));
|
||||||
builder.preserve(FileAttribute.PERMISSION);
|
builder.preserve(FileAttribute.PERMISSION);
|
||||||
|
@ -552,8 +552,8 @@ public class TestOptionsParser {
|
||||||
DistCpOptions options = OptionsParser.parse(new String[] {
|
DistCpOptions options = OptionsParser.parse(new String[] {
|
||||||
"-atomic",
|
"-atomic",
|
||||||
"-i",
|
"-i",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
options.appendToConf(conf);
|
options.appendToConf(conf);
|
||||||
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false));
|
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false));
|
||||||
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(), false));
|
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(), false));
|
||||||
|
@ -570,8 +570,8 @@ public class TestOptionsParser {
|
||||||
"-pu",
|
"-pu",
|
||||||
"-bandwidth",
|
"-bandwidth",
|
||||||
"11.2",
|
"11.2",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
options.appendToConf(conf);
|
options.appendToConf(conf);
|
||||||
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
|
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
|
||||||
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(), false));
|
Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(), false));
|
||||||
|
@ -644,8 +644,8 @@ public class TestOptionsParser {
|
||||||
DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
|
DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
|
||||||
|
|
||||||
DistCpOptions options = OptionsParser.parse(new String[] { "-update",
|
DistCpOptions options = OptionsParser.parse(new String[] { "-update",
|
||||||
"-append", "hdfs://localhost:9820/source/first",
|
"-append", "hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/" });
|
"hdfs://localhost:8020/target/" });
|
||||||
options.appendToConf(conf);
|
options.appendToConf(conf);
|
||||||
Assert.assertTrue(conf.getBoolean(
|
Assert.assertTrue(conf.getBoolean(
|
||||||
DistCpOptionSwitch.APPEND.getConfigLabel(), false));
|
DistCpOptionSwitch.APPEND.getConfigLabel(), false));
|
||||||
|
@ -655,8 +655,8 @@ public class TestOptionsParser {
|
||||||
// make sure -append is only valid when -update is specified
|
// make sure -append is only valid when -update is specified
|
||||||
try {
|
try {
|
||||||
OptionsParser.parse(new String[] { "-append",
|
OptionsParser.parse(new String[] { "-append",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/" });
|
"hdfs://localhost:8020/target/" });
|
||||||
fail("Append should fail if update option is not specified");
|
fail("Append should fail if update option is not specified");
|
||||||
} catch (IllegalArgumentException e) {
|
} catch (IllegalArgumentException e) {
|
||||||
GenericTestUtils.assertExceptionContains(
|
GenericTestUtils.assertExceptionContains(
|
||||||
|
@ -667,8 +667,8 @@ public class TestOptionsParser {
|
||||||
try {
|
try {
|
||||||
OptionsParser.parse(new String[] {
|
OptionsParser.parse(new String[] {
|
||||||
"-append", "-update", "-skipcrccheck",
|
"-append", "-update", "-skipcrccheck",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/" });
|
"hdfs://localhost:8020/target/" });
|
||||||
fail("Append should fail if skipCrc option is specified");
|
fail("Append should fail if skipCrc option is specified");
|
||||||
} catch (IllegalArgumentException e) {
|
} catch (IllegalArgumentException e) {
|
||||||
GenericTestUtils.assertExceptionContains(
|
GenericTestUtils.assertExceptionContains(
|
||||||
|
@ -687,8 +687,8 @@ public class TestOptionsParser {
|
||||||
|
|
||||||
DistCpOptions options = OptionsParser.parse(new String[] { "-update",
|
DistCpOptions options = OptionsParser.parse(new String[] { "-update",
|
||||||
optionStr, "s1", "s2",
|
optionStr, "s1", "s2",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/" });
|
"hdfs://localhost:8020/target/" });
|
||||||
options.appendToConf(conf);
|
options.appendToConf(conf);
|
||||||
Assert.assertTrue(conf.getBoolean(optionLabel, false));
|
Assert.assertTrue(conf.getBoolean(optionLabel, false));
|
||||||
Assert.assertTrue(isDiff?
|
Assert.assertTrue(isDiff?
|
||||||
|
@ -698,8 +698,8 @@ public class TestOptionsParser {
|
||||||
|
|
||||||
options = OptionsParser.parse(new String[] {
|
options = OptionsParser.parse(new String[] {
|
||||||
optionStr, "s1", ".", "-update",
|
optionStr, "s1", ".", "-update",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/" });
|
"hdfs://localhost:8020/target/" });
|
||||||
options.appendToConf(conf);
|
options.appendToConf(conf);
|
||||||
Assert.assertTrue(conf.getBoolean(optionLabel, false));
|
Assert.assertTrue(conf.getBoolean(optionLabel, false));
|
||||||
Assert.assertTrue(isDiff?
|
Assert.assertTrue(isDiff?
|
||||||
|
@ -710,8 +710,8 @@ public class TestOptionsParser {
|
||||||
// -diff/-rdiff requires two option values
|
// -diff/-rdiff requires two option values
|
||||||
try {
|
try {
|
||||||
OptionsParser.parse(new String[] {optionStr, "s1", "-update",
|
OptionsParser.parse(new String[] {optionStr, "s1", "-update",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/" });
|
"hdfs://localhost:8020/target/" });
|
||||||
fail(optionStr + " should fail with only one snapshot name");
|
fail(optionStr + " should fail with only one snapshot name");
|
||||||
} catch (IllegalArgumentException e) {
|
} catch (IllegalArgumentException e) {
|
||||||
GenericTestUtils.assertExceptionContains(
|
GenericTestUtils.assertExceptionContains(
|
||||||
|
@ -721,8 +721,8 @@ public class TestOptionsParser {
|
||||||
// make sure -diff/-rdiff is only valid when -update is specified
|
// make sure -diff/-rdiff is only valid when -update is specified
|
||||||
try {
|
try {
|
||||||
OptionsParser.parse(new String[] {optionStr, "s1", "s2",
|
OptionsParser.parse(new String[] {optionStr, "s1", "s2",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/" });
|
"hdfs://localhost:8020/target/" });
|
||||||
fail(optionStr + " should fail if -update option is not specified");
|
fail(optionStr + " should fail if -update option is not specified");
|
||||||
} catch (IllegalArgumentException e) {
|
} catch (IllegalArgumentException e) {
|
||||||
GenericTestUtils.assertExceptionContains(
|
GenericTestUtils.assertExceptionContains(
|
||||||
|
@ -732,8 +732,8 @@ public class TestOptionsParser {
|
||||||
try {
|
try {
|
||||||
OptionsParser.parse(new String[] {
|
OptionsParser.parse(new String[] {
|
||||||
"-diff", "s1", "s2", "-update", "-delete",
|
"-diff", "s1", "s2", "-update", "-delete",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/" });
|
"hdfs://localhost:8020/target/" });
|
||||||
fail("Should fail as -delete and -diff/-rdiff are mutually exclusive");
|
fail("Should fail as -delete and -diff/-rdiff are mutually exclusive");
|
||||||
} catch (IllegalArgumentException e) {
|
} catch (IllegalArgumentException e) {
|
||||||
assertExceptionContains(
|
assertExceptionContains(
|
||||||
|
@ -743,8 +743,8 @@ public class TestOptionsParser {
|
||||||
try {
|
try {
|
||||||
OptionsParser.parse(new String[] {
|
OptionsParser.parse(new String[] {
|
||||||
"-diff", "s1", "s2", "-delete",
|
"-diff", "s1", "s2", "-delete",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/" });
|
"hdfs://localhost:8020/target/" });
|
||||||
fail("Should fail as -delete and -diff/-rdiff are mutually exclusive");
|
fail("Should fail as -delete and -diff/-rdiff are mutually exclusive");
|
||||||
} catch (IllegalArgumentException e) {
|
} catch (IllegalArgumentException e) {
|
||||||
assertExceptionContains(
|
assertExceptionContains(
|
||||||
|
@ -754,8 +754,8 @@ public class TestOptionsParser {
|
||||||
try {
|
try {
|
||||||
OptionsParser.parse(new String[] {optionStr, "s1", "s2",
|
OptionsParser.parse(new String[] {optionStr, "s1", "s2",
|
||||||
"-delete", "-overwrite",
|
"-delete", "-overwrite",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/" });
|
"hdfs://localhost:8020/target/" });
|
||||||
fail("Should fail as -delete and -diff are mutually exclusive");
|
fail("Should fail as -delete and -diff are mutually exclusive");
|
||||||
} catch (IllegalArgumentException e) {
|
} catch (IllegalArgumentException e) {
|
||||||
assertExceptionContains(
|
assertExceptionContains(
|
||||||
|
@ -768,8 +768,8 @@ public class TestOptionsParser {
|
||||||
optionStr, "s1", "s2",
|
optionStr, "s1", "s2",
|
||||||
optionStrOther, "s2", "s1",
|
optionStrOther, "s2", "s1",
|
||||||
"-update",
|
"-update",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/" });
|
"hdfs://localhost:8020/target/" });
|
||||||
fail(optionStr + " should fail if " + optionStrOther
|
fail(optionStr + " should fail if " + optionStrOther
|
||||||
+ " is also specified");
|
+ " is also specified");
|
||||||
} catch (IllegalArgumentException e) {
|
} catch (IllegalArgumentException e) {
|
||||||
|
@ -791,15 +791,15 @@ public class TestOptionsParser {
|
||||||
@Test
|
@Test
|
||||||
public void testExclusionsOption() {
|
public void testExclusionsOption() {
|
||||||
DistCpOptions options = OptionsParser.parse(new String[] {
|
DistCpOptions options = OptionsParser.parse(new String[] {
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.assertNull(options.getFiltersFile());
|
Assert.assertNull(options.getFiltersFile());
|
||||||
|
|
||||||
options = OptionsParser.parse(new String[] {
|
options = OptionsParser.parse(new String[] {
|
||||||
"-filters",
|
"-filters",
|
||||||
"/tmp/filters.txt",
|
"/tmp/filters.txt",
|
||||||
"hdfs://localhost:9820/source/first",
|
"hdfs://localhost:8020/source/first",
|
||||||
"hdfs://localhost:9820/target/"});
|
"hdfs://localhost:8020/target/"});
|
||||||
Assert.assertEquals(options.getFiltersFile(), "/tmp/filters.txt");
|
Assert.assertEquals(options.getFiltersFile(), "/tmp/filters.txt");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
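Every hunk in the TestOptionsParser diff above is the same mechanical substitution: the NameNode RPC port in the fixture URIs moves from 9820 back to 8020, and no assertion logic changes. As a quick illustration of the API these tests exercise, here is a minimal sketch (the class name and the -update flag are illustrative additions, not part of the commit) that parses a DistCp command line using the restored default port:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.DistCpOptions;
import org.apache.hadoop.tools.OptionsParser;

public class OptionsParserSketch {
  public static void main(String[] args) {
    // Same shape as the fixtures above: source and target URIs that spell
    // out the NameNode RPC port explicitly.
    DistCpOptions options = OptionsParser.parse(new String[] {
        "-update",
        "hdfs://localhost:8020/source/first",
        "hdfs://localhost:8020/target/"});
    // The parsed target path keeps the full authority, port included.
    Path target = options.getTargetPath();
    System.out.println(target);  // hdfs://localhost:8020/target/
  }
}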
@@ -165,7 +165,7 @@ Hadoop uses URIs to refer to files within a filesystem. Some common examples are

    local://etc/hosts
    hdfs://cluster1/users/example/data/set1
-    hdfs://cluster2.example.org:9820/users/example/data/set1
+    hdfs://cluster2.example.org:8020/users/example/data/set1

The Swift Filesystem Client adds a new URL type `swift`. In a Swift Filesystem URL, the hostname part of a URL identifies the container and the service to work with; the path the name of the object. Here are some examples
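The documentation hunk above is worth pausing on: example URIs in user-facing docs get copied verbatim, so any example that spells out a NameNode port must now show 8020 to match the shipped default. A minimal sketch (the hostname and path are the doc's own placeholders, not a live endpoint) of how that authority is consumed on the client side:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DefaultUriSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // fs.defaultFS supplies the scheme, host and port for unqualified paths.
    conf.set("fs.defaultFS", "hdfs://cluster2.example.org:8020");
    URI nn = FileSystem.getDefaultUri(conf);
    System.out.println(nn.getPort());  // 8020
    // An absolute path qualified against that authority:
    Path p = new Path(nn.toString() + "/users/example/data/set1");
    System.out.println(p);  // hdfs://cluster2.example.org:8020/users/example/data/set1
  }
}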
@@ -4547,7 +4547,7 @@
    "yarn.nodemanager.keytab" : "/etc/krb5.keytab",
    "mapreduce.task.io.sort.factor" : "10",
    "yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
-    "mapreduce.job.working.dir" : "hdfs://a2115.smile.com:9820/user/jenkins",
+    "mapreduce.job.working.dir" : "hdfs://a2115.smile.com:8020/user/jenkins",
    "yarn.admin.acl" : "*",
    "mapreduce.job.speculative.speculativecap" : "0.1",
    "dfs.namenode.num.checkpoints.retained" : "2",

@@ -4795,7 +4795,7 @@
    "ftp.stream-buffer-size" : "4096",
    "dfs.namenode.avoid.write.stale.datanode" : "false",
    "hadoop.security.group.mapping.ldap.search.attr.member" : "member",
-    "mapreduce.output.fileoutputformat.outputdir" : "hdfs://a2115.smile.com:9820/user/jenkins/tera-gen-1",
+    "mapreduce.output.fileoutputformat.outputdir" : "hdfs://a2115.smile.com:8020/user/jenkins/tera-gen-1",
    "dfs.blockreport.initialDelay" : "0",
    "yarn.nm.liveness-monitor.expiry-interval-ms" : "600000",
    "hadoop.http.authentication.token.validity" : "36000",

@@ -4839,7 +4839,7 @@
    "hadoop.security.auth_to_local" : "DEFAULT",
    "dfs.secondary.namenode.kerberos.internal.spnego.principal" : "${dfs.web.authentication.kerberos.principal}",
    "ftp.client-write-packet-size" : "65536",
-    "fs.defaultFS" : "hdfs://a2115.smile.com:9820",
+    "fs.defaultFS" : "hdfs://a2115.smile.com:8020",
    "yarn.nodemanager.address" : "0.0.0.0:0",
    "yarn.scheduler.fair.assignmultiple" : "true",
    "yarn.resourcemanager.scheduler.client.thread-count" : "50",

@@ -9628,7 +9628,7 @@
    "yarn.nodemanager.keytab" : "/etc/krb5.keytab",
    "mapreduce.task.io.sort.factor" : "10",
    "yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
-    "mapreduce.job.working.dir" : "hdfs://a2115.smile.com:9820/user/jenkins",
+    "mapreduce.job.working.dir" : "hdfs://a2115.smile.com:8020/user/jenkins",
    "yarn.admin.acl" : "*",
    "mapreduce.job.speculative.speculativecap" : "0.1",
    "dfs.namenode.num.checkpoints.retained" : "2",

@@ -9876,7 +9876,7 @@
    "ftp.stream-buffer-size" : "4096",
    "dfs.namenode.avoid.write.stale.datanode" : "false",
    "hadoop.security.group.mapping.ldap.search.attr.member" : "member",
-    "mapreduce.output.fileoutputformat.outputdir" : "hdfs://a2115.smile.com:9820/user/jenkins/tera-gen-2",
+    "mapreduce.output.fileoutputformat.outputdir" : "hdfs://a2115.smile.com:8020/user/jenkins/tera-gen-2",
    "dfs.blockreport.initialDelay" : "0",
    "yarn.nm.liveness-monitor.expiry-interval-ms" : "600000",
    "hadoop.http.authentication.token.validity" : "36000",

@@ -9920,7 +9920,7 @@
    "hadoop.security.auth_to_local" : "DEFAULT",
    "dfs.secondary.namenode.kerberos.internal.spnego.principal" : "${dfs.web.authentication.kerberos.principal}",
    "ftp.client-write-packet-size" : "65536",
-    "fs.defaultFS" : "hdfs://a2115.smile.com:9820",
+    "fs.defaultFS" : "hdfs://a2115.smile.com:8020",
    "yarn.nodemanager.address" : "0.0.0.0:0",
    "yarn.scheduler.fair.assignmultiple" : "true",
    "yarn.resourcemanager.scheduler.client.thread-count" : "50",
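The JSON hunks above come from recorded job-configuration fixtures, and they all trail from one root value: fs.defaultFS. Derived properties such as mapreduce.job.working.dir and mapreduce.output.fileoutputformat.outputdir are stored as fully qualified URIs, so once fs.defaultFS carries port 8020, every derived URI in the fixture must agree. A small sketch of that derivation (hostname and user are the fixture's sample values, not a real cluster):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class DerivedUriSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://a2115.smile.com:8020");
    String root = conf.get("fs.defaultFS");
    // Working and output directories embed the fs.defaultFS authority verbatim.
    Path workingDir = new Path(root + "/user/jenkins");
    Path outputDir = new Path(root + "/user/jenkins/tera-gen-1");
    System.out.println(workingDir);  // hdfs://a2115.smile.com:8020/user/jenkins
    System.out.println(outputDir);   // hdfs://a2115.smile.com:8020/user/jenkins/tera-gen-1
  }
}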
@@ -147,7 +147,7 @@ public class RegistryTestHelper extends Assert {
    Map<String, String> url = addressList.get(0);
    String addr = url.get("uri");
    assertTrue(addr.contains("http"));
-    assertTrue(addr.contains(":9820"));
+    assertTrue(addr.contains(":8020"));

    Endpoint nnipc = findEndpoint(record, NNIPC, false, 1,2);
    assertEquals("wrong protocol in " + nnipc, ProtocolTypes.PROTOCOL_THRIFT,

@@ -275,7 +275,7 @@ public class RegistryTestHelper extends Assert {
        new URI("http", hostname + ":80", "/")));
    entry.addExternalEndpoint(
        restEndpoint(API_WEBHDFS,
-            new URI("http", hostname + ":9820", "/")));
+            new URI("http", hostname + ":8020", "/")));

    Endpoint endpoint = ipcEndpoint(API_HDFS, null);
    endpoint.addresses.add(RegistryTypeUtils.hostnamePortPair(hostname, 8030));
@@ -64,7 +64,7 @@ public class TestPBRecordImpl {
    LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
    assertTrue(ret instanceof LocalResourcePBImpl);
    ret.setResource(URL.fromPath(new Path(
-        "hdfs://y.ak:9820/foo/bar")));
+        "hdfs://y.ak:8020/foo/bar")));
    ret.setSize(4344L);
    ret.setTimestamp(3141592653589793L);
    ret.setVisibility(LocalResourceVisibility.PUBLIC);