svn merge -c -1354790 for reverting HDFS-3576 since it requires more changes.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1354792 13f79535-47bb-0310-9956-ffa450edef68
commit f105784d6a
parent 44389399d4
Index: CHANGES.txt
===================================================================
@@ -250,9 +250,6 @@ Branch-2 ( Unreleased changes )
 
     HDFS-3572. Cleanup code which inits SPNEGO in HttpServer (todd)
 
-    HDFS-3576. Move the constant NameNode.DEFAULT_PORT to
-    DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT. (Brandon Li via szetszwo)
-
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log

Index: Hdfs.java
===================================================================
@@ -33,7 +33,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@@ -43,6 +42,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -71,8 +71,7 @@ public class Hdfs extends AbstractFileSystem {
    * @throws IOException
    */
   Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException {
-    super(theUri, HdfsConstants.HDFS_URI_SCHEME, true,
-        DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+    super(theUri, HdfsConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);
 
     if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
       throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
@@ -87,7 +86,7 @@ public class Hdfs extends AbstractFileSystem {
 
   @Override
   public int getUriDefaultPort() {
-    return DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
+    return NameNode.DEFAULT_PORT;
   }
 
   @Override

Index: DFSConfigKeys.java
===================================================================
@@ -87,7 +87,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
   public static final String DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_NAMENODE_HTTP_PORT_DEFAULT;
   public static final String DFS_NAMENODE_RPC_ADDRESS_KEY = "dfs.namenode.rpc-address";
-  public static final int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020;
   public static final String DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.servicerpc-address";
   public static final String DFS_NAMENODE_MAX_OBJECTS_KEY = "dfs.namenode.max.objects";
   public static final long DFS_NAMENODE_MAX_OBJECTS_DEFAULT = 0;

Index: DistributedFileSystem.java
===================================================================
@@ -734,7 +734,7 @@ public class DistributedFileSystem extends FileSystem {
 
   @Override
   protected int getDefaultPort() {
-    return DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
+    return NameNode.DEFAULT_PORT;
   }
 
   @Override

Index: NameNodeProxies.java
===================================================================
@@ -402,7 +402,7 @@ public class NameNodeProxies {
     // If we found a proxy provider, then this URI should be a logical NN.
     // Given that, it shouldn't have a non-default port number.
     int port = nameNodeUri.getPort();
-    if (port > 0 && port != DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT) {
+    if (port > 0 && port != NameNode.DEFAULT_PORT) {
       throw new IOException("Port " + port + " specified in URI "
           + nameNodeUri + " but host '" + host
           + "' is a logical (HA) namenode"

Index: DelegationTokenSelector.java
===================================================================
@@ -22,7 +22,7 @@ import java.util.Collection;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
@@ -57,7 +57,7 @@ public class DelegationTokenSelector
     Text serviceName = SecurityUtil.buildTokenService(nnUri);
     final String nnServiceName = conf.get(SERVICE_NAME_KEY + serviceName);
 
-    int nnRpcPort = DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
+    int nnRpcPort = NameNode.DEFAULT_PORT;
     if (nnServiceName != null) {
       nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort();
     }

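The fallback pattern restored above (use the default RPC port unless the configured service name carries an explicit one) is easy to mirror in isolation. Below is a minimal standalone sketch, not Hadoop code: PortFallbackSketch, its createSocketAddr helper, and the sample service name are illustrative stand-ins for NetUtils.createSocketAddr and the real configuration lookup.

import java.net.InetSocketAddress;

public class PortFallbackSketch {
  static final int DEFAULT_PORT = 8020; // mirrors NameNode.DEFAULT_PORT

  // Simplified stand-in for NetUtils.createSocketAddr(target, defaultPort):
  // parse "host[:port]" and fall back to defaultPort when no port is given.
  static InetSocketAddress createSocketAddr(String target, int defaultPort) {
    int colon = target.lastIndexOf(':');
    if (colon < 0) {
      return InetSocketAddress.createUnresolved(target, defaultPort);
    }
    String host = target.substring(0, colon);
    int port = Integer.parseInt(target.substring(colon + 1));
    return InetSocketAddress.createUnresolved(host, port);
  }

  public static void main(String[] args) {
    int nnRpcPort = DEFAULT_PORT;
    String nnServiceName = "nn1.example.com:9000"; // hypothetical config value
    if (nnServiceName != null) {
      nnRpcPort = createSocketAddr(nnServiceName, nnRpcPort).getPort();
    }
    System.out.println(nnRpcPort); // 9000 here; 8020 for "nn1.example.com"
  }
}
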
Index: NameNode.java
===================================================================
@@ -214,6 +214,7 @@ public class NameNode {
     }
   }
 
+  public static final int DEFAULT_PORT = 8020;
   public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
   public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange");
   public static final HAState ACTIVE_STATE = new ActiveState();
@@ -269,7 +270,7 @@ public class NameNode {
   }
 
   public static InetSocketAddress getAddress(String address) {
-    return NetUtils.createSocketAddr(address, DFS_NAMENODE_RPC_PORT_DEFAULT);
+    return NetUtils.createSocketAddr(address, DEFAULT_PORT);
   }
 
   /**
@@ -328,8 +329,7 @@ public class NameNode {
 
   public static URI getUri(InetSocketAddress namenode) {
     int port = namenode.getPort();
-    String portString = (port == DFS_NAMENODE_RPC_PORT_DEFAULT) ?
-        "" : (":"+port);
+    String portString = port == DEFAULT_PORT ? "" : (":"+port);
     return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
         + namenode.getHostName()+portString);
   }

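The NameNode hunks above restore the round-trip convention that hdfs URIs omit the port exactly when it equals the default (8020). A self-contained sketch of that behavior, with illustrative names (DefaultPortSketch, a simplified getUri) mirroring the reverted NameNode.getUri:

import java.net.InetSocketAddress;
import java.net.URI;

public class DefaultPortSketch {
  static final int DEFAULT_PORT = 8020; // mirrors NameNode.DEFAULT_PORT

  // Mirrors the logic of NameNode.getUri(InetSocketAddress): elide the
  // port from the URI when it is the default.
  static URI getUri(InetSocketAddress namenode) {
    int port = namenode.getPort();
    String portString = port == DEFAULT_PORT ? "" : (":" + port);
    return URI.create("hdfs://" + namenode.getHostName() + portString);
  }

  public static void main(String[] args) {
    // createUnresolved avoids DNS lookups for the illustrative host names.
    System.out.println(getUri(InetSocketAddress.createUnresolved("foo", 8020))); // hdfs://foo
    System.out.println(getUri(InetSocketAddress.createUnresolved("foo", 555)));  // hdfs://foo:555
  }
}
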
Index: NNHAServiceTarget.java
===================================================================
@@ -77,7 +77,7 @@ public class NNHAServiceTarget extends HAServiceTarget {
           "Unable to determine service address for namenode '" + nnId + "'");
     }
     this.addr = NetUtils.createSocketAddr(serviceAddr,
-        DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+        NameNode.DEFAULT_PORT);
 
     this.autoFailoverEnabled = targetConf.getBoolean(
         DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,

Index: TestDFSClientFailover.java
===================================================================
@@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -81,9 +82,9 @@ public class TestDFSClientFailover {
 
     // Check that it functions even if the URL becomes canonicalized
    // to include a port number.
-    Path withPort = new Path("hdfs://" + HATestUtil.getLogicalHostname(cluster)
-        + ":" + DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT + "/"
-        + TEST_FILE.toUri().getPath());
+    Path withPort = new Path("hdfs://" +
+        HATestUtil.getLogicalHostname(cluster) + ":" +
+        NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
     FileSystem fs2 = withPort.getFileSystem(fs.getConf());
     assertTrue(fs2.exists(withPort));
 

Index: TestDefaultNameNodePort.java
===================================================================
@@ -31,9 +31,9 @@ public class TestDefaultNameNodePort extends TestCase {
 
   public void testGetAddressFromString() throws Exception {
     assertEquals(NameNode.getAddress("foo").getPort(),
-                 DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+                 NameNode.DEFAULT_PORT);
     assertEquals(NameNode.getAddress("hdfs://foo/").getPort(),
-                 DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+                 NameNode.DEFAULT_PORT);
     assertEquals(NameNode.getAddress("hdfs://foo:555").getPort(),
                  555);
     assertEquals(NameNode.getAddress("foo:555").getPort(),
@@ -43,20 +43,18 @@ public class TestDefaultNameNodePort extends TestCase {
   public void testGetAddressFromConf() throws Exception {
     Configuration conf = new HdfsConfiguration();
     FileSystem.setDefaultUri(conf, "hdfs://foo/");
-    assertEquals(NameNode.getAddress(conf).getPort(),
-        DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+    assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
     FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
     assertEquals(NameNode.getAddress(conf).getPort(), 555);
     FileSystem.setDefaultUri(conf, "foo");
-    assertEquals(NameNode.getAddress(conf).getPort(),
-        DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+    assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
   }
 
   public void testGetUri() {
     assertEquals(NameNode.getUri(new InetSocketAddress("foo", 555)),
                  URI.create("hdfs://foo:555"));
     assertEquals(NameNode.getUri(new InetSocketAddress("foo",
-                     DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)),
+                     NameNode.DEFAULT_PORT)),
                  URI.create("hdfs://foo"));
   }
 }

Index: TestBalancerWithHANameNodes.java
===================================================================
@@ -25,7 +25,6 @@ import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -33,6 +32,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.junit.Test;
 
@@ -67,7 +67,7 @@ public class TestBalancerWithHANameNodes {
     assertEquals(capacities.length, racks.length);
     int numOfDatanodes = capacities.length;
     NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
-    nn1Conf.setIpcPort(DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+    nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
     Configuration copiedConf = new Configuration(conf);
     cluster = new MiniDFSCluster.Builder(copiedConf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology())

Index: TestFileSystem.java
===================================================================
@@ -39,7 +39,6 @@ import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.fs.shell.CommandFormat;
@@ -510,10 +509,10 @@ public class TestFileSystem extends TestCase {
 
     {
       try {
-        runTestCache(DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+        runTestCache(NameNode.DEFAULT_PORT);
       } catch(java.net.BindException be) {
-        LOG.warn("Cannot test NameNode's default RPC port (="
-            + DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT + ")", be);
+        LOG.warn("Cannot test NameNode.DEFAULT_PORT (="
+            + NameNode.DEFAULT_PORT + ")", be);
       }
 
       runTestCache(0);
@@ -536,11 +535,11 @@ public class TestFileSystem extends TestCase {
       }
     }
 
-    if (port == DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT) {
+    if (port == NameNode.DEFAULT_PORT) {
       //test explicit default port
-      URI uri2 = new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(),
-          DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT, uri.getPath(),
+      URI uri2 = new URI(uri.getScheme(), uri.getUserInfo(),
+          uri.getHost(), NameNode.DEFAULT_PORT, uri.getPath(),
           uri.getQuery(), uri.getFragment());
       LOG.info("uri2=" + uri2);
       FileSystem fs = FileSystem.get(uri2, conf);
       checkPath(cluster, fs);