HDFS-9010. Replace NameNode.DEFAULT_PORT with HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT config key. Contributed by Mingliang Liu.
parent e552775545
commit 5546df0020
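In short: the server-side constant NameNode.DEFAULT_PORT is deprecated in favor of the client-side key HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT, which carries the same value (8020), so client code no longer needs to depend on the NameNode class. A minimal sketch of the migration for downstream code follows; the class and host names here are hypothetical, not part of this commit:

import java.net.InetSocketAddress;

import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

// Hypothetical downstream caller showing the migration; not part of this commit.
public class DefaultPortMigration {
  public static void main(String[] args) {
    // Before: org.apache.hadoop.hdfs.server.namenode.NameNode.DEFAULT_PORT
    // (now @Deprecated). After: the client-side constant, same value.
    int port = HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
    InetSocketAddress addr =
        InetSocketAddress.createUnresolved("nn.example.com", port);
    System.out.println(addr); // nn.example.com:8020 (format varies by JDK)
  }
}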
CHANGES.txt

@@ -561,6 +561,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8996. Consolidate validateLog and scanLog in FJM#EditLogFile (Zhe
     Zhang via Colin P. McCabe)
 
+    HDFS-9010. Replace NameNode.DEFAULT_PORT with HdfsClientConfigKeys.
+    DFS_NAMENODE_RPC_PORT_DEFAULT config key. (Mingliang Liu via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
Hdfs.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
@@ -49,7 +50,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -77,7 +77,8 @@ public class Hdfs extends AbstractFileSystem {
    * @throws IOException
    */
   Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException {
-    super(theUri, HdfsConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);
+    super(theUri, HdfsConstants.HDFS_URI_SCHEME, true,
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
 
     if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
       throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
@@ -92,7 +93,7 @@ public class Hdfs extends AbstractFileSystem {
 
   @Override
   public int getUriDefaultPort() {
-    return NameNode.DEFAULT_PORT;
+    return HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
   }
 
   @Override
DistributedFileSystem.java

@@ -65,6 +65,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
@@ -86,7 +87,6 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
@@ -1568,7 +1568,7 @@ public class DistributedFileSystem extends FileSystem {
 
   @Override
   protected int getDefaultPort() {
-    return NameNode.DEFAULT_PORT;
+    return HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
   }
 
   @Override
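Both filesystem entry points above, Hdfs.getUriDefaultPort() on AbstractFileSystem and DistributedFileSystem.getDefaultPort() on FileSystem, now report the default from the client-side key. One practical effect, sketched below under the assumption of a local hypothetical namenode URI, is that path canonicalization treats a port-less hdfs:// URI and the same URI with :8020 as the same filesystem:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Hypothetical sketch (no running cluster needed): the filesystem reports a
// port-less URI, but canonicalization fills in the default port, so a path
// spelled with :8020 still belongs to this filesystem.
public class DefaultPortCanonicalization {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost"), conf)) {
      System.out.println(fs.getUri()); // hdfs://localhost
      // checkPath() accepts the explicit default port because
      // getDefaultPort() now returns 8020 from the client-side key.
      fs.makeQualified(new Path("hdfs://localhost:8020/tmp"));
    }
  }
}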
NameNodeProxies.java

@@ -509,7 +509,8 @@ public class NameNodeProxies {
     // Check the port in the URI, if it is logical.
     if (checkPort && providerNN.useLogicalURI()) {
       int port = nameNodeUri.getPort();
-      if (port > 0 && port != NameNode.DEFAULT_PORT) {
+      if (port > 0 &&
+          port != HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT) {
         // Throwing here without any cleanup is fine since we have not
         // actually created the underlying proxies yet.
         throw new IOException("Port " + port + " specified in URI "
NameNode.java

@@ -39,9 +39,9 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -110,6 +110,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY;
@@ -314,7 +315,12 @@ public class NameNode implements NameNodeStatusMXBean {
     }
   }
 
-  public static final int DEFAULT_PORT = 8020;
+  /**
+   * @deprecated Use {@link HdfsClientConfigKeys#DFS_NAMENODE_RPC_PORT_DEFAULT}
+   * instead.
+   */
+  @Deprecated
+  public static final int DEFAULT_PORT = DFS_NAMENODE_RPC_PORT_DEFAULT;
   public static final Logger LOG =
       LoggerFactory.getLogger(NameNode.class.getName());
   public static final Logger stateChangeLog =
@@ -452,7 +458,7 @@ public class NameNode implements NameNodeStatusMXBean {
   }
 
   public static InetSocketAddress getAddress(String address) {
-    return NetUtils.createSocketAddr(address, DEFAULT_PORT);
+    return NetUtils.createSocketAddr(address, DFS_NAMENODE_RPC_PORT_DEFAULT);
   }
 
   /**
@@ -509,7 +515,8 @@ public class NameNode implements NameNodeStatusMXBean {
 
   public static URI getUri(InetSocketAddress namenode) {
     int port = namenode.getPort();
-    String portString = port == DEFAULT_PORT ? "" : (":"+port);
+    String portString = (port == DFS_NAMENODE_RPC_PORT_DEFAULT) ?
+        "" : (":" + port);
     return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
         + namenode.getHostName()+portString);
   }
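getAddress and getUri above are inverses: getAddress fills in the default RPC port when an address string has none, and getUri omits the port exactly when it is the default. A small sketch of that round trip (the host name "foo" is just a placeholder, as in the tests further down):

import java.net.InetSocketAddress;
import java.net.URI;

import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

// Sketch of the round trip exercised by TestDefaultNameNodePort below:
// getUri() drops the port precisely when it matches the default.
public class GetUriSketch {
  public static void main(String[] args) {
    URI u1 = NameNode.getUri(new InetSocketAddress("foo",
        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT));
    URI u2 = NameNode.getUri(new InetSocketAddress("foo", 555));
    System.out.println(u1); // hdfs://foo     (default port elided)
    System.out.println(u2); // hdfs://foo:555 (non-default port kept)
  }
}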
NNHAServiceTarget.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.ha.NodeFencer;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetUtils;
 
@@ -77,7 +78,7 @@ public class NNHAServiceTarget extends HAServiceTarget {
           "Unable to determine service address for namenode '" + nnId + "'");
     }
     this.addr = NetUtils.createSocketAddr(serviceAddr,
-        NameNode.DEFAULT_PORT);
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
 
     this.autoFailoverEnabled = targetConf.getBoolean(
         DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,
TestAppendSnapshotTruncate.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
@@ -89,7 +90,7 @@ public class TestAppendSnapshotTruncate {
     cluster = new MiniDFSCluster.Builder(conf)
         .format(true)
         .numDataNodes(DATANODE_NUM)
-        .nameNodePort(NameNode.DEFAULT_PORT)
+        .nameNodePort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)
         .waitSafeMode(true)
         .build();
     dfs = cluster.getFileSystem();
TestDFSClientFailover.java

@@ -115,7 +115,8 @@ public class TestDFSClientFailover {
     // to include a port number.
     Path withPort = new Path("hdfs://" +
         HATestUtil.getLogicalHostname(cluster) + ":" +
-        NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT + "/" +
+        TEST_FILE.toUri().getPath());
     FileSystem fs2 = withPort.getFileSystem(fs.getConf());
     assertTrue(fs2.exists(withPort));
 
TestDefaultNameNodePort.java

@@ -24,7 +24,9 @@ import java.net.URI;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+
 import org.junit.Test;
 
 /** Test NameNode port defaulting code. */
@@ -33,9 +35,9 @@ public class TestDefaultNameNodePort {
   @Test
   public void testGetAddressFromString() throws Exception {
     assertEquals(NameNode.getAddress("foo").getPort(),
-        NameNode.DEFAULT_PORT);
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
     assertEquals(NameNode.getAddress("hdfs://foo/").getPort(),
-        NameNode.DEFAULT_PORT);
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
     assertEquals(NameNode.getAddress("hdfs://foo:555").getPort(),
         555);
     assertEquals(NameNode.getAddress("foo:555").getPort(),
@@ -46,11 +48,13 @@ public class TestDefaultNameNodePort {
   public void testGetAddressFromConf() throws Exception {
     Configuration conf = new HdfsConfiguration();
     FileSystem.setDefaultUri(conf, "hdfs://foo/");
-    assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
+    assertEquals(NameNode.getAddress(conf).getPort(),
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
     FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
     assertEquals(NameNode.getAddress(conf).getPort(), 555);
     FileSystem.setDefaultUri(conf, "foo");
-    assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
+    assertEquals(NameNode.getAddress(conf).getPort(),
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
   }
 
   @Test
@@ -58,7 +62,7 @@ public class TestDefaultNameNodePort {
     assertEquals(NameNode.getUri(new InetSocketAddress("foo", 555)),
         URI.create("hdfs://foo:555"));
     assertEquals(NameNode.getUri(new InetSocketAddress("foo",
-        NameNode.DEFAULT_PORT)),
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)),
         URI.create("hdfs://foo"));
   }
 }
TestBalancerWithHANameNodes.java

@@ -31,8 +31,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf;
 import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.junit.Test;
 
@@ -67,7 +67,7 @@ public class TestBalancerWithHANameNodes {
     assertEquals(capacities.length, racks.length);
     int numOfDatanodes = capacities.length;
     NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
     nn1Conf.setIpcPort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
     Configuration copiedConf = new Configuration(conf);
     cluster = new MiniDFSCluster.Builder(copiedConf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
TestFileTruncate.java

@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -99,7 +100,7 @@ public class TestFileTruncate {
     cluster = new MiniDFSCluster.Builder(conf)
         .format(true)
         .numDataNodes(DATANODE_NUM)
-        .nameNodePort(NameNode.DEFAULT_PORT)
+        .nameNodePort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)
         .waitSafeMode(true)
         .build();
     fs = cluster.getFileSystem();
@@ -1224,7 +1225,7 @@ public class TestFileTruncate {
     NameNode.doRollback(conf, false);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM)
         .format(false)
-        .nameNodePort(NameNode.DEFAULT_PORT)
+        .nameNodePort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)
         .startupOption(o==StartupOption.ROLLBACK ? StartupOption.REGULAR : o)
         .dnStartupOption(o!=StartupOption.ROLLBACK ? StartupOption.REGULAR : o)
         .build();
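Several of the tests above pin the MiniDFSCluster NameNode to the default RPC port so that client URIs carrying no explicit port still reach it. A condensed sketch of that pattern (the surrounding class and setup are hypothetical, not from this commit; it assumes port 8020 is free locally):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

// Hypothetical condensed test setup mirroring the pattern used above.
public class DefaultPortClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        // Bind the NameNode RPC server to the default port (8020) so that
        // a URI like hdfs://localhost/ needs no explicit port.
        .nameNodePort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)
        .build();
    try {
      cluster.waitActive();
      System.out.println(cluster.getFileSystem().getUri()); // e.g. hdfs://127.0.0.1:8020
    } finally {
      cluster.shutdown();
    }
  }
}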