HDFS-6442: Merging r1598078 from trunk to branch-2.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1598081 13f79535-47bb-0310-9956-ffa450edef68
parent f8dd75e7e9
commit b97fac3359

@@ -283,6 +283,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6227. ShortCircuitCache#unref should purge ShortCircuitReplicas whose
     streams have been closed by java interrupts. (Colin Patrick McCabe via jing9)
 
+    HDFS-6442. Fix TestEditLogAutoroll and TestStandbyCheckpoints failure
+    caused by port conflicts. (Zesheng Wu via Arpit Agarwal)
+
     HDFS-6448. BlockReaderLocalLegacy should set socket timeout based on
     conf.socketTimeout (liangxie via cmccabe)

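The HDFS-6442 entry above describes the pattern that both test diffs below apply: pick a randomized pair of NameNode HTTP ports and retry MiniDFSCluster startup whenever a BindException signals a port conflict. The following is a minimal, self-contained Java sketch of that retry idiom only; PortRetryExample and its startCluster helper are hypothetical stand-ins for the Hadoop test setup, not part of the patch:

import java.io.IOException;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.util.Random;

public class PortRetryExample {
  private static final Random RANDOM = new Random();

  // Hypothetical stand-in for MiniDFSCluster startup: binds the two NameNode
  // HTTP ports; throws BindException if either port is already in use.
  static ServerSocket[] startCluster(int basePort) throws IOException {
    ServerSocket nn1 = new ServerSocket();
    ServerSocket nn2 = new ServerSocket();
    try {
      nn1.bind(new InetSocketAddress(basePort));
      nn2.bind(new InetSocketAddress(basePort + 1));
    } catch (IOException e) {
      nn1.close();
      nn2.close();
      throw e;
    }
    return new ServerSocket[] { nn1, nn2 };
  }

  public static void main(String[] args) throws IOException {
    ServerSocket[] cluster;
    int retryCount = 0;
    while (true) {
      try {
        // Same scheme as the patch: an even base port in [10060, 10258].
        int basePort = 10060 + RANDOM.nextInt(100) * 2;
        cluster = startCluster(basePort);
        ++retryCount;
        break;  // started cleanly, leave the retry loop
      } catch (BindException e) {
        // Another process owns one of the ports; draw new ones and retry.
        System.out.println("Cluster setup failed due to port conflicts, retry "
            + retryCount + " times");
      }
    }
    System.out.println("Bound ports " + cluster[0].getLocalPort()
        + " and " + cluster[1].getLocalPort());
    for (ServerSocket s : cluster) {
      s.close();
    }
  }
}

The base-port scheme mirrors the patch: 10060 + random.nextInt(100) * 2 keeps nn1 on an even port and nn2 on the adjacent odd one, so the port pairs produced by any two draws are disjoint.
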
@@ -17,11 +17,16 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.net.BindException;
+import java.util.Random;
+
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;

@@ -43,6 +48,9 @@ public class TestEditLogAutoroll {
   private NameNode nn0;
   private FileSystem fs;
   private FSEditLog editLog;
+  private final Random random = new Random();
+
+  private static final Log LOG = LogFactory.getLog(TestEditLog.class);
 
   @Before
   public void setUp() throws Exception {

@@ -54,24 +62,35 @@
     conf.setFloat(DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD, 0.5f);
     conf.setInt(DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS, 100);
 
-    MiniDFSNNTopology topology = new MiniDFSNNTopology()
-      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10061))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10062)));
-
-    cluster = new MiniDFSCluster.Builder(conf)
-      .nnTopology(topology)
-      .numDataNodes(0)
-      .build();
-    cluster.waitActive();
-
-    nn0 = cluster.getNameNode(0);
-    fs = HATestUtil.configureFailoverFs(cluster, conf);
-
-    cluster.transitionToActive(0);
-
-    fs = cluster.getFileSystem(0);
-    editLog = nn0.getNamesystem().getEditLog();
+    int retryCount = 0;
+    while (true) {
+      try {
+        int basePort = 10060 + random.nextInt(100) * 2;
+        MiniDFSNNTopology topology = new MiniDFSNNTopology()
+            .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+                .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+                .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
+
+        cluster = new MiniDFSCluster.Builder(conf)
+            .nnTopology(topology)
+            .numDataNodes(0)
+            .build();
+        cluster.waitActive();
+
+        nn0 = cluster.getNameNode(0);
+        fs = HATestUtil.configureFailoverFs(cluster, conf);
+
+        cluster.transitionToActive(0);
+
+        fs = cluster.getFileSystem(0);
+        editLog = nn0.getNamesystem().getEditLog();
+        ++retryCount;
+        break;
+      } catch (BindException e) {
+        LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
+            + retryCount + " times");
+      }
+    }
   }
 
   @After

@@ -25,12 +25,14 @@ import static org.junit.Assert.fail;
 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.net.BindException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.ThreadInfo;
 import java.lang.management.ThreadMXBean;
 import java.net.URI;
 import java.net.URL;
 import java.util.List;
+import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;

@@ -73,6 +75,7 @@ public class TestStandbyCheckpoints {
   protected MiniDFSCluster cluster;
   protected NameNode nn0, nn1;
   protected FileSystem fs;
+  private final Random random = new Random();
   protected File tmpOivImgDir;
 
   private static final Log LOG = LogFactory.getLog(TestStandbyCheckpoints.class);

@@ -87,22 +90,33 @@ public class TestStandbyCheckpoints {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
 
-    MiniDFSNNTopology topology = new MiniDFSNNTopology()
-      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10061))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10062)));
-
-    cluster = new MiniDFSCluster.Builder(conf)
-      .nnTopology(topology)
-      .numDataNodes(0)
-      .build();
-    cluster.waitActive();
-
-    nn0 = cluster.getNameNode(0);
-    nn1 = cluster.getNameNode(1);
-    fs = HATestUtil.configureFailoverFs(cluster, conf);
-
-    cluster.transitionToActive(0);
+    int retryCount = 0;
+    while (true) {
+      try {
+        int basePort = 10060 + random.nextInt(100) * 2;
+        MiniDFSNNTopology topology = new MiniDFSNNTopology()
+            .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+                .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+                .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
+
+        cluster = new MiniDFSCluster.Builder(conf)
+            .nnTopology(topology)
+            .numDataNodes(0)
+            .build();
+        cluster.waitActive();
+
+        nn0 = cluster.getNameNode(0);
+        nn1 = cluster.getNameNode(1);
+        fs = HATestUtil.configureFailoverFs(cluster, conf);
+
+        cluster.transitionToActive(0);
+        ++retryCount;
+        break;
+      } catch (BindException e) {
+        LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
+            + retryCount + " times");
+      }
+    }
   }
 
   protected Configuration setupCommonConfig() {