From 78a21e773db60586ebde043d4b274ec5fbb9fd64 Mon Sep 17 00:00:00 2001
From: Yongjun Zhang
Date: Fri, 4 Dec 2015 13:45:01 -0800
Subject: [PATCH] HDFS-9474. TestPipelinesFailover should not fail when
 printing debug message. (John Zhuge via Yongjun Zhang)

(cherry picked from commit 59dbe8b3e96d13c2322cabd87c7f893c5a3812ba)
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../namenode/ha/TestPipelinesFailover.java      | 38 +++++++++----------
 2 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index de68a7e1f93..395dcf370c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -846,6 +846,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9490. MiniDFSCluster should change block generation stamp via
     FsDatasetTestUtils. (Tony Wu via lei)
 
+    HDFS-9474. TestPipelinesFailover should not fail when printing debug
+    message. (John Zhuge via Yongjun Zhang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
index 47b38176597..429b2bf5c4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
@@ -55,7 +55,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
-import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.log4j.Level;
 import org.junit.Test;
@@ -420,29 +420,27 @@ public class TestPipelinesFailover {
     // The following section of code is to help debug HDFS-6694 about
     // this test that fails from time to time due to "too many open files".
     //
+    LOG.info("HDFS-6694 Debug Data BEGIN");
-    // Only collect debug data on these OSes.
-    if (Shell.LINUX || Shell.SOLARIS || Shell.MAC) {
-      System.out.println("HDFS-6694 Debug Data BEGIN===");
-
-      String[] scmd = new String[] {"/bin/sh", "-c", "ulimit -a"};
-      ShellCommandExecutor sce = new ShellCommandExecutor(scmd);
-      sce.execute();
-      System.out.println("'ulimit -a' output:\n" + sce.getOutput());
+    String[][] scmds = new String[][] {
+      {"/bin/sh", "-c", "ulimit -a"},
+      {"hostname"},
+      {"ifconfig", "-a"}
+    };
 
-      scmd = new String[] {"hostname"};
-      sce = new ShellCommandExecutor(scmd);
-      sce.execute();
-      System.out.println("'hostname' output:\n" + sce.getOutput());
-
-      scmd = new String[] {"ifconfig", "-a"};
-      sce = new ShellCommandExecutor(scmd);
-      sce.execute();
-      System.out.println("'ifconfig' output:\n" + sce.getOutput());
-
-      System.out.println("===HDFS-6694 Debug Data END");
+    for (String[] scmd: scmds) {
+      String scmd_str = StringUtils.join(" ", scmd);
+      try {
+        ShellCommandExecutor sce = new ShellCommandExecutor(scmd);
+        sce.execute();
+        LOG.info("'" + scmd_str + "' output:\n" + sce.getOutput());
+      } catch (IOException e) {
+        LOG.warn("Error when running '" + scmd_str + "'", e);
+      }
     }
+    LOG.info("HDFS-6694 Debug Data END");
+
     HAStressTestHarness harness = new HAStressTestHarness();
     // Disable permissions so that another user can recover the lease.
     harness.conf.setBoolean(