diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
index e2f8b842f3f..ebebd25003f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
@@ -18,6 +18,18 @@
 
 package org.apache.hadoop.fs.http.client;
 
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.Writer;
+import java.net.URL;
+import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.Collection;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.ContentSummary;
@@ -44,18 +56,6 @@ import org.junit.runners.Parameterized;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.webapp.WebAppContext;
 
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.Writer;
-import java.net.URL;
-import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
-import java.util.Collection;
-
 @RunWith(value = Parameterized.class)
 public class TestHttpFSFileSystem extends HFSTestCase {
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestWebhdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestWebhdfsFileSystem.java
index 7c5b94c7c7e..95c005976c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestWebhdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestWebhdfsFileSystem.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.fs.http.client;
 
+import java.net.URI;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
@@ -26,8 +28,6 @@ import org.junit.Assert;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
-import java.net.URI;
-
 @RunWith(value = Parameterized.class)
 public class TestWebhdfsFileSystem extends TestHttpFSFileSystem {
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java
index 9996e0bea02..947f928a0e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java
@@ -18,15 +18,15 @@
 
 package org.apache.hadoop.fs.http.server;
 
-import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.junit.Test;
-import org.mockito.Mockito;
-
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.junit.Test;
+import org.mockito.Mockito;
+
 public class TestCheckUploadContentTypeFilter {
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
index ff525e643a7..099eb4bf81b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
@@ -18,7 +18,23 @@
 
 package org.apache.hadoop.fs.http.server;
 
-import junit.framework.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.Writer;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.text.MessageFormat;
+import java.util.Arrays;
+import java.util.List;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
@@ -38,20 +54,6 @@ import org.junit.Test;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.webapp.WebAppContext;
 
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.Writer;
-import java.net.HttpURLConnection;
-import java.net.URL;
-import java.text.MessageFormat;
-import java.util.Arrays;
-import java.util.List;
-
 public class TestHttpFSServer extends HFSTestCase {
 
   @Test
@@ -103,9 +105,9 @@ public class TestHttpFSServer extends HFSTestCase {
   }
 
   private void createHttpFSServer() throws Exception {
     File homeDir = TestDirHelper.getTestDir();
-    Assert.assertTrue(new File(homeDir, "conf").mkdir());
-    Assert.assertTrue(new File(homeDir, "log").mkdir());
-    Assert.assertTrue(new File(homeDir, "temp").mkdir());
+    assertTrue(new File(homeDir, "conf").mkdir());
+    assertTrue(new File(homeDir, "log").mkdir());
+    assertTrue(new File(homeDir, "temp").mkdir());
     HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
 
     File secretFile = new File(new File(homeDir, "conf"), "secret");
@@ -157,23 +159,23 @@ public class TestHttpFSServer extends HFSTestCase {
     URL url = new URL(TestJettyHelper.getJettyURL(),
                       MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "nobody"));
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_UNAUTHORIZED);
+    assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_UNAUTHORIZED);
 
     url = new URL(TestJettyHelper.getJettyURL(),
                   MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation",
                                        HadoopUsersConfTestHelper.getHadoopUsers()[0]));
     conn = (HttpURLConnection) url.openConnection();
-    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
     BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
     String line = reader.readLine();
     reader.close();
-    Assert.assertTrue(line.contains("\"counters\":{"));
+    assertTrue(line.contains("\"counters\":{"));
 
     url = new URL(TestJettyHelper.getJettyURL(),
                   MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation",
                                        HadoopUsersConfTestHelper.getHadoopUsers()[0]));
     conn = (HttpURLConnection) url.openConnection();
-    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
+    assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
   }
 
   @Test
@@ -187,7 +189,7 @@ public class TestHttpFSServer extends HFSTestCase {
     URL url = new URL(TestJettyHelper.getJettyURL(),
                       MessageFormat.format("/webhdfs/v1/?user.name={0}&op=liststatus", user));
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
     BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
     reader.readLine();
     reader.close();
@@ -208,7 +210,7 @@ public class TestHttpFSServer extends HFSTestCase {
     URL url = new URL(TestJettyHelper.getJettyURL(),
                       MessageFormat.format("/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user));
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
     BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
     reader.readLine();
     reader.close();
@@ -228,7 +230,7 @@ public class TestHttpFSServer extends HFSTestCase {
     conn.setDoInput(true);
     conn.setDoOutput(true);
     conn.setRequestMethod("PUT");
-    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
+    assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
   }
 
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java
index 6079cf256f3..1520af87761 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java
@@ -19,12 +19,14 @@
 
 package org.apache.hadoop.lib.lang;
 
-import junit.framework.Assert;
-import org.apache.hadoop.test.HTestCase;
-import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.util.concurrent.Callable;
 
+import org.apache.hadoop.test.HTestCase;
+import org.junit.Test;
+
 public class TestRunnableCallable extends HTestCase {
 
   public static class R implements Runnable {
@@ -59,14 +61,14 @@ public class TestRunnableCallable extends HTestCase {
     R r = new R();
     RunnableCallable rc = new RunnableCallable(r);
     rc.run();
-    Assert.assertTrue(r.RUN);
+    assertTrue(r.RUN);
 
     r = new R();
     rc = new RunnableCallable(r);
     rc.call();
-    Assert.assertTrue(r.RUN);
+    assertTrue(r.RUN);
 
-    Assert.assertEquals(rc.toString(), "R");
+    assertEquals(rc.toString(), "R");
   }
 
   @Test
@@ -74,14 +76,14 @@ public class TestRunnableCallable extends HTestCase {
     C c = new C();
     RunnableCallable rc = new RunnableCallable(c);
     rc.run();
-    Assert.assertTrue(c.RUN);
+    assertTrue(c.RUN);
 
     c = new C();
     rc = new RunnableCallable(c);
     rc.call();
-    Assert.assertTrue(c.RUN);
+    assertTrue(c.RUN);
 
-    Assert.assertEquals(rc.toString(), "C");
+    assertEquals(rc.toString(), "C");
   }
 
   @Test(expected = RuntimeException.class)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
index 0feca3044ba..59d02e3d55e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
@@ -19,7 +19,9 @@
 
 package org.apache.hadoop.lib.lang;
 
-import junit.framework.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
 import org.apache.hadoop.test.HTestCase;
 import org.junit.Test;
 
@@ -37,26 +39,26 @@ public class TestXException extends HTestCase {
   @Test
   public void testXException() throws Exception {
     XException ex = new XException(TestERROR.TC);
-    Assert.assertEquals(ex.getError(), TestERROR.TC);
-    Assert.assertEquals(ex.getMessage(), "TC: {0}");
-    Assert.assertNull(ex.getCause());
+    assertEquals(ex.getError(), TestERROR.TC);
+    assertEquals(ex.getMessage(), "TC: {0}");
+    assertNull(ex.getCause());
 
     ex = new XException(TestERROR.TC, "msg");
-    Assert.assertEquals(ex.getError(), TestERROR.TC);
-    Assert.assertEquals(ex.getMessage(), "TC: msg");
-    Assert.assertNull(ex.getCause());
+    assertEquals(ex.getError(), TestERROR.TC);
+    assertEquals(ex.getMessage(), "TC: msg");
+    assertNull(ex.getCause());
 
     Exception cause = new Exception();
     ex = new XException(TestERROR.TC, cause);
-    Assert.assertEquals(ex.getError(), TestERROR.TC);
-    Assert.assertEquals(ex.getMessage(), "TC: " + cause.toString());
-    Assert.assertEquals(ex.getCause(), cause);
+    assertEquals(ex.getError(), TestERROR.TC);
+    assertEquals(ex.getMessage(), "TC: " + cause.toString());
+    assertEquals(ex.getCause(), cause);
 
     XException xcause = ex;
     ex = new XException(xcause);
-    Assert.assertEquals(ex.getError(), TestERROR.TC);
-    Assert.assertEquals(ex.getMessage(), xcause.getMessage());
-    Assert.assertEquals(ex.getCause(), xcause);
+    assertEquals(ex.getError(), TestERROR.TC);
+    assertEquals(ex.getMessage(), xcause.getMessage());
+    assertEquals(ex.getCause(), xcause);
   }
 
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java
index 037fd63a4cc..402884bfbca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java
@@ -18,7 +18,10 @@
 
 package org.apache.hadoop.lib.server;
 
-import junit.framework.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.HTestCase;
 import org.junit.Test;
@@ -47,9 +50,9 @@ public class TestBaseService extends HTestCase {
   @Test
   public void baseService() throws Exception {
     BaseService service = new MyService();
-    Assert.assertNull(service.getInterface());
-    Assert.assertEquals(service.getPrefix(), "myservice");
-    Assert.assertEquals(service.getServiceDependencies().length, 0);
+    assertNull(service.getInterface());
+    assertEquals(service.getPrefix(), "myservice");
+    assertEquals(service.getServiceDependencies().length, 0);
 
     Server server = Mockito.mock(Server.class);
     Configuration conf = new Configuration(false);
@@ -60,9 +63,9 @@ public class TestBaseService extends HTestCase {
     Mockito.when(server.getPrefixedName("myservice.")).thenReturn("server.myservice.");
 
     service.init(server);
-    Assert.assertEquals(service.getPrefixedName("foo"), "server.myservice.foo");
-    Assert.assertEquals(service.getServiceConfig().size(), 1);
-    Assert.assertEquals(service.getServiceConfig().get("foo"), "FOO");
-    Assert.assertTrue(MyService.INIT);
+    assertEquals(service.getPrefixedName("foo"), "server.myservice.foo");
+    assertEquals(service.getServiceConfig().size(), 1);
+    assertEquals(service.getServiceConfig().get("foo"), "FOO");
+    assertTrue(MyService.INIT);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServer.java
index efd366b22d5..2e28441d717 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServer.java
@@ -18,16 +18,12 @@
 
 package org.apache.hadoop.lib.server;
 
-import junit.framework.Assert;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.lib.lang.XException;
-import org.apache.hadoop.test.HTestCase;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.apache.hadoop.test.TestException;
-import org.apache.hadoop.util.StringUtils;
-import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.FileOutputStream;
@@ -39,50 +35,60 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.lib.lang.XException;
+import org.apache.hadoop.test.HTestCase;
+import org.apache.hadoop.test.TestDir;
+import org.apache.hadoop.test.TestDirHelper;
+import org.apache.hadoop.test.TestException;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.Test;
+
 public class TestServer extends HTestCase {
 
   @Test
   @TestDir
   public void constructorsGetters() throws Exception {
     Server server = new Server("server", "/a", "/b", "/c", "/d", new Configuration(false));
-    Assert.assertEquals(server.getHomeDir(), "/a");
-    Assert.assertEquals(server.getConfigDir(), "/b");
-    Assert.assertEquals(server.getLogDir(), "/c");
-    Assert.assertEquals(server.getTempDir(), "/d");
-    Assert.assertEquals(server.getName(), "server");
-    Assert.assertEquals(server.getPrefix(), "server");
-    Assert.assertEquals(server.getPrefixedName("name"), "server.name");
-    Assert.assertNotNull(server.getConfig());
+    assertEquals(server.getHomeDir(), "/a");
+    assertEquals(server.getConfigDir(), "/b");
+    assertEquals(server.getLogDir(), "/c");
+    assertEquals(server.getTempDir(), "/d");
+    assertEquals(server.getName(), "server");
+    assertEquals(server.getPrefix(), "server");
+    assertEquals(server.getPrefixedName("name"), "server.name");
+    assertNotNull(server.getConfig());
 
     server = new Server("server", "/a", "/b", "/c", "/d");
-    Assert.assertEquals(server.getHomeDir(), "/a");
-    Assert.assertEquals(server.getConfigDir(), "/b");
-    Assert.assertEquals(server.getLogDir(), "/c");
-    Assert.assertEquals(server.getTempDir(), "/d");
-    Assert.assertEquals(server.getName(), "server");
-    Assert.assertEquals(server.getPrefix(), "server");
-    Assert.assertEquals(server.getPrefixedName("name"), "server.name");
-    Assert.assertNull(server.getConfig());
+    assertEquals(server.getHomeDir(), "/a");
+    assertEquals(server.getConfigDir(), "/b");
+    assertEquals(server.getLogDir(), "/c");
+    assertEquals(server.getTempDir(), "/d");
+    assertEquals(server.getName(), "server");
+    assertEquals(server.getPrefix(), "server");
+    assertEquals(server.getPrefixedName("name"), "server.name");
+    assertNull(server.getConfig());
 
     server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
-    Assert.assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
-    Assert.assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
-    Assert.assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
-    Assert.assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
-    Assert.assertEquals(server.getName(), "server");
-    Assert.assertEquals(server.getPrefix(), "server");
-    Assert.assertEquals(server.getPrefixedName("name"), "server.name");
-    Assert.assertNotNull(server.getConfig());
+    assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
+    assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
+    assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
+    assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
+    assertEquals(server.getName(), "server");
+    assertEquals(server.getPrefix(), "server");
+    assertEquals(server.getPrefixedName("name"), "server.name");
+    assertNotNull(server.getConfig());
 
     server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath());
-    Assert.assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
-    Assert.assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
-    Assert.assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
-    Assert.assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
-    Assert.assertEquals(server.getName(), "server");
-    Assert.assertEquals(server.getPrefix(), "server");
-    Assert.assertEquals(server.getPrefixedName("name"), "server.name");
-    Assert.assertNull(server.getConfig());
+    assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
+    assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
+    assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
+    assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
+    assertEquals(server.getName(), "server");
+    assertEquals(server.getPrefix(), "server");
+    assertEquals(server.getPrefixedName("name"), "server.name");
+    assertNull(server.getConfig());
   }
 
   @Test
@@ -113,9 +119,9 @@ public class TestServer extends HTestCase {
   @TestDir
   public void initNoConfigDir() throws Exception {
     File homeDir = new File(TestDirHelper.getTestDir(), "home");
-    Assert.assertTrue(homeDir.mkdir());
-    Assert.assertTrue(new File(homeDir, "log").mkdir());
-    Assert.assertTrue(new File(homeDir, "temp").mkdir());
+    assertTrue(homeDir.mkdir());
+    assertTrue(new File(homeDir, "log").mkdir());
+    assertTrue(new File(homeDir, "temp").mkdir());
     Configuration conf = new Configuration(false);
     conf.set("server.services", TestService.class.getName());
     Server server = new Server("server", homeDir.getAbsolutePath(), conf);
@@ -127,9 +133,9 @@ public class TestServer extends HTestCase {
   @TestDir
   public void initConfigDirNotDir() throws Exception {
     File homeDir = new File(TestDirHelper.getTestDir(), "home");
-    Assert.assertTrue(homeDir.mkdir());
-    Assert.assertTrue(new File(homeDir, "log").mkdir());
-    Assert.assertTrue(new File(homeDir, "temp").mkdir());
+    assertTrue(homeDir.mkdir());
+    assertTrue(new File(homeDir, "log").mkdir());
+    assertTrue(new File(homeDir, "temp").mkdir());
     File configDir = new File(homeDir, "conf");
     new FileOutputStream(configDir).close();
     Configuration conf = new Configuration(false);
@@ -143,9 +149,9 @@ public class TestServer extends HTestCase {
   @TestDir
   public void initNoLogDir() throws Exception {
     File homeDir = new File(TestDirHelper.getTestDir(), "home");
-    Assert.assertTrue(homeDir.mkdir());
-    Assert.assertTrue(new File(homeDir, "conf").mkdir());
-    Assert.assertTrue(new File(homeDir, "temp").mkdir());
+    assertTrue(homeDir.mkdir());
+    assertTrue(new File(homeDir, "conf").mkdir());
+    assertTrue(new File(homeDir, "temp").mkdir());
    Configuration conf = new Configuration(false);
     conf.set("server.services", TestService.class.getName());
     Server server = new Server("server", homeDir.getAbsolutePath(), conf);
@@ -157,9 +163,9 @@ public class TestServer extends HTestCase {
   @TestDir
   public void initLogDirNotDir() throws Exception {
     File homeDir = new File(TestDirHelper.getTestDir(), "home");
-    Assert.assertTrue(homeDir.mkdir());
-    Assert.assertTrue(new File(homeDir, "conf").mkdir());
-    Assert.assertTrue(new File(homeDir, "temp").mkdir());
+    assertTrue(homeDir.mkdir());
+    assertTrue(new File(homeDir, "conf").mkdir());
+    assertTrue(new File(homeDir, "temp").mkdir());
     File logDir = new File(homeDir, "log");
     new FileOutputStream(logDir).close();
     Configuration conf = new Configuration(false);
@@ -173,9 +179,9 @@ public class TestServer extends HTestCase {
   @TestDir
   public void initNoTempDir() throws Exception {
     File homeDir = new File(TestDirHelper.getTestDir(), "home");
-    Assert.assertTrue(homeDir.mkdir());
-    Assert.assertTrue(new File(homeDir, "conf").mkdir());
-    Assert.assertTrue(new File(homeDir, "log").mkdir());
+    assertTrue(homeDir.mkdir());
+    assertTrue(new File(homeDir, "conf").mkdir());
+    assertTrue(new File(homeDir, "log").mkdir());
     Configuration conf = new Configuration(false);
     conf.set("server.services", TestService.class.getName());
     Server server = new Server("server", homeDir.getAbsolutePath(), conf);
@@ -187,9 +193,9 @@ public class TestServer extends HTestCase {
   @TestDir
   public void initTempDirNotDir() throws Exception {
     File homeDir = new File(TestDirHelper.getTestDir(), "home");
-    Assert.assertTrue(homeDir.mkdir());
-    Assert.assertTrue(new File(homeDir, "conf").mkdir());
-    Assert.assertTrue(new File(homeDir, "log").mkdir());
+    assertTrue(homeDir.mkdir());
+    assertTrue(new File(homeDir, "conf").mkdir());
+    assertTrue(new File(homeDir, "log").mkdir());
     File tempDir = new File(homeDir, "temp");
     new FileOutputStream(tempDir).close();
     Configuration conf = new Configuration(false);
@@ -204,7 +210,7 @@ public class TestServer extends HTestCase {
   public void siteFileNotAFile() throws Exception {
     String homeDir = TestDirHelper.getTestDir().getAbsolutePath();
     File siteFile = new File(homeDir, "server-site.xml");
-    Assert.assertTrue(siteFile.mkdir());
+    assertTrue(siteFile.mkdir());
     Server server = new Server("server", homeDir, homeDir, homeDir, homeDir);
     server.init();
   }
@@ -234,12 +240,12 @@ public class TestServer extends HTestCase {
 
     @Override
     protected void init() throws ServiceException {
-      Assert.assertEquals(getServer().getStatus(), Server.Status.BOOTING);
+      assertEquals(getServer().getStatus(), Server.Status.BOOTING);
     }
 
     @Override
     public void destroy() {
-      Assert.assertEquals(getServer().getStatus(), Server.Status.SHUTTING_DOWN);
+      assertEquals(getServer().getStatus(), Server.Status.SHUTTING_DOWN);
       super.destroy();
     }
 
@@ -255,12 +261,12 @@ public class TestServer extends HTestCase {
     Configuration conf = new Configuration(false);
     conf.set("server.services", LifeCycleService.class.getName());
     Server server = createServer(conf);
-    Assert.assertEquals(server.getStatus(), Server.Status.UNDEF);
+    assertEquals(server.getStatus(), Server.Status.UNDEF);
     server.init();
-    Assert.assertNotNull(server.get(LifeCycleService.class));
-    Assert.assertEquals(server.getStatus(), Server.Status.NORMAL);
+    assertNotNull(server.get(LifeCycleService.class));
+    assertEquals(server.getStatus(), Server.Status.NORMAL);
     server.destroy();
-    Assert.assertEquals(server.getStatus(), Server.Status.SHUTDOWN);
+    assertEquals(server.getStatus(), Server.Status.SHUTDOWN);
   }
 
   @Test
@@ -270,7 +276,7 @@ public class TestServer extends HTestCase {
     conf.set("server.startup.status", "ADMIN");
     Server server = createServer(conf);
     server.init();
-    Assert.assertEquals(server.getStatus(), Server.Status.ADMIN);
+    assertEquals(server.getStatus(), Server.Status.ADMIN);
     server.destroy();
   }
 
@@ -334,7 +340,7 @@ public class TestServer extends HTestCase {
     Server server = createServer(conf);
     server.init();
     server.setStatus(Server.Status.ADMIN);
-    Assert.assertTrue(TestService.LIFECYCLE.contains("serverStatusChange"));
+    assertTrue(TestService.LIFECYCLE.contains("serverStatusChange"));
   }
 
   @Test
@@ -357,7 +363,7 @@ public class TestServer extends HTestCase {
     server.init();
     TestService.LIFECYCLE.clear();
     server.setStatus(server.getStatus());
-    Assert.assertFalse(TestService.LIFECYCLE.contains("serverStatusChange"));
+    assertFalse(TestService.LIFECYCLE.contains("serverStatusChange"));
   }
 
   @Test
@@ -368,9 +374,9 @@ public class TestServer extends HTestCase {
     conf.set("server.services", TestService.class.getName());
     Server server = createServer(conf);
     server.init();
-    Assert.assertNotNull(server.get(TestService.class));
+    assertNotNull(server.get(TestService.class));
     server.destroy();
-    Assert.assertEquals(TestService.LIFECYCLE, Arrays.asList("init", "postInit", "serverStatusChange", "destroy"));
+    assertEquals(TestService.LIFECYCLE, Arrays.asList("init", "postInit", "serverStatusChange", "destroy"));
   }
 
   @Test
@@ -379,7 +385,7 @@ public class TestServer extends HTestCase {
     String dir = TestDirHelper.getTestDir().getAbsolutePath();
     Server server = new Server("testserver", dir, dir, dir, dir);
     server.init();
-    Assert.assertEquals(server.getConfig().get("testserver.a"), "default");
+    assertEquals(server.getConfig().get("testserver.a"), "default");
   }
 
   @Test
@@ -392,7 +398,7 @@ public class TestServer extends HTestCase {
     w.close();
     Server server = new Server("testserver", dir, dir, dir, dir);
     server.init();
-    Assert.assertEquals(server.getConfig().get("testserver.a"), "site");
+    assertEquals(server.getConfig().get("testserver.a"), "site");
   }
 
   @Test
@@ -407,7 +413,7 @@ public class TestServer extends HTestCase {
       w.close();
       Server server = new Server("testserver", dir, dir, dir, dir);
       server.init();
-      Assert.assertEquals(server.getConfig().get("testserver.a"), "sysprop");
+      assertEquals(server.getConfig().get("testserver.a"), "sysprop");
     } finally {
       System.getProperties().remove("testserver.a");
     }
@@ -633,7 +639,7 @@ public class TestServer extends HTestCase {
     conf = new Configuration(false);
     server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
-    Assert.assertEquals(ORDER.size(), 0);
+    assertEquals(ORDER.size(), 0);
 
     // 2 services init/destroy
     ORDER.clear();
@@ -643,17 +649,17 @@ public class TestServer extends HTestCase {
     conf.set("server.services", services);
     server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
-    Assert.assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
-    Assert.assertEquals(server.get(MyService3.class).getInterface(), MyService3.class);
-    Assert.assertEquals(ORDER.size(), 4);
-    Assert.assertEquals(ORDER.get(0), "s1.init");
-    Assert.assertEquals(ORDER.get(1), "s3.init");
-    Assert.assertEquals(ORDER.get(2), "s1.postInit");
-    Assert.assertEquals(ORDER.get(3), "s3.postInit");
+    assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
+    assertEquals(server.get(MyService3.class).getInterface(), MyService3.class);
+    assertEquals(ORDER.size(), 4);
+    assertEquals(ORDER.get(0), "s1.init");
+    assertEquals(ORDER.get(1), "s3.init");
+    assertEquals(ORDER.get(2), "s1.postInit");
+    assertEquals(ORDER.get(3), "s3.postInit");
     server.destroy();
-    Assert.assertEquals(ORDER.size(), 6);
-    Assert.assertEquals(ORDER.get(4), "s3.destroy");
-    Assert.assertEquals(ORDER.get(5), "s1.destroy");
+    assertEquals(ORDER.size(), 6);
+    assertEquals(ORDER.get(4), "s3.destroy");
+    assertEquals(ORDER.get(5), "s1.destroy");
 
     // 3 services, 2nd one fails on init
     ORDER.clear();
@@ -665,16 +671,16 @@ public class TestServer extends HTestCase {
     server = new Server("server", dir, dir, dir, dir, conf);
     try {
       server.init();
-      Assert.fail();
+      fail();
     } catch (ServerException ex) {
-      Assert.assertEquals(MyService2.class, ex.getError().getClass());
+      assertEquals(MyService2.class, ex.getError().getClass());
     } catch (Exception ex) {
-      Assert.fail();
+      fail();
     }
-    Assert.assertEquals(ORDER.size(), 3);
-    Assert.assertEquals(ORDER.get(0), "s1.init");
-    Assert.assertEquals(ORDER.get(1), "s2.init");
-    Assert.assertEquals(ORDER.get(2), "s1.destroy");
+    assertEquals(ORDER.size(), 3);
+    assertEquals(ORDER.get(0), "s1.init");
+    assertEquals(ORDER.get(1), "s2.init");
+    assertEquals(ORDER.get(2), "s1.destroy");
 
     // 2 services one fails on destroy
     ORDER.clear();
@@ -683,15 +689,15 @@ public class TestServer extends HTestCase {
     conf.set("server.services", services);
     server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
-    Assert.assertEquals(ORDER.size(), 4);
-    Assert.assertEquals(ORDER.get(0), "s1.init");
-    Assert.assertEquals(ORDER.get(1), "s5.init");
-    Assert.assertEquals(ORDER.get(2), "s1.postInit");
-    Assert.assertEquals(ORDER.get(3), "s5.postInit");
+    assertEquals(ORDER.size(), 4);
+    assertEquals(ORDER.get(0), "s1.init");
+    assertEquals(ORDER.get(1), "s5.init");
+    assertEquals(ORDER.get(2), "s1.postInit");
+    assertEquals(ORDER.get(3), "s5.postInit");
     server.destroy();
-    Assert.assertEquals(ORDER.size(), 6);
-    Assert.assertEquals(ORDER.get(4), "s5.destroy");
-    Assert.assertEquals(ORDER.get(5), "s1.destroy");
+    assertEquals(ORDER.size(), 6);
+    assertEquals(ORDER.get(4), "s5.destroy");
+    assertEquals(ORDER.get(5), "s1.destroy");
 
     // service override via ext
 
@@ -705,16 +711,16 @@ public class TestServer extends HTestCase {
     server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
 
-    Assert.assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
-    Assert.assertEquals(ORDER.size(), 4);
-    Assert.assertEquals(ORDER.get(0), "s1a.init");
-    Assert.assertEquals(ORDER.get(1), "s3.init");
-    Assert.assertEquals(ORDER.get(2), "s1a.postInit");
-    Assert.assertEquals(ORDER.get(3), "s3.postInit");
+    assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
+    assertEquals(ORDER.size(), 4);
+    assertEquals(ORDER.get(0), "s1a.init");
+    assertEquals(ORDER.get(1), "s3.init");
+    assertEquals(ORDER.get(2), "s1a.postInit");
+    assertEquals(ORDER.get(3), "s3.postInit");
     server.destroy();
-    Assert.assertEquals(ORDER.size(), 6);
-    Assert.assertEquals(ORDER.get(4), "s3.destroy");
-    Assert.assertEquals(ORDER.get(5), "s1a.destroy");
+    assertEquals(ORDER.size(), 6);
+    assertEquals(ORDER.get(4), "s3.destroy");
+    assertEquals(ORDER.get(5), "s1a.destroy");
 
     // service override via setService
     ORDER.clear();
@@ -725,16 +731,16 @@ public class TestServer extends HTestCase {
     server.init();
 
     server.setService(MyService1a.class);
-    Assert.assertEquals(ORDER.size(), 6);
-    Assert.assertEquals(ORDER.get(4), "s1.destroy");
-    Assert.assertEquals(ORDER.get(5), "s1a.init");
+    assertEquals(ORDER.size(), 6);
+    assertEquals(ORDER.get(4), "s1.destroy");
+    assertEquals(ORDER.get(5), "s1a.init");
 
-    Assert.assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
+    assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
 
     server.destroy();
-    Assert.assertEquals(ORDER.size(), 8);
-    Assert.assertEquals(ORDER.get(6), "s3.destroy");
-    Assert.assertEquals(ORDER.get(7), "s1a.destroy");
+    assertEquals(ORDER.size(), 8);
+    assertEquals(ORDER.get(6), "s3.destroy");
+    assertEquals(ORDER.get(7), "s1a.destroy");
 
     // service add via setService
     ORDER.clear();
@@ -745,16 +751,16 @@ public class TestServer extends HTestCase {
     server.init();
 
     server.setService(MyService5.class);
-    Assert.assertEquals(ORDER.size(), 5);
-    Assert.assertEquals(ORDER.get(4), "s5.init");
+    assertEquals(ORDER.size(), 5);
+    assertEquals(ORDER.get(4), "s5.init");
 
-    Assert.assertEquals(server.get(MyService5.class).getClass(), MyService5.class);
+    assertEquals(server.get(MyService5.class).getClass(), MyService5.class);
 
     server.destroy();
-    Assert.assertEquals(ORDER.size(), 8);
-    Assert.assertEquals(ORDER.get(5), "s5.destroy");
-    Assert.assertEquals(ORDER.get(6), "s3.destroy");
-    Assert.assertEquals(ORDER.get(7), "s1.destroy");
+    assertEquals(ORDER.size(), 8);
+    assertEquals(ORDER.get(5), "s5.destroy");
+    assertEquals(ORDER.get(6), "s3.destroy");
+    assertEquals(ORDER.get(7), "s1.destroy");
 
     // service add via setService exception
     ORDER.clear();
@@ -765,15 +771,15 @@ public class TestServer extends HTestCase {
     server.init();
     try {
       server.setService(MyService7.class);
-      Assert.fail();
+      fail();
     } catch (ServerException ex) {
-      Assert.assertEquals(ServerException.ERROR.S09, ex.getError());
+      assertEquals(ServerException.ERROR.S09, ex.getError());
     } catch (Exception ex) {
-      Assert.fail();
+      fail();
     }
-    Assert.assertEquals(ORDER.size(), 6);
-    Assert.assertEquals(ORDER.get(4), "s3.destroy");
-    Assert.assertEquals(ORDER.get(5), "s1.destroy");
+    assertEquals(ORDER.size(), 6);
+    assertEquals(ORDER.get(4), "s3.destroy");
+    assertEquals(ORDER.get(5), "s1.destroy");
 
     // service with dependency
     ORDER.clear();
@@ -782,8 +788,8 @@ public class TestServer extends HTestCase {
     conf.set("server.services", services);
     server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
-    Assert.assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
-    Assert.assertEquals(server.get(MyService6.class).getInterface(), MyService6.class);
+    assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
+    assertEquals(server.get(MyService6.class).getInterface(), MyService6.class);
     server.destroy();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServerConstructor.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServerConstructor.java
index 72913eebb5b..6b7c6286d09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServerConstructor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServerConstructor.java
@@ -18,15 +18,15 @@
 
 package org.apache.hadoop.lib.server;
 
+import java.util.Arrays;
+import java.util.Collection;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.HTestCase;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
-import java.util.Arrays;
-import java.util.Collection;
-
 @RunWith(value = Parameterized.class)
 public class TestServerConstructor extends HTestCase {
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java
index b8689c9d6e4..192fdd17049 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java
@@ -18,7 +18,16 @@
 
 package org.apache.hadoop.lib.service.hadoop;
 
-import junit.framework.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
@@ -38,12 +47,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Arrays;
-
 public class TestFileSystemAccessService extends HFSTestCase {
 
   private void createHadoopConf(Configuration hadoopConf) throws Exception {
@@ -71,7 +74,7 @@ public class TestFileSystemAccessService extends HFSTestCase {
     conf.set("server.services", services);
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
-    Assert.assertNotNull(server.get(FileSystemAccess.class));
+    assertNotNull(server.get(FileSystemAccess.class));
     server.destroy();
   }
 
@@ -148,7 +151,7 @@ public class TestFileSystemAccessService extends HFSTestCase {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
-    Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "FOO");
+    assertEquals(fsAccess.serviceHadoopConf.get("foo"), "FOO");
     server.destroy();
   }
 
@@ -174,7 +177,7 @@ public class TestFileSystemAccessService extends HFSTestCase {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
-    Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "BAR");
+    assertEquals(fsAccess.serviceHadoopConf.get("foo"), "BAR");
     server.destroy();
   }
 
@@ -245,15 +248,15 @@ public class TestFileSystemAccessService extends HFSTestCase {
     server.init();
     FileSystemAccess hadoop = server.get(FileSystemAccess.class);
     FileSystem fs = hadoop.createFileSystem("u", hadoop.getFileSystemConfiguration());
-    Assert.assertNotNull(fs);
+    assertNotNull(fs);
     fs.mkdirs(new Path("/tmp/foo"));
     hadoop.releaseFileSystem(fs);
     try {
       fs.mkdirs(new Path("/tmp/foo"));
-      Assert.fail();
+      fail();
     } catch (IOException ex) {
     } catch (Exception ex) {
-      Assert.fail();
+      fail();
     }
     server.destroy();
   }
@@ -288,10 +291,10 @@ public class TestFileSystemAccessService extends HFSTestCase {
     });
     try {
       fsa[0].mkdirs(new Path("/tmp/foo"));
-      Assert.fail();
+      fail();
     } catch (IOException ex) {
     } catch (Exception ex) {
-      Assert.fail();
+      fail();
     }
     server.destroy();
   }
@@ -351,19 +354,19 @@ public class TestFileSystemAccessService extends HFSTestCase {
           throw new IOException();
         }
       });
-      Assert.fail();
+      fail();
     } catch (FileSystemAccessException ex) {
-      Assert.assertEquals(ex.getError(), FileSystemAccessException.ERROR.H03);
+      assertEquals(ex.getError(), FileSystemAccessException.ERROR.H03);
     } catch (Exception ex) {
-      Assert.fail();
+      fail();
     }
 
     try {
       fsa[0].mkdirs(new Path("/tmp/foo"));
-      Assert.fail();
+      fail();
     } catch (IOException ex) {
     } catch (Exception ex) {
-      Assert.fail();
+      fail();
     }
     server.destroy();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/instrumentation/TestInstrumentationService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/instrumentation/TestInstrumentationService.java
index 5bd036339b9..c609fefc80b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/instrumentation/TestInstrumentationService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/instrumentation/TestInstrumentationService.java
@@ -18,7 +18,16 @@
 
 package org.apache.hadoop.lib.service.instrumentation;
 
-import junit.framework.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.StringWriter;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.lib.server.Server;
 import org.apache.hadoop.lib.service.Instrumentation;
@@ -32,11 +41,6 @@ import org.json.simple.JSONObject;
 import org.json.simple.parser.JSONParser;
 import org.junit.Test;
 
-import java.io.StringWriter;
-import java.util.Arrays;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-
 public class TestInstrumentationService extends HTestCase {
 
   @Override
@@ -47,51 +51,51 @@ public class TestInstrumentationService extends HTestCase {
   @Test
   public void cron() {
     InstrumentationService.Cron cron = new InstrumentationService.Cron();
-    Assert.assertEquals(cron.start, 0);
-    Assert.assertEquals(cron.lapStart, 0);
-    Assert.assertEquals(cron.own, 0);
-    Assert.assertEquals(cron.total, 0);
+    assertEquals(cron.start, 0);
+    assertEquals(cron.lapStart, 0);
+    assertEquals(cron.own, 0);
+    assertEquals(cron.total, 0);
     long begin = Time.now();
-    Assert.assertEquals(cron.start(), cron);
-    Assert.assertEquals(cron.start(), cron);
-    Assert.assertEquals(cron.start, begin, 20);
-    Assert.assertEquals(cron.start, cron.lapStart);
+    assertEquals(cron.start(), cron);
+    assertEquals(cron.start(), cron);
+    assertEquals(cron.start, begin, 20);
+    assertEquals(cron.start, cron.lapStart);
     sleep(100);
-    Assert.assertEquals(cron.stop(), cron);
+    assertEquals(cron.stop(), cron);
     long end = Time.now();
     long delta = end - begin;
-    Assert.assertEquals(cron.own, delta, 20);
-    Assert.assertEquals(cron.total, 0);
-    Assert.assertEquals(cron.lapStart, 0);
+    assertEquals(cron.own, delta, 20);
+    assertEquals(cron.total, 0);
+    assertEquals(cron.lapStart, 0);
     sleep(100);
     long reStart = Time.now();
     cron.start();
-    Assert.assertEquals(cron.start, begin, 20);
-    Assert.assertEquals(cron.lapStart, reStart, 20);
+    assertEquals(cron.start, begin, 20);
+    assertEquals(cron.lapStart, reStart, 20);
     sleep(100);
     cron.stop();
     long reEnd = Time.now();
     delta += reEnd - reStart;
-    Assert.assertEquals(cron.own, delta, 20);
-    Assert.assertEquals(cron.total, 0);
-    Assert.assertEquals(cron.lapStart, 0);
+    assertEquals(cron.own, delta, 20);
+    assertEquals(cron.total, 0);
+    assertEquals(cron.lapStart, 0);
     cron.end();
-    Assert.assertEquals(cron.total, reEnd - begin, 20);
+    assertEquals(cron.total, reEnd - begin, 20);
 
     try {
       cron.start();
-      Assert.fail();
+      fail();
     } catch (IllegalStateException ex) {
     } catch (Exception ex) {
-      Assert.fail();
+      fail();
     }
 
     try {
       cron.stop();
-      Assert.fail();
+      fail();
     } catch (IllegalStateException ex) {
     } catch (Exception ex) {
-      Assert.fail();
+      fail();
     }
   }
 
@@ -135,10 +139,10 @@ public class TestInstrumentationService extends HTestCase {
     timer.addCron(cron);
     long[] values = timer.getValues();
-    Assert.assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
-    Assert.assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
-    Assert.assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
-    Assert.assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
+    assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
+    assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
+    assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
+    assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
 
     cron = new InstrumentationService.Cron();
 
@@ -168,10 +172,10 @@ public class TestInstrumentationService extends HTestCase {
     timer.addCron(cron);
     values = timer.getValues();
-    Assert.assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
-    Assert.assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
-    Assert.assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
-    Assert.assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
+    assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
+    assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
+    assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
+    assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
 
     avgTotal = totalDelta;
     avgOwn = ownDelta;
@@ -205,27 +209,27 @@ public class TestInstrumentationService extends HTestCase {
     cron.stop();
     timer.addCron(cron);
     values = timer.getValues();
-    Assert.assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
-    Assert.assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
-    Assert.assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
-    Assert.assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
+    assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
+    assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
+    assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
+    assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
 
     JSONObject json = (JSONObject) new JSONParser().parse(timer.toJSONString());
-    Assert.assertEquals(json.size(), 4);
-    Assert.assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]);
-    Assert.assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]);
-    Assert.assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]);
-    Assert.assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]);
+    assertEquals(json.size(), 4);
+    assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]);
+    assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]);
+    assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]);
+    assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]);
 
     StringWriter writer = new StringWriter();
     timer.writeJSONString(writer);
     writer.close();
     json = (JSONObject) new JSONParser().parse(writer.toString());
-    Assert.assertEquals(json.size(), 4);
-    Assert.assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]);
-    Assert.assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]);
-    Assert.assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]);
-    Assert.assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]);
+    assertEquals(json.size(), 4);
+    assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]);
+    assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]);
+    assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]);
+    assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]);
   }
 
   @Test
@@ -240,34 +244,34 @@ public class TestInstrumentationService extends HTestCase {
     InstrumentationService.Sampler sampler = new InstrumentationService.Sampler();
     sampler.init(4, var);
-    Assert.assertEquals(sampler.getRate(), 0f, 0.0001);
+    assertEquals(sampler.getRate(), 0f, 0.0001);
     sampler.sample();
-    Assert.assertEquals(sampler.getRate(), 0f, 0.0001);
+    assertEquals(sampler.getRate(), 0f, 0.0001);
     value[0] = 1;
     sampler.sample();
-    Assert.assertEquals(sampler.getRate(), (0d + 1) / 2, 0.0001);
+    assertEquals(sampler.getRate(), (0d + 1) / 2, 0.0001);
     value[0] = 2;
     sampler.sample();
-    Assert.assertEquals(sampler.getRate(), (0d + 1 + 2) / 3, 0.0001);
+    assertEquals(sampler.getRate(), (0d + 1 + 2) / 3, 0.0001);
     value[0] = 3;
     sampler.sample();
-    Assert.assertEquals(sampler.getRate(), (0d + 1 + 2 + 3) / 4, 0.0001);
+    assertEquals(sampler.getRate(), (0d + 1 + 2 + 3) / 4, 0.0001);
     value[0] = 4;
     sampler.sample();
-    Assert.assertEquals(sampler.getRate(), (4d + 1 + 2 + 3) / 4, 0.0001);
+    assertEquals(sampler.getRate(), (4d + 1 + 2 + 3) / 4, 0.0001);
 
     JSONObject json = (JSONObject) new JSONParser().parse(sampler.toJSONString());
-    Assert.assertEquals(json.size(), 2);
-    Assert.assertEquals(json.get("sampler"), sampler.getRate());
-    Assert.assertEquals(json.get("size"), 4L);
+    assertEquals(json.size(), 2);
+    assertEquals(json.get("sampler"), sampler.getRate());
+    assertEquals(json.get("size"), 4L);
 
     StringWriter writer = new StringWriter();
     sampler.writeJSONString(writer);
     writer.close();
     json = (JSONObject) new JSONParser().parse(writer.toString());
-    Assert.assertEquals(json.size(), 2);
-    Assert.assertEquals(json.get("sampler"), sampler.getRate());
-    Assert.assertEquals(json.get("size"), 4L);
+    assertEquals(json.size(), 2);
+    assertEquals(json.get("sampler"), sampler.getRate());
+    assertEquals(json.get("size"), 4L);
   }
 
   @Test
@@ -283,15 +287,15 @@ public class TestInstrumentationService extends HTestCase {
     };
 
     JSONObject json = (JSONObject) new JSONParser().parse(variableHolder.toJSONString());
-    Assert.assertEquals(json.size(), 1);
-    Assert.assertEquals(json.get("value"), "foo");
+    assertEquals(json.size(), 1);
+    assertEquals(json.get("value"), "foo");
 
     StringWriter writer = new StringWriter();
     variableHolder.writeJSONString(writer);
     writer.close();
     json = (JSONObject) new JSONParser().parse(writer.toString());
-    Assert.assertEquals(json.size(), 1);
-    Assert.assertEquals(json.get("value"), "foo");
+    assertEquals(json.size(), 1);
+    assertEquals(json.get("value"), "foo");
   }
 
   @Test
@@ -306,7 +310,7 @@ public class TestInstrumentationService extends HTestCase {
     server.init();
 
     Instrumentation instrumentation = server.get(Instrumentation.class);
-    Assert.assertNotNull(instrumentation);
+    assertNotNull(instrumentation);
     instrumentation.incr("g", "c", 1);
     instrumentation.incr("g", "c", 2);
     instrumentation.incr("g", "c1", 2);
@@ -339,27 +343,27 @@ public class TestInstrumentationService extends HTestCase {
     instrumentation.addSampler("g", "s", 10, varToSample);
 
     Map<String, Map<String, ?>> snapshot = instrumentation.getSnapshot();
-    Assert.assertNotNull(snapshot.get("os-env"));
-    Assert.assertNotNull(snapshot.get("sys-props"));
-    Assert.assertNotNull(snapshot.get("jvm"));
-    Assert.assertNotNull(snapshot.get("counters"));
-    Assert.assertNotNull(snapshot.get("timers"));
-    Assert.assertNotNull(snapshot.get("variables"));
-    Assert.assertNotNull(snapshot.get("samplers"));
-    Assert.assertNotNull(((Map) snapshot.get("os-env")).get("PATH"));
-    Assert.assertNotNull(((Map) snapshot.get("sys-props")).get("java.version"));
-    Assert.assertNotNull(((Map) snapshot.get("jvm")).get("free.memory"));
-    Assert.assertNotNull(((Map) snapshot.get("jvm")).get("max.memory"));
-    Assert.assertNotNull(((Map) snapshot.get("jvm")).get("total.memory"));
-    Assert.assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("counters")).get("g"));
-    Assert.assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("timers")).get("g"));
-    Assert.assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("variables")).get("g"));
-    Assert.assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("samplers")).get("g"));
-    Assert.assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("counters")).get("g").get("c"));
-    Assert.assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("counters")).get("g").get("c1"));
-    Assert.assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("timers")).get("g").get("t"));
-    Assert.assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("variables")).get("g").get("v"));
-    Assert.assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("samplers")).get("g").get("s"));
+    assertNotNull(snapshot.get("os-env"));
+    assertNotNull(snapshot.get("sys-props"));
+    assertNotNull(snapshot.get("jvm"));
+    assertNotNull(snapshot.get("counters"));
+    assertNotNull(snapshot.get("timers"));
+    assertNotNull(snapshot.get("variables"));
+    assertNotNull(snapshot.get("samplers"));
+    assertNotNull(((Map) snapshot.get("os-env")).get("PATH"));
+    assertNotNull(((Map) snapshot.get("sys-props")).get("java.version"));
+    assertNotNull(((Map) snapshot.get("jvm")).get("free.memory"));
+    assertNotNull(((Map) snapshot.get("jvm")).get("max.memory"));
+    assertNotNull(((Map) snapshot.get("jvm")).get("total.memory"));
+    assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("counters")).get("g"));
+    assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("timers")).get("g"));
+    assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("variables")).get("g"));
+    assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("samplers")).get("g"));
+    assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("counters")).get("g").get("c"));
+    assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("counters")).get("g").get("c1"));
+    assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("timers")).get("g").get("t"));
+    assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("variables")).get("g").get("v"));
+    assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("samplers")).get("g").get("s"));
 
     StringWriter writer = new StringWriter();
     JSONObject.writeJSONString(snapshot, writer);
@@ -392,12 +396,12 @@ public class TestInstrumentationService extends HTestCase {
 
     sleep(2000);
     int i = count.get();
-    Assert.assertTrue(i > 0);
+    assertTrue(i > 0);
 
     Map<String, Map<String, ?>> snapshot = instrumentation.getSnapshot();
     Map<String, Map<String, ?>> samplers = (Map<String, Map<String, ?>>) snapshot.get("samplers");
     InstrumentationService.Sampler sampler = (InstrumentationService.Sampler) samplers.get("g").get("s");
-    Assert.assertTrue(sampler.getRate() > 0);
+    assertTrue(sampler.getRate() > 0);
 
     server.destroy();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/scheduler/TestSchedulerService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/scheduler/TestSchedulerService.java
index 5e4a982b635..f8abb48e7aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/scheduler/TestSchedulerService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/scheduler/TestSchedulerService.java
@@ -18,7 +18,10 @@
 
 package org.apache.hadoop.lib.service.scheduler;
 
-import junit.framework.Assert;
+import static org.junit.Assert.assertNotNull;
+
+import java.util.Arrays;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.lib.server.Server;
 import org.apache.hadoop.lib.service.Scheduler;
@@ -29,8 +32,6 @@ import org.apache.hadoop.test.TestDirHelper;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 
-import java.util.Arrays;
-
 public class TestSchedulerService extends HTestCase {
 
   @Test
@@ -42,7 +43,7 @@ public class TestSchedulerService extends HTestCase {
       SchedulerService.class.getName())));
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
-    Assert.assertNotNull(server.get(Scheduler.class));
+    assertNotNull(server.get(Scheduler.class));
     server.destroy();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java
index bd2f0ac07bf..167690902aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java
@@ -17,14 +17,14 @@
  */
 package org.apache.hadoop.lib.service.security;
 
-import org.apache.hadoop.security.GroupMappingServiceProvider;
-import org.apache.hadoop.test.HadoopUsersConfTestHelper;
-
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.hadoop.security.GroupMappingServiceProvider;
+import org.apache.hadoop.test.HadoopUsersConfTestHelper;
+
 public class DummyGroupMapping implements GroupMappingServiceProvider {
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestGroupsService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestGroupsService.java
index bb4a29cae27..445192b66fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestGroupsService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestGroupsService.java
@@ -18,7 +18,12 @@
 
 package org.apache.hadoop.lib.service.security;
 
-import junit.framework.Assert;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNotSame;
+
+import java.util.Arrays;
+import java.util.List;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.lib.server.Server;
 import org.apache.hadoop.lib.service.Groups;
@@ -28,9 +33,6 @@ import org.apache.hadoop.test.TestDirHelper;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 
-import java.util.Arrays;
-import java.util.List;
-
 public class TestGroupsService extends HTestCase {
 
   @Test
@@ -42,9 +44,9 @@ public class TestGroupsService extends HTestCase {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     Groups groups = server.get(Groups.class);
-    Assert.assertNotNull(groups);
+    assertNotNull(groups);
     List<String> g = groups.getGroups(System.getProperty("user.name"));
-    Assert.assertNotSame(g.size(), 0);
+    assertNotSame(g.size(), 0);
     server.destroy();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestProxyUserService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestProxyUserService.java
index 3d4115e8c7e..294f5e80b24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestProxyUserService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestProxyUserService.java
@@ -18,7 +18,12 @@
 
 package org.apache.hadoop.lib.service.security;
 
-import junit.framework.Assert;
+import static org.junit.Assert.assertNotNull;
+
+import java.security.AccessControlException;
+import java.util.Arrays;
+import java.util.List;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.lib.server.Server;
 import org.apache.hadoop.lib.server.ServiceException;
@@ -31,10 +36,6 @@ import org.apache.hadoop.test.TestException;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 
-import java.security.AccessControlException;
-import java.util.Arrays;
-import java.util.List;
-
 public class TestProxyUserService extends HTestCase {
 
   @Test
@@ -47,7 +48,7 @@ public class TestProxyUserService extends HTestCase {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     ProxyUser proxyUser = server.get(ProxyUser.class);
-    Assert.assertNotNull(proxyUser);
+    assertNotNull(proxyUser);
     server.destroy();
   }
 
@@ -103,7 +104,7 @@ public class TestProxyUserService extends HTestCase {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     ProxyUser proxyUser = server.get(ProxyUser.class);
-    Assert.assertNotNull(proxyUser);
+    assertNotNull(proxyUser);
     proxyUser.validate("foo", "localhost", "bar");
     server.destroy();
   }
 
@@ -120,7 +121,7 @@ public class TestProxyUserService extends HTestCase {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     ProxyUser proxyUser = server.get(ProxyUser.class);
-    Assert.assertNotNull(proxyUser);
+    assertNotNull(proxyUser);
     proxyUser.validate("bar", "localhost", "foo");
     server.destroy();
   }
 
@@ -137,7 +138,7 @@ public class TestProxyUserService extends HTestCase {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     ProxyUser proxyUser = server.get(ProxyUser.class);
-    Assert.assertNotNull(proxyUser);
+    assertNotNull(proxyUser);
     proxyUser.validate("foo", "localhost", "bar");
     server.destroy();
   }
 
@@ -166,7 +167,7 @@ public class TestProxyUserService extends HTestCase {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     ProxyUser proxyUser = server.get(ProxyUser.class);
-    Assert.assertNotNull(proxyUser);
+    assertNotNull(proxyUser);
     proxyUser.validate("foo", "localhost", System.getProperty("user.name"));
     server.destroy();
   }
 
@@ -184,7 +185,7 @@ public class TestProxyUserService extends HTestCase {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     ProxyUser proxyUser = server.get(ProxyUser.class);
-    Assert.assertNotNull(proxyUser);
+    assertNotNull(proxyUser);
     proxyUser.validate("foo", "unknownhost.bar.foo", "bar");
     server.destroy();
   }
 
@@ -201,7 +202,7 @@ public class TestProxyUserService extends HTestCase {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     ProxyUser proxyUser = server.get(ProxyUser.class);
-    Assert.assertNotNull(proxyUser);
+    assertNotNull(proxyUser);
     proxyUser.validate("foo", "www.yahoo.com", "bar");
     server.destroy();
   }
 
@@ -218,7 +219,7 @@ public class TestProxyUserService extends HTestCase {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     ProxyUser proxyUser = server.get(ProxyUser.class);
-    Assert.assertNotNull(proxyUser);
+    assertNotNull(proxyUser);
     proxyUser.validate("foo", "localhost", System.getProperty("user.name"));
     server.destroy();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java
index f3a2a5ad654..44da0afd705 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java
@@ -18,18 +18,21 @@
 
 package org.apache.hadoop.lib.servlet;
 
-import junit.framework.Assert;
-import org.apache.hadoop.test.HTestCase;
-import org.junit.Test;
-import org.mockito.Mockito;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
 import javax.servlet.ServletException;
 import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hadoop.test.HTestCase;
+import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestHostnameFilter extends HTestCase {
 
@@ -47,17 +50,17 @@ public class TestHostnameFilter extends HTestCase {
       @Override
       public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
         throws IOException, ServletException {
-        Assert.assertTrue(HostnameFilter.get().contains("localhost"));
+        assertTrue(HostnameFilter.get().contains("localhost"));
         invoked.set(true);
       }
     };
 
     Filter filter = new HostnameFilter();
     filter.init(null);
-    Assert.assertNull(HostnameFilter.get());
+    assertNull(HostnameFilter.get());
     filter.doFilter(request, response, chain);
-    Assert.assertTrue(invoked.get());
-    Assert.assertNull(HostnameFilter.get());
+    assertTrue(invoked.get());
+    assertNull(HostnameFilter.get());
     filter.destroy();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestMDCFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestMDCFilter.java
index 216af5fa505..911cc0ad230 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestMDCFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestMDCFilter.java
@@ -18,11 +18,13 @@
 
 package org.apache.hadoop.lib.servlet;
 
-import junit.framework.Assert;
-import org.apache.hadoop.test.HTestCase;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.slf4j.MDC;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.security.Principal;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
@@ -30,9 +32,11 @@ import javax.servlet.ServletException;
 import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletRequest;
-import java.io.IOException;
-import java.security.Principal;
-import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hadoop.test.HTestCase;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.slf4j.MDC;
 
 public class TestMDCFilter extends HTestCase {
 
@@ -52,10 +56,10 @@ public class TestMDCFilter extends HTestCase {
       @Override
       public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
         throws IOException, ServletException {
-        Assert.assertEquals(MDC.get("hostname"), null);
-        Assert.assertEquals(MDC.get("user"), null);
-        Assert.assertEquals(MDC.get("method"), "METHOD");
-        Assert.assertEquals(MDC.get("path"), "/pathinfo");
+        assertEquals(MDC.get("hostname"), null);
+        assertEquals(MDC.get("user"), null);
+        assertEquals(MDC.get("method"), "METHOD");
+        assertEquals(MDC.get("path"), "/pathinfo");
         invoked.set(true);
       }
     };
@@ -65,11 +69,11 @@ public class TestMDCFilter extends HTestCase {
     filter.init(null);
 
     filter.doFilter(request, response, chain);
-    Assert.assertTrue(invoked.get());
-    Assert.assertNull(MDC.get("hostname"));
-    Assert.assertNull(MDC.get("user"));
-    Assert.assertNull(MDC.get("method"));
-    Assert.assertNull(MDC.get("path"));
+    assertTrue(invoked.get());
+    assertNull(MDC.get("hostname"));
+    assertNull(MDC.get("user"));
+    assertNull(MDC.get("method"));
+    assertNull(MDC.get("path"));
 
     Mockito.when(request.getUserPrincipal()).thenReturn(new Principal() {
       @Override
@@ -83,15 +87,15 @@ public class TestMDCFilter extends HTestCase {
       @Override
      public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
         throws IOException, ServletException {
-        Assert.assertEquals(MDC.get("hostname"), null);
-        Assert.assertEquals(MDC.get("user"), "name");
+        assertEquals(MDC.get("hostname"), null);
+
assertEquals(MDC.get("user"), "name"); + assertEquals(MDC.get("method"), "METHOD"); + assertEquals(MDC.get("path"), "/pathinfo"); invoked.set(true); } }; filter.doFilter(request, response, chain); - Assert.assertTrue(invoked.get()); + assertTrue(invoked.get()); HostnameFilter.HOSTNAME_TL.set("HOST"); @@ -100,15 +104,15 @@ public class TestMDCFilter extends HTestCase { @Override public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException { - Assert.assertEquals(MDC.get("hostname"), "HOST"); - Assert.assertEquals(MDC.get("user"), "name"); - Assert.assertEquals(MDC.get("method"), "METHOD"); - Assert.assertEquals(MDC.get("path"), "/pathinfo"); + assertEquals(MDC.get("hostname"), "HOST"); + assertEquals(MDC.get("user"), "name"); + assertEquals(MDC.get("method"), "METHOD"); + assertEquals(MDC.get("path"), "/pathinfo"); invoked.set(true); } }; filter.doFilter(request, response, chain); - Assert.assertTrue(invoked.get()); + assertTrue(invoked.get()); HostnameFilter.HOSTNAME_TL.remove(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java index 380fa3e0812..0234266e4ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java @@ -18,7 +18,8 @@ package org.apache.hadoop.lib.servlet; -import junit.framework.Assert; +import static org.junit.Assert.assertEquals; + import org.apache.hadoop.lib.server.Server; import org.apache.hadoop.test.HTestCase; import org.apache.hadoop.test.TestDir; @@ -35,10 +36,10 @@ public class TestServerWebApp extends HTestCase { @Test public void getHomeDir() { System.setProperty("TestServerWebApp0.home.dir", "/tmp"); - Assert.assertEquals(ServerWebApp.getHomeDir("TestServerWebApp0"), "/tmp"); - Assert.assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmp/log"); + assertEquals(ServerWebApp.getHomeDir("TestServerWebApp0"), "/tmp"); + assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmp/log"); System.setProperty("TestServerWebApp0.log.dir", "/tmplog"); - Assert.assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmplog"); + assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmplog"); } @Test @@ -52,11 +53,11 @@ public class TestServerWebApp extends HTestCase { ServerWebApp server = new ServerWebApp("TestServerWebApp1") { }; - Assert.assertEquals(server.getStatus(), Server.Status.UNDEF); + assertEquals(server.getStatus(), Server.Status.UNDEF); server.contextInitialized(null); - Assert.assertEquals(server.getStatus(), Server.Status.NORMAL); + assertEquals(server.getStatus(), Server.Status.NORMAL); server.contextDestroyed(null); - Assert.assertEquals(server.getStatus(), Server.Status.SHUTDOWN); + assertEquals(server.getStatus(), Server.Status.SHUTDOWN); } @Test(expected = RuntimeException.class) diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestCheck.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestCheck.java index 532ad369de6..877dcd46bc7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestCheck.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestCheck.java @@ -19,18 +19,19 @@ package org.apache.hadoop.lib.util; -import junit.framework.Assert; -import org.apache.hadoop.test.HTestCase; -import org.junit.Test; +import static org.junit.Assert.assertEquals; import java.util.ArrayList; import java.util.Arrays; +import org.apache.hadoop.test.HTestCase; +import org.junit.Test; + public class TestCheck extends HTestCase { @Test public void notNullNotNull() { - Assert.assertEquals(Check.notNull("value", "name"), "value"); + assertEquals(Check.notNull("value", "name"), "value"); } @Test(expected = IllegalArgumentException.class) @@ -79,7 +80,7 @@ public class TestCheck extends HTestCase { @Test public void notEmptyNotEmtpy() { - Assert.assertEquals(Check.notEmpty("value", "name"), "value"); + assertEquals(Check.notEmpty("value", "name"), "value"); } @Test(expected = IllegalArgumentException.class) @@ -94,10 +95,10 @@ public class TestCheck extends HTestCase { @Test public void validIdentifierValid() throws Exception { - Assert.assertEquals(Check.validIdentifier("a", 1, ""), "a"); - Assert.assertEquals(Check.validIdentifier("a1", 2, ""), "a1"); - Assert.assertEquals(Check.validIdentifier("a_", 3, ""), "a_"); - Assert.assertEquals(Check.validIdentifier("_", 1, ""), "_"); + assertEquals(Check.validIdentifier("a", 1, ""), "a"); + assertEquals(Check.validIdentifier("a1", 2, ""), "a1"); + assertEquals(Check.validIdentifier("a_", 3, ""), "a_"); + assertEquals(Check.validIdentifier("_", 1, ""), "_"); } @Test(expected = IllegalArgumentException.class) @@ -117,7 +118,7 @@ public class TestCheck extends HTestCase { @Test public void checkGTZeroGreater() { - Assert.assertEquals(Check.gt0(120, "test"), 120); + assertEquals(Check.gt0(120, "test"), 120); } @Test(expected = IllegalArgumentException.class) @@ -132,8 +133,8 @@ public class TestCheck extends HTestCase { @Test public void checkGEZero() { - Assert.assertEquals(Check.ge0(120, "test"), 120); - Assert.assertEquals(Check.ge0(0, "test"), 0); + assertEquals(Check.ge0(120, "test"), 120); + assertEquals(Check.ge0(0, "test"), 0); } @Test(expected = IllegalArgumentException.class) diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestConfigurationUtils.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestConfigurationUtils.java index 48b5f9155f1..925edc54084 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestConfigurationUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestConfigurationUtils.java @@ -18,27 +18,29 @@ package org.apache.hadoop.lib.util; -import junit.framework.Assert; -import org.apache.hadoop.conf.Configuration; -import org.junit.Test; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; +import org.apache.hadoop.conf.Configuration; +import org.junit.Test; + public class TestConfigurationUtils { @Test public void constructors() throws Exception { Configuration conf = new Configuration(false); - Assert.assertEquals(conf.size(), 0); + assertEquals(conf.size(), 0); byte[] bytes = "aA".getBytes(); InputStream is = new ByteArrayInputStream(bytes); conf = new Configuration(false); ConfigurationUtils.load(conf, is); - Assert.assertEquals(conf.size(), 1); - Assert.assertEquals(conf.get("a"), "A"); + 
assertEquals(conf.size(), 1); + assertEquals(conf.get("a"), "A"); } @@ -62,9 +64,9 @@ public class TestConfigurationUtils { ConfigurationUtils.copy(srcConf, targetConf); - Assert.assertEquals("valueFromSource", targetConf.get("testParameter1")); - Assert.assertEquals("valueFromSource", targetConf.get("testParameter2")); - Assert.assertEquals("valueFromTarget", targetConf.get("testParameter3")); + assertEquals("valueFromSource", targetConf.get("testParameter1")); + assertEquals("valueFromSource", targetConf.get("testParameter2")); + assertEquals("valueFromTarget", targetConf.get("testParameter3")); } @Test @@ -80,13 +82,13 @@ public class TestConfigurationUtils { ConfigurationUtils.injectDefaults(srcConf, targetConf); - Assert.assertEquals("valueFromSource", targetConf.get("testParameter1")); - Assert.assertEquals("originalValueFromTarget", targetConf.get("testParameter2")); - Assert.assertEquals("originalValueFromTarget", targetConf.get("testParameter3")); + assertEquals("valueFromSource", targetConf.get("testParameter1")); + assertEquals("originalValueFromTarget", targetConf.get("testParameter2")); + assertEquals("originalValueFromTarget", targetConf.get("testParameter3")); - Assert.assertEquals("valueFromSource", srcConf.get("testParameter1")); - Assert.assertEquals("valueFromSource", srcConf.get("testParameter2")); - Assert.assertNull(srcConf.get("testParameter3")); + assertEquals("valueFromSource", srcConf.get("testParameter1")); + assertEquals("valueFromSource", srcConf.get("testParameter2")); + assertNull(srcConf.get("testParameter3")); } @@ -95,11 +97,11 @@ public class TestConfigurationUtils { Configuration conf = new Configuration(false); conf.set("a", "A"); conf.set("b", "${a}"); - Assert.assertEquals(conf.getRaw("a"), "A"); - Assert.assertEquals(conf.getRaw("b"), "${a}"); + assertEquals(conf.getRaw("a"), "A"); + assertEquals(conf.getRaw("b"), "${a}"); conf = ConfigurationUtils.resolve(conf); - Assert.assertEquals(conf.getRaw("a"), "A"); - Assert.assertEquals(conf.getRaw("b"), "A"); + assertEquals(conf.getRaw("a"), "A"); + assertEquals(conf.getRaw("b"), "A"); } @Test @@ -110,16 +112,16 @@ public class TestConfigurationUtils { conf.set("b", "${a}"); conf.set("c", "${user.name}"); conf.set("d", "${aaa}"); - Assert.assertEquals(conf.getRaw("a"), "A"); - Assert.assertEquals(conf.getRaw("b"), "${a}"); - Assert.assertEquals(conf.getRaw("c"), "${user.name}"); - Assert.assertEquals(conf.get("a"), "A"); - Assert.assertEquals(conf.get("b"), "A"); - Assert.assertEquals(conf.get("c"), userName); - Assert.assertEquals(conf.get("d"), "${aaa}"); + assertEquals(conf.getRaw("a"), "A"); + assertEquals(conf.getRaw("b"), "${a}"); + assertEquals(conf.getRaw("c"), "${user.name}"); + assertEquals(conf.get("a"), "A"); + assertEquals(conf.get("b"), "A"); + assertEquals(conf.get("c"), userName); + assertEquals(conf.get("d"), "${aaa}"); conf.set("user.name", "foo"); - Assert.assertEquals(conf.get("user.name"), "foo"); + assertEquals(conf.get("user.name"), "foo"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestInputStreamEntity.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestInputStreamEntity.java index c3e0200d6ea..0fa94093064 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestInputStreamEntity.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestInputStreamEntity.java @@ -18,13 +18,14 @@ package org.apache.hadoop.lib.wsrs; 
-import junit.framework.Assert; -import org.junit.Test; +import static org.junit.Assert.assertEquals; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.InputStream; +import org.junit.Test; + public class TestInputStreamEntity { @Test @@ -34,14 +35,14 @@ public class TestInputStreamEntity { InputStreamEntity i = new InputStreamEntity(is); i.write(baos); baos.close(); - Assert.assertEquals(new String(baos.toByteArray()), "abc"); + assertEquals(new String(baos.toByteArray()), "abc"); is = new ByteArrayInputStream("abc".getBytes()); baos = new ByteArrayOutputStream(); i = new InputStreamEntity(is, 1, 1); i.write(baos); baos.close(); - Assert.assertEquals(baos.toByteArray()[0], 'b'); + assertEquals(baos.toByteArray()[0], 'b'); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONMapProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONMapProvider.java index afb07572e7a..099378032dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONMapProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONMapProvider.java @@ -18,28 +18,31 @@ package org.apache.hadoop.lib.wsrs; -import junit.framework.Assert; -import org.json.simple.JSONObject; -import org.junit.Test; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.ByteArrayOutputStream; import java.util.Map; +import org.json.simple.JSONObject; +import org.junit.Test; + public class TestJSONMapProvider { @Test @SuppressWarnings("unchecked") public void test() throws Exception { JSONMapProvider p = new JSONMapProvider(); - Assert.assertTrue(p.isWriteable(Map.class, null, null, null)); - Assert.assertFalse(p.isWriteable(this.getClass(), null, null, null)); - Assert.assertEquals(p.getSize(null, null, null, null, null), -1); + assertTrue(p.isWriteable(Map.class, null, null, null)); + assertFalse(p.isWriteable(this.getClass(), null, null, null)); + assertEquals(p.getSize(null, null, null, null, null), -1); ByteArrayOutputStream baos = new ByteArrayOutputStream(); JSONObject json = new JSONObject(); json.put("a", "A"); p.writeTo(json, JSONObject.class, null, null, null, null, baos); baos.close(); - Assert.assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}"); + assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONProvider.java index a9ac9a2d746..5f747500ed5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONProvider.java @@ -18,27 +18,30 @@ package org.apache.hadoop.lib.wsrs; -import junit.framework.Assert; -import org.json.simple.JSONObject; -import org.junit.Test; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.ByteArrayOutputStream; +import org.json.simple.JSONObject; +import org.junit.Test; + public class TestJSONProvider { @Test @SuppressWarnings("unchecked") public void test() throws Exception { JSONProvider p = new 
JSONProvider(); - Assert.assertTrue(p.isWriteable(JSONObject.class, null, null, null)); - Assert.assertFalse(p.isWriteable(this.getClass(), null, null, null)); - Assert.assertEquals(p.getSize(null, null, null, null, null), -1); + assertTrue(p.isWriteable(JSONObject.class, null, null, null)); + assertFalse(p.isWriteable(this.getClass(), null, null, null)); + assertEquals(p.getSize(null, null, null, null, null), -1); ByteArrayOutputStream baos = new ByteArrayOutputStream(); JSONObject json = new JSONObject(); json.put("a", "A"); p.writeTo(json, JSONObject.class, null, null, null, null, baos); baos.close(); - Assert.assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}"); + assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java index e2376879d7d..92719db7a7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java @@ -18,41 +18,43 @@ package org.apache.hadoop.lib.wsrs; -import junit.framework.Assert; -import org.junit.Test; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; import java.util.regex.Pattern; +import org.junit.Test; + public class TestParam { private <T> void test(Param<T> param, String name, String domain, T defaultValue, T validValue, String invalidStrValue, String outOfRangeValue) throws Exception { - Assert.assertEquals(name, param.getName()); - Assert.assertEquals(domain, param.getDomain()); - Assert.assertEquals(defaultValue, param.value()); - Assert.assertEquals(defaultValue, param.parseParam("")); - Assert.assertEquals(defaultValue, param.parseParam(null)); - Assert.assertEquals(validValue, param.parseParam(validValue.toString())); + assertEquals(name, param.getName()); + assertEquals(domain, param.getDomain()); + assertEquals(defaultValue, param.value()); + assertEquals(defaultValue, param.parseParam("")); + assertEquals(defaultValue, param.parseParam(null)); + assertEquals(validValue, param.parseParam(validValue.toString())); if (invalidStrValue != null) { try { param.parseParam(invalidStrValue); - Assert.fail(); + fail(); } catch (IllegalArgumentException ex) { //NOP } catch (Exception ex) { - Assert.fail(); + fail(); } } if (outOfRangeValue != null) { try { param.parseParam(outOfRangeValue); - Assert.fail(); + fail(); } catch (IllegalArgumentException ex) { //NOP } catch (Exception ex) { - Assert.fail(); + fail(); } } } @@ -81,7 +83,7 @@ public class TestParam { param = new ShortParam("S", (short) 1, 8) { }; - Assert.assertEquals(new Short((short)01777), param.parse("01777")); + assertEquals(new Short((short)01777), param.parse("01777")); } @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java index 72d79a93929..2e5c646f374 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java @@ -18,16 +18,20 @@ package org.apache.hadoop.lib.wsrs; -import com.sun.jersey.api.core.HttpContext; -import com.sun.jersey.api.core.HttpRequestContext; 
-import com.sun.jersey.core.spi.component.ComponentScope; -import junit.framework.Assert; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import java.security.Principal; + +import javax.ws.rs.core.MultivaluedMap; + import org.junit.Test; import org.mockito.Mockito; import org.slf4j.MDC; -import javax.ws.rs.core.MultivaluedMap; -import java.security.Principal; +import com.sun.jersey.api.core.HttpContext; +import com.sun.jersey.api.core.HttpRequestContext; +import com.sun.jersey.core.spi.component.ComponentScope; public class TestUserProvider { @@ -43,8 +47,8 @@ public class TestUserProvider { HttpContext context = Mockito.mock(HttpContext.class); Mockito.when(context.getRequest()).thenReturn(request); UserProvider up = new UserProvider(); - Assert.assertNull(up.getValue(context)); - Assert.assertNull(MDC.get("user")); + assertNull(up.getValue(context)); + assertNull(MDC.get("user")); } @Test @@ -59,8 +63,8 @@ public class TestUserProvider { HttpContext context = Mockito.mock(HttpContext.class); Mockito.when(context.getRequest()).thenReturn(request); UserProvider up = new UserProvider(); - Assert.assertEquals(up.getValue(context).getName(), "foo"); - Assert.assertEquals(MDC.get("user"), "foo"); + assertEquals(up.getValue(context).getName(), "foo"); + assertEquals(MDC.get("user"), "foo"); } @Test @@ -77,15 +81,15 @@ public class TestUserProvider { HttpContext context = Mockito.mock(HttpContext.class); Mockito.when(context.getRequest()).thenReturn(request); UserProvider up = new UserProvider(); - Assert.assertEquals(up.getValue(context).getName(), "bar"); - Assert.assertEquals(MDC.get("user"), "bar"); + assertEquals(up.getValue(context).getName(), "bar"); + assertEquals(MDC.get("user"), "bar"); } @Test public void getters() { UserProvider up = new UserProvider(); - Assert.assertEquals(up.getScope(), ComponentScope.PerRequest); - Assert.assertEquals(up.getInjectable(null, null, Principal.class), up); - Assert.assertNull(up.getInjectable(null, null, String.class)); + assertEquals(up.getScope(), ComponentScope.PerRequest); + assertEquals(up.getInjectable(null, null, Principal.class), up); + assertNull(up.getInjectable(null, null, String.class)); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HTestCase.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HTestCase.java index 316fc9f7a2b..38956994d53 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HTestCase.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HTestCase.java @@ -17,14 +17,14 @@ */ package org.apache.hadoop.test; -import junit.framework.Assert; +import static org.junit.Assert.fail; + +import java.text.MessageFormat; import org.apache.hadoop.util.Time; import org.junit.Rule; import org.junit.rules.MethodRule; -import java.text.MessageFormat; - public abstract class HTestCase { public static final String TEST_WAITFOR_RATIO_PROP = "test.waitfor.ratio"; @@ -161,7 +161,7 @@ public abstract class HTestCase { } if (!eval) { if (failIfTimeout) { - Assert.fail(MessageFormat.format("Waiting timed out after [{0}] msec", timeout)); + fail(MessageFormat.format("Waiting timed out after [{0}] msec", timeout)); } else { System.out.println(MessageFormat.format("Waiting timed out after [{0}] msec", timeout)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java index f27d0efaae9..57af33664e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java @@ -17,12 +17,12 @@ */ package org.apache.hadoop.test; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.security.UserGroupInformation; - import java.util.ArrayList; import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; + /** * Helper to configure FileSystemAccess user/group and proxyuser * configuration for testing using Java System properties. diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDirHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDirHelper.java index c3f3d53c38d..3368c79c7a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDirHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDirHelper.java @@ -17,16 +17,16 @@ */ package org.apache.hadoop.test; -import org.junit.Test; -import org.junit.rules.MethodRule; -import org.junit.runners.model.FrameworkMethod; -import org.junit.runners.model.Statement; - import java.io.File; import java.io.IOException; import java.text.MessageFormat; import java.util.concurrent.atomic.AtomicInteger; +import org.junit.Test; +import org.junit.rules.MethodRule; +import org.junit.runners.model.FrameworkMethod; +import org.junit.runners.model.Statement; + public class TestDirHelper implements MethodRule { @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestExceptionHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestExceptionHelper.java index 8411db47547..e3af6435132 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestExceptionHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestExceptionHelper.java @@ -17,14 +17,15 @@ */ package org.apache.hadoop.test; -import junit.framework.Assert; +import static org.junit.Assert.fail; + +import java.util.regex.Pattern; + import org.junit.Test; import org.junit.rules.MethodRule; import org.junit.runners.model.FrameworkMethod; import org.junit.runners.model.Statement; -import java.util.regex.Pattern; - public class TestExceptionHelper implements MethodRule { @Test @@ -41,7 +42,7 @@ public class TestExceptionHelper implements MethodRule { statement.evaluate(); if (testExceptionAnnotation != null) { Class klass = testExceptionAnnotation.exception(); - Assert.fail("Expected Exception: " + klass.getSimpleName()); + fail("Expected Exception: " + klass.getSimpleName()); } } catch (Throwable ex) { if (testExceptionAnnotation != null) { @@ -50,10 +51,10 @@ public class TestExceptionHelper implements MethodRule { String regExp = testExceptionAnnotation.msgRegExp(); Pattern pattern = Pattern.compile(regExp); if (!pattern.matcher(ex.getMessage()).find()) { - Assert.fail("Expected Exception Message pattern: " + regExp + " got message: " + ex.getMessage()); + fail("Expected Exception Message pattern: " + regExp + " got message: " + ex.getMessage()); } } else { - Assert.fail("Expected Exception: " + klass.getSimpleName() + " 
got: " + ex.getClass().getSimpleName()); + fail("Expected Exception: " + klass.getSimpleName() + " got: " + ex.getClass().getSimpleName()); } } else { throw ex; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java index b001c1cb989..f4996de542c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java @@ -18,19 +18,9 @@ package org.apache.hadoop.test; -import junit.framework.Assert; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.util.Time; -import org.junit.Test; -import org.mortbay.jetty.Server; -import org.mortbay.jetty.servlet.Context; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; -import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -39,6 +29,19 @@ import java.io.OutputStream; import java.net.HttpURLConnection; import java.net.URL; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.Time; +import org.junit.Test; +import org.mortbay.jetty.Server; +import org.mortbay.jetty.servlet.Context; + public class TestHFSTestCase extends HFSTestCase { @Test(expected = IllegalStateException.class) @@ -69,7 +72,7 @@ public class TestHFSTestCase extends HFSTestCase { @Test @TestDir public void testDirAnnotation() throws Exception { - Assert.assertNotNull(TestDirHelper.getTestDir()); + assertNotNull(TestDirHelper.getTestDir()); } @Test @@ -81,8 +84,8 @@ public class TestHFSTestCase extends HFSTestCase { } }); long end = Time.now(); - Assert.assertEquals(waited, 0, 50); - Assert.assertEquals(end - start - waited, 0, 50); + assertEquals(waited, 0, 50); + assertEquals(end - start - waited, 0, 50); } @Test @@ -95,8 +98,8 @@ public class TestHFSTestCase extends HFSTestCase { } }); long end = Time.now(); - Assert.assertEquals(waited, -1); - Assert.assertEquals(end - start, 200, 50); + assertEquals(waited, -1); + assertEquals(end - start, 200, 50); } @Test @@ -109,8 +112,8 @@ public class TestHFSTestCase extends HFSTestCase { } }); long end = Time.now(); - Assert.assertEquals(waited, -1); - Assert.assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio()); + assertEquals(waited, -1); + assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio()); } @Test @@ -119,7 +122,7 @@ public class TestHFSTestCase extends HFSTestCase { long start = Time.now(); sleep(100); long end = Time.now(); - Assert.assertEquals(end - start, 100, 50); + assertEquals(end - start, 100, 50); } @Test @@ -128,7 +131,7 @@ public class TestHFSTestCase extends HFSTestCase { long start = Time.now(); sleep(100); long end = Time.now(); - Assert.assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio()); + assertEquals(end - start, 100 * getWaitForRatio(), 50 * 
getWaitForRatio()); } @Test @@ -141,8 +144,8 @@ public class TestHFSTestCase extends HFSTestCase { os.write(new byte[]{1}); os.close(); InputStream is = fs.open(new Path(TestHdfsHelper.getHdfsTestDir(), "foo")); - Assert.assertEquals(is.read(), 1); - Assert.assertEquals(is.read(), -1); + assertEquals(is.read(), 1); + assertEquals(is.read(), -1); is.close(); } finally { fs.close(); @@ -167,9 +170,9 @@ public class TestHFSTestCase extends HFSTestCase { server.start(); URL url = new URL(TestJettyHelper.getJettyURL(), "/bar"); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); - Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); + assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream())); - Assert.assertEquals(reader.readLine(), "foo"); + assertEquals(reader.readLine(), "foo"); reader.close(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java index f6af2a6c8e7..10c798f3faa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java @@ -18,23 +18,25 @@ package org.apache.hadoop.test; -import junit.framework.Assert; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; -import org.apache.hadoop.util.Time; -import org.junit.Test; -import org.mortbay.jetty.Server; -import org.mortbay.jetty.servlet.Context; - -import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; import java.net.HttpURLConnection; import java.net.URL; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.util.Time; +import org.junit.Test; +import org.mortbay.jetty.Server; +import org.mortbay.jetty.servlet.Context; + public class TestHTestCase extends HTestCase { @Test(expected = IllegalStateException.class) @@ -55,7 +57,7 @@ public class TestHTestCase extends HTestCase { @Test @TestDir public void testDirAnnotation() throws Exception { - Assert.assertNotNull(TestDirHelper.getTestDir()); + assertNotNull(TestDirHelper.getTestDir()); } @Test @@ -67,8 +69,8 @@ public class TestHTestCase extends HTestCase { } }); long end = Time.now(); - Assert.assertEquals(waited, 0, 50); - Assert.assertEquals(end - start - waited, 0, 50); + assertEquals(waited, 0, 50); + assertEquals(end - start - waited, 0, 50); } @Test @@ -81,8 +83,8 @@ public class TestHTestCase extends HTestCase { } }); long end = Time.now(); - Assert.assertEquals(waited, -1); - Assert.assertEquals(end - start, 200, 50); + assertEquals(waited, -1); + assertEquals(end - start, 200, 50); } @Test @@ -95,8 +97,8 @@ public class TestHTestCase extends HTestCase { } }); long end = Time.now(); - Assert.assertEquals(waited, -1); - Assert.assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio()); + assertEquals(waited, -1); + assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio()); } @Test @@ -105,7 +107,7 @@ public class 
TestHTestCase extends HTestCase { long start = Time.now(); sleep(100); long end = Time.now(); - Assert.assertEquals(end - start, 100, 50); + assertEquals(end - start, 100, 50); } @Test @@ -114,7 +116,7 @@ public class TestHTestCase extends HTestCase { long start = Time.now(); sleep(100); long end = Time.now(); - Assert.assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio()); + assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio()); } public static class MyServlet extends HttpServlet { @@ -135,9 +137,9 @@ public class TestHTestCase extends HTestCase { server.start(); URL url = new URL(TestJettyHelper.getJettyURL(), "/bar"); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); - Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); + assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream())); - Assert.assertEquals(reader.readLine(), "foo"); + assertEquals(reader.readLine(), "foo"); reader.close(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java index cd030695f9b..2afd7d35a41 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.test; +import java.io.File; +import java.util.concurrent.atomic.AtomicInteger; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -26,9 +29,6 @@ import org.junit.Test; import org.junit.runners.model.FrameworkMethod; import org.junit.runners.model.Statement; -import java.io.File; -import java.util.concurrent.atomic.AtomicInteger; - public class TestHdfsHelper extends TestDirHelper { @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java index 1a4f5b215e0..95cb10463cd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java @@ -17,17 +17,17 @@ */ package org.apache.hadoop.test; +import java.net.InetAddress; +import java.net.MalformedURLException; +import java.net.ServerSocket; +import java.net.URL; + import org.junit.Test; import org.junit.rules.MethodRule; import org.junit.runners.model.FrameworkMethod; import org.junit.runners.model.Statement; import org.mortbay.jetty.Server; -import java.net.InetAddress; -import java.net.MalformedURLException; -import java.net.ServerSocket; -import java.net.URL; - public class TestJettyHelper implements MethodRule { @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index c36c2fb1dcc..a43519c3116 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -168,6 +168,8 @@ Release 2.0.1-alpha - UNRELEASED HDFS-1249. With fuse-dfs, chown which only has owner (or only group) argument fails with Input/output error. (Colin Patrick McCabe via eli) + HDFS-3583. Convert remaining tests to Junit4. 
(Andrew Wang via atm) + OPTIMIZATIONS HDFS-2982. Startup performance suffers when there are many edit log diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java index ece2261f7a9..89932cc67cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java @@ -17,7 +17,11 @@ */ package org.apache.hadoop.cli; -import org.apache.hadoop.cli.util.*; +import org.apache.hadoop.cli.util.CLICommandDFSAdmin; +import org.apache.hadoop.cli.util.CLICommandTypes; +import org.apache.hadoop.cli.util.CLITestCmd; +import org.apache.hadoop.cli.util.CommandExecutor; +import org.apache.hadoop.cli.util.FSCmdExecutor; import org.apache.hadoop.hdfs.tools.DFSAdmin; public class CLITestCmdDFS extends CLITestCmd { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java index 0b48665d8ae..ebe7b5d1f67 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java @@ -18,6 +18,8 @@ package org.apache.hadoop.cli; +import static org.junit.Assert.assertTrue; + import org.apache.hadoop.cli.util.CLICommand; import org.apache.hadoop.cli.util.CommandExecutor.Result; import org.apache.hadoop.fs.FileSystem; @@ -27,7 +29,6 @@ import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.authorize.PolicyProvider; import org.junit.After; -import static org.junit.Assert.assertTrue; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java index 1d90050a7e9..966db91e15f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java @@ -17,16 +17,20 @@ */ package org.apache.hadoop.fs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; -import junit.framework.TestCase; - -public class TestGlobPaths extends TestCase { +public class TestGlobPaths { static class RegexPathFilter implements PathFilter { @@ -48,8 +52,8 @@ public class TestGlobPaths extends TestCase { static final String USER_DIR = "/user/"+System.getProperty("user.name"); private Path[] path = new Path[NUM_OF_PATHS]; - @Override - protected void setUp() throws Exception { + @Before + public void setUp() throws Exception { try { Configuration conf = new HdfsConfiguration(); dfsCluster = new MiniDFSCluster.Builder(conf).build(); @@ -59,13 +63,14 @@ public class TestGlobPaths extends TestCase { } } - @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws Exception { if(dfsCluster!=null) { dfsCluster.shutdown(); } } + @Test public void testPathFilter() 
throws IOException { try { String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b" }; @@ -78,6 +83,7 @@ public class TestGlobPaths extends TestCase { } } + @Test public void testPathFilterWithFixedLastComponent() throws IOException { try { String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b", @@ -91,6 +97,7 @@ public class TestGlobPaths extends TestCase { } } + @Test public void testGlob() throws Exception { //pTestEscape(); // need to wait until HADOOP-1995 is fixed pTestJavaRegexSpecialChars(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java index 34410ed02f2..a4f2d5fe3f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java @@ -18,6 +18,9 @@ package org.apache.hadoop.fs; +import static org.apache.hadoop.fs.FileContextTestHelper.exists; +import static org.apache.hadoop.fs.FileContextTestHelper.getTestRootPath; + import java.io.IOException; import java.net.URISyntaxException; @@ -27,8 +30,8 @@ import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.UserGroupInformation; import org.junit.After; import org.junit.AfterClass; @@ -37,8 +40,6 @@ import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; -import static org.apache.hadoop.fs.FileContextTestHelper.*; - public class TestHDFSFileContextMainOperations extends FileContextMainOperationsBaseTest { private static MiniDFSCluster cluster; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java index 1d5def6b484..516ff1a3d33 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java index bf22c7187fe..d28736cffae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.fs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + import 
java.io.File; import java.io.IOException; import java.io.InputStream; @@ -25,19 +28,15 @@ import java.net.URI; import java.net.URISyntaxException; import java.net.URL; -import junit.framework.TestCase; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FsUrlStreamHandlerFactory; -import org.apache.hadoop.fs.Path; +import org.junit.Test; /** * Test of the URL stream handler factory. */ -public class TestUrlStreamHandler extends TestCase { +public class TestUrlStreamHandler { /** * Test opening and reading from an InputStream through a hdfs:// URL. @@ -47,6 +46,7 @@ public class TestUrlStreamHandler extends TestCase { * * @throws IOException */ + @Test public void testDfsUrls() throws IOException { Configuration conf = new HdfsConfiguration(); @@ -105,6 +105,7 @@ public class TestUrlStreamHandler extends TestCase { * @throws IOException * @throws URISyntaxException */ + @Test public void testFileUrls() throws IOException, URISyntaxException { // URLStreamHandler is already set in JVM by testDfsUrls() Configuration conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java index d6890f5c8f1..e9e14ce8b11 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.fs.loadGenerator; +import static org.junit.Assert.assertEquals; + import java.io.BufferedReader; import java.io.File; import java.io.FileReader; @@ -27,9 +29,6 @@ import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; - -import static org.junit.Assert.*; - import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java index ca4862bd010..359a47ad95f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java @@ -17,9 +17,12 @@ */ package org.apache.hadoop.fs.permission; -import java.io.IOException; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; -import junit.framework.TestCase; +import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; @@ -32,8 +35,9 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; +import org.junit.Test; -public class TestStickyBit extends TestCase { +public class TestStickyBit { static UserGroupInformation user1 = UserGroupInformation.createUserForTesting("theDoctor", new String[] 
{"tardis"}); @@ -158,6 +162,7 @@ public class TestStickyBit extends TestCase { assertFalse(hdfs.getFileStatus(f).getPermission().getStickyBit()); } + @Test public void testGeneralSBBehavior() throws IOException, InterruptedException { MiniDFSCluster cluster = null; try { @@ -195,6 +200,7 @@ public class TestStickyBit extends TestCase { * Test that one user can't rename/move another user's file when the sticky * bit is set. */ + @Test public void testMovingFiles() throws IOException, InterruptedException { MiniDFSCluster cluster = null; @@ -243,6 +249,7 @@ public class TestStickyBit extends TestCase { * the sticky bit back on re-start, and that no extra sticky bits appear after * re-start. */ + @Test public void testStickyBitPersistence() throws IOException { MiniDFSCluster cluster = null; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java index 74c32d9c720..74d8bca614d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java @@ -23,6 +23,9 @@ package org.apache.hadoop.fs.viewfs; * Since viewfs has overlayed ViewFsFileStatus, we ran into * serialization problems. THis test is test the fix. */ +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -40,11 +43,9 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.security.UserGroupInformation; - import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; -import static org.junit.Assert.*; public class TestViewFsFileStatusHdfs { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java index ff27da44cb4..b78d7075d86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java @@ -17,13 +17,12 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; + import java.io.IOException; import java.io.OutputStream; import java.util.Random; -import junit.framework.Assert; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -121,16 +120,16 @@ public class AppendTestUtil { FSDataInputStream in = fs.open(p); if (in.getWrappedStream() instanceof DFSInputStream) { long len = ((DFSInputStream)in.getWrappedStream()).getFileLength(); - TestCase.assertEquals(length, len); + assertEquals(length, len); } else { - TestCase.assertEquals(length, status.getLen()); + assertEquals(length, status.getLen()); } for(i++; i < length; i++) { - TestCase.assertEquals((byte)i, (byte)in.read()); + assertEquals((byte)i, (byte)in.read()); } i = -(int)length; - TestCase.assertEquals(-1, in.read()); //EOF + assertEquals(-1, in.read()); //EOF in.close(); } catch(IOException ioe) { throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe); @@ -175,7 +174,7 @@ public 
class AppendTestUtil { private static void checkData(final byte[] actual, int from, final byte[] expected, String message) { for (int idx = 0; idx < actual.length; idx++) { - Assert.assertEquals(message+" byte "+(from+idx)+" differs. expected "+ + assertEquals(message+" byte "+(from+idx)+" differs. expected "+ expected[from+idx]+" actual "+actual[idx], expected[from+idx], actual[idx]); actual[idx] = 0; @@ -189,7 +188,7 @@ public class AppendTestUtil { final FSDataOutputStream out = fs.create(p, (short)1); out.write(bytes); out.close(); - Assert.assertEquals(bytes.length, fs.getFileStatus(p).getLen()); + assertEquals(bytes.length, fs.getFileStatus(p).getLen()); } for(int i = 2; i < 500; i++) { @@ -197,7 +196,7 @@ public class AppendTestUtil { final FSDataOutputStream out = fs.append(p); out.write(bytes); out.close(); - Assert.assertEquals(i*bytes.length, fs.getFileStatus(p).getLen()); + assertEquals(i*bytes.length, fs.getFileStatus(p).getLen()); } } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java index 0d47b9c5baf..b3dd55a9afd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java @@ -36,7 +36,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; - import org.apache.log4j.Level; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java index 80503e67ea0..24cac94f6e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java @@ -18,25 +18,26 @@ package org.apache.hadoop.hdfs; -import java.net.Socket; -import java.net.InetSocketAddress; -import java.io.DataOutputStream; -import java.util.Random; -import java.util.List; -import java.io.IOException; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import java.io.DataOutputStream; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.util.List; +import java.util.Random; + +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.net.NetUtils; -import static org.junit.Assert.*; - /** * A helper class to setup the cluster, and get to BlockReader and DataNode for a block. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 39e6f26c940..b20baa9bd2c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -55,7 +55,6 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSTestUtil.Builder; import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.protocol.DatanodeID; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java index d26e2cb4e2c..29306dc704a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs; +import java.io.IOException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -27,8 +29,6 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; -import java.io.IOException; - /** This is a comprehensive append test that tries * all combinations of file length and number of appended bytes * In each iteration, it creates a file of len1. Then reopen diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java index 582767c8010..9590bc3cf9f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java @@ -17,18 +17,20 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.fail; + import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; - -import static org.junit.Assert.*; import org.junit.After; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java index 117952a7e76..3f29932c1a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java @@ -17,27 +17,27 @@ */ package org.apache.hadoop.hdfs; -import java.util.ArrayList; +import static org.junit.Assert.assertEquals; -import junit.framework.TestCase; 
-import org.apache.hadoop.conf.Configuration; +import java.util.ArrayList; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.junit.Test; /** * This test ensures that the balancer bandwidth is dynamically adjusted * correctly. */ -public class TestBalancerBandwidth extends TestCase { +public class TestBalancerBandwidth { final static private Configuration conf = new Configuration(); final static private int NUM_OF_DATANODES = 2; final static private int DEFAULT_BANDWIDTH = 1024*1024; public static final Log LOG = LogFactory.getLog(TestBalancerBandwidth.class); + @Test public void testBalancerBandwidth() throws Exception { /* Set bandwidthPerSec to a low value of 1M bps. */ conf.setLong( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java index be6e741a0a0..a885ff4b13d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java @@ -17,26 +17,24 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; -import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.BlockMissingException; +import org.junit.Test; -public class TestBlockMissingException extends TestCase { +public class TestBlockMissingException { final static Log LOG = LogFactory.getLog("org.apache.hadoop.hdfs.TestBlockMissing"); final static int NUM_DATANODES = 3; @@ -47,6 +45,7 @@ public class TestBlockMissingException extends TestCase { /** * Test DFS Raid */ + @Test public void testBlockMissingException() throws Exception { LOG.info("Test testBlockMissingException started."); long blockSize = 1024L; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java index e2547144fa3..59dbb302c4b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java @@ -17,24 +17,26 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; + import java.io.IOException; import java.util.ArrayList; -import junit.framework.TestCase; - import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import 
org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; +import org.junit.Test; /** * This class tests DatanodeDescriptor.getBlocksScheduled() at the * NameNode. This counter is supposed to keep track of blocks currently * scheduled to a datanode. */ -public class TestBlocksScheduledCounter extends TestCase { +public class TestBlocksScheduledCounter { + @Test public void testBlocksScheduledCounter() throws IOException { MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()) .build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java index ec2d41c06de..e7a1e14ddaf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java @@ -18,21 +18,20 @@ package org.apache.hadoop.hdfs; -import java.util.List; - -import org.apache.hadoop.hdfs.DFSClient; -import org.apache.commons.logging.impl.Log4JLogger; -import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; -import org.apache.hadoop.fs.Path; -import org.apache.log4j.Level; - -import org.junit.Test; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.never; + +import java.util.List; + +import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; +import org.apache.log4j.Level; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; public class TestClientBlockVerification { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java index a0b7b0d151c..ee39cfe5332 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java @@ -26,11 +26,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; -import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.io.IOUtils; - -import org.junit.Test; import org.junit.Assert; +import org.junit.Test; /** * This tests that the pipeline recovery related client protocol works correctly.
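The hunks above and below repeat one mechanical JUnit 3 to JUnit 4 migration: drop the junit.framework.TestCase superclass, annotate every test method with @Test, and take assertions as static imports from org.junit.Assert instead of inheriting them. A minimal before/after sketch of the pattern, using an illustrative class name rather than one from this patch:

// Before (JUnit 3): tests are discovered by the test* naming convention,
// and assertions are inherited from junit.framework.TestCase.
//
//   public class TestExample extends junit.framework.TestCase {
//     public void testAddition() {
//       assertEquals(2, 1 + 1);
//     }
//   }

// After (JUnit 4): no superclass; any public method annotated with @Test
// is run, and assertions are statically imported.
import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class TestExample {
  @Test
  public void testAddition() {
    assertEquals(2, 1 + 1);
  }
}

Under JUnit 4 a test method without @Test is silently skipped, which is why every converted method in these hunks gains the annotation.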
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java index eb5e08880ff..354a17af9b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java @@ -17,37 +17,33 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.spy; + +import java.io.IOException; import java.net.InetSocketAddress; import java.net.Socket; -import java.io.IOException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSClient; -import org.apache.hadoop.hdfs.DFSInputStream; -import org.apache.hadoop.hdfs.SocketCache; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.io.IOUtils; - import org.apache.hadoop.security.token.Token; -import org.junit.Test; import org.junit.AfterClass; import org.junit.BeforeClass; -import static org.junit.Assert.*; - +import org.junit.Test; import org.mockito.Matchers; import org.mockito.Mockito; -import org.mockito.stubbing.Answer; import org.mockito.invocation.InvocationOnMock; -import static org.mockito.Mockito.spy; +import org.mockito.stubbing.Answer; /** * This class tests the client connection caching in a single node diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java index 010d3ff9f6f..15a26d3e437 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java @@ -18,21 +18,23 @@ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.File; -import java.io.RandomAccessFile; import java.io.IOException; +import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.Random; -import org.junit.Test; -import static org.junit.Assert.*; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.io.IOUtils; +import org.junit.Test; /** * A JUnit test for corrupted file handling. 
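Several files below (TestDFSFinalize, TestDFSPermission, TestDFSRollback, TestDFSStorageStateRecovery) also convert the JUnit 3 lifecycle overrides setUp()/tearDown() into public methods annotated with @Before and @After. A self-contained sketch of that half of the migration, with a temporary directory standing in for the MiniDFSCluster resource the real tests manage:

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.nio.file.Files;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class TestLifecycleSketch {
  // Stand-in resource; the patched tests hold a MiniDFSCluster here.
  private File dir;

  // JUnit 3 overrode "protected void setUp()"; JUnit 4 expects a public
  // method carrying the @Before annotation instead.
  @Before
  public void setUp() throws Exception {
    dir = Files.createTempDirectory("sketch").toFile();
  }

  // Likewise "protected void tearDown()" becomes a public @After method,
  // still guarding against a resource that never came up.
  @After
  public void tearDown() {
    if (dir != null) {
      dir.delete();
    }
  }

  @Test
  public void testResourceIsAvailable() {
    assertTrue(dir.isDirectory());
  }
}

An @After method runs even when the test body fails, matching the old tearDown() semantics, so the null guard carried over from the original code remains the right defensive pattern.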
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java index d21592e4850..c61c0b1a85e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java @@ -24,20 +24,25 @@ */ package org.apache.hadoop.hdfs; -import java.io.IOException; -import java.util.ArrayList; -import junit.framework.TestCase; -import org.apache.hadoop.conf.Configuration; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY; -import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.ArrayList; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.junit.Test; -public class TestDFSAddressConfig extends TestCase { +public class TestDFSAddressConfig { + @Test public void testDFSAddressConfig() throws IOException { Configuration conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java index 8f4bb363800..dccc82f1a42 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java @@ -17,15 +17,15 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.fail; + import java.io.IOException; import java.io.OutputStream; -import org.junit.*; -import static org.junit.Assert.fail; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.junit.Test; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index 2f6d39a56fa..520f46382cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyLong; import static org.mockito.Matchers.anyString; @@ -38,8 +42,6 @@ import java.util.Arrays; import java.util.List; import java.util.concurrent.TimeUnit; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; @@ 
-75,6 +77,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.apache.log4j.Level; +import org.junit.Test; import org.mockito.Mockito; import org.mockito.internal.stubbing.answers.ThrowsException; import org.mockito.invocation.InvocationOnMock; @@ -86,7 +89,7 @@ import com.google.common.base.Joiner; * These tests make sure that DFSClient retries fetching data from DFS * properly in case of errors. */ -public class TestDFSClientRetries extends TestCase { +public class TestDFSClientRetries { private static final String ADDRESS = "0.0.0.0"; final static private int PING_INTERVAL = 1000; final static private int MIN_SLEEP_TIME = 1000; @@ -146,6 +149,7 @@ public class TestDFSClientRetries extends TestCase { * This makes sure that when DN closes clients socket after client had * successfully connected earlier, the data can still be fetched. */ + @Test public void testWriteTimeoutAtDataNode() throws IOException, InterruptedException { final int writeTimeout = 100; //milliseconds. @@ -198,6 +202,7 @@ public class TestDFSClientRetries extends TestCase { * of times trying to add a block */ @SuppressWarnings("serial") + @Test public void testNotYetReplicatedErrors() throws IOException { final String exceptionMsg = "Nope, not replicated yet..."; @@ -242,6 +247,7 @@ public class TestDFSClientRetries extends TestCase { * operation, and not over the lifetime of the stream. It is a regression * test for HDFS-127. */ + @Test public void testFailuresArePerOperation() throws Exception { long fileSize = 4096; @@ -317,6 +323,7 @@ public class TestDFSClientRetries extends TestCase { * a client to safely retry a call and still produce a correct * file. See HDFS-3031. */ + @Test public void testIdempotentAllocateBlockAndClose() throws Exception { final String src = "/testIdempotentAllocateBlock"; Path file = new Path(src); @@ -457,6 +464,7 @@ public class TestDFSClientRetries extends TestCase { /** * Test that a DFSClient waits for random time before retry on busy blocks. */ + @Test public void testDFSClientRetriesOnBusyBlocks() throws IOException { System.out.println("Testing DFSClient random waiting on busy blocks."); @@ -695,6 +703,7 @@ public class TestDFSClientRetries extends TestCase { public int get() { return counter; } } + @Test public void testGetFileChecksum() throws Exception { final String f = "/testGetFileChecksum"; final Path p = new Path(f); @@ -731,6 +740,7 @@ public class TestDFSClientRetries extends TestCase { * RPC to the server and set rpcTimeout to less than n and ensure * that socketTimeoutException is obtained */ + @Test public void testClientDNProtocolTimeout() throws IOException { final Server server = new TestServer(1, true); server.start(); @@ -760,6 +770,7 @@ public class TestDFSClientRetries extends TestCase { } /** Test client retry with namenode restarting. */ + @Test public void testNamenodeRestart() throws Exception { ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL); @@ -885,6 +896,7 @@ public class TestDFSClientRetries extends TestCase { } } + @Test public void testMultipleLinearRandomRetry() { parseMultipleLinearRandomRetry(null, ""); parseMultipleLinearRandomRetry(null, "11"); @@ -922,6 +934,7 @@ public class TestDFSClientRetries extends TestCase { * read call, so the client should expect consecutive calls to behave the same * way. See HDFS-3067. 
*/ + @Test public void testRetryOnChecksumFailure() throws UnresolvedLinkException, IOException { HdfsConfiguration conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java index dc3998a77a7..28ecf69e9b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java @@ -17,17 +17,21 @@ */ package org.apache.hadoop.hdfs; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + import java.io.File; import java.util.Collections; import java.util.List; -import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; +import org.junit.After; +import org.junit.Test; import com.google.common.collect.Lists; @@ -35,7 +39,7 @@ import com.google.common.collect.Lists; * This test ensures the appropriate response from the system when * the system is finalized. */ -public class TestDFSFinalize extends TestCase { +public class TestDFSFinalize { private static final Log LOG = LogFactory.getLog( "org.apache.hadoop.hdfs.TestDFSFinalize"); @@ -86,6 +90,7 @@ public class TestDFSFinalize extends TestCase { /** * This test attempts to finalize the NameNode and DataNode. */ + @Test public void testFinalize() throws Exception { UpgradeUtilities.initialize(); @@ -125,8 +130,8 @@ public class TestDFSFinalize extends TestCase { } // end numDir loop } - @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws Exception { LOG.info("Shutting down MiniDFSCluster"); if (cluster != null) cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java index 8a053f8c87b..71f0c130bf1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java @@ -17,21 +17,27 @@ */ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; -import java.io.*; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.DataOutputStream; +import java.io.FileNotFoundException; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.Time; +import org.junit.Test; /** * This class tests that the DFS command mkdirs cannot create subdirectories * from a file when passed an illegal path. HADOOP-281. 
*/ -public class TestDFSMkdirs extends TestCase { +public class TestDFSMkdirs { private void writeFile(FileSystem fileSys, Path name) throws IOException { DataOutputStream stm = fileSys.create(name); @@ -43,6 +49,7 @@ public class TestDFSMkdirs extends TestCase { * Tests mkdirs can create a directory that does not exist and will * not create a subdirectory off a file. */ + @Test public void testDFSMkdirs() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); @@ -80,6 +87,7 @@ public class TestDFSMkdirs extends TestCase { /** * Tests mkdir will not create directory when parent is missing. */ + @Test public void testMkdir() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java index 23dc0b9a5a9..2fef3dcf1dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java @@ -17,14 +17,15 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.Random; -import junit.framework.AssertionFailedError; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -35,13 +36,15 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; /** Unit tests for permission */ -public class TestDFSPermission extends TestCase { +public class TestDFSPermission { public static final Log LOG = LogFactory.getLog(TestDFSPermission.class); final private static Configuration conf = new HdfsConfiguration(); @@ -106,13 +109,13 @@ public class TestDFSPermission extends TestCase { } } - @Override + @Before public void setUp() throws IOException { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster.waitActive(); } - @Override + @After public void tearDown() throws IOException { if (cluster != null) { cluster.shutdown(); @@ -122,6 +125,7 @@ public class TestDFSPermission extends TestCase { /** This tests if permission setting in create, mkdir, and * setPermission works correctly */ + @Test public void testPermissionSetting() throws Exception { testPermissionSetting(OpType.CREATE); // test file creation testPermissionSetting(OpType.MKDIRS); // test directory creation @@ -257,6 +261,7 @@ public class TestDFSPermission extends TestCase { * check that ImmutableFsPermission can be used as the argument * to setPermission */ + @Test public void testImmutableFsPermission() throws IOException { fs = FileSystem.get(conf); @@ -266,6 +271,7 @@ public class TestDFSPermission extends TestCase { } /* 
check if the ownership of a file/directory is set correctly */ + @Test public void testOwnership() throws Exception { testOwnership(OpType.CREATE); // test file creation testOwnership(OpType.MKDIRS); // test directory creation @@ -354,6 +360,7 @@ public class TestDFSPermission extends TestCase { /* Check if namenode performs permission checking correctly for * superuser, file owner, group owner, and other users */ + @Test public void testPermissionChecking() throws Exception { try { fs = FileSystem.get(conf); @@ -533,7 +540,7 @@ public class TestDFSPermission extends TestCase { } catch(AccessControlException e) { assertTrue(expectPermissionDeny()); } - } catch (AssertionFailedError ae) { + } catch (AssertionError ae) { logPermissions(); throw ae; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java index 1b23c5f319d..7630dd650ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java @@ -16,6 +16,8 @@ * limitations under the License. */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.DataOutputStream; import java.io.IOException; @@ -26,8 +28,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.junit.Test; -public class TestDFSRemove extends junit.framework.TestCase { +public class TestDFSRemove { final Path dir = new Path("/test/remove/"); void list(FileSystem fs, String name) throws IOException { @@ -51,6 +54,7 @@ public class TestDFSRemove extends junit.framework.TestCase { return total; } + @Test public void testRemove() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java index ce1c62b48b3..1c00e509939 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java @@ -16,6 +16,9 @@ * limitations under the License. 
*/ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.DataOutputStream; import java.io.IOException; @@ -25,8 +28,9 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; +import org.junit.Test; -public class TestDFSRename extends junit.framework.TestCase { +public class TestDFSRename { static int countLease(MiniDFSCluster cluster) { return NameNodeAdapter.getLeaseManager(cluster.getNamesystem()).countLease(); } @@ -46,6 +50,7 @@ public class TestDFSRename extends junit.framework.TestCase { a_out.close(); } + @Test public void testRename() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java index 253edba2c18..18bd79fd9c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java @@ -19,22 +19,25 @@ package org.apache.hadoop.hdfs; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; import java.util.Collections; import java.util.List; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.util.StringUtils; +import org.junit.After; +import org.junit.Test; import com.google.common.base.Charsets; import com.google.common.collect.Lists; @@ -44,7 +47,7 @@ import com.google.common.collect.Lists; * the system when the system is rolled back under various storage state and * version conditions. */ -public class TestDFSRollback extends TestCase { +public class TestDFSRollback { private static final Log LOG = LogFactory.getLog( "org.apache.hadoop.hdfs.TestDFSRollback"); @@ -131,6 +134,7 @@ public class TestDFSRollback extends TestCase { * This test attempts to rollback the NameNode and DataNode under * a number of valid and invalid conditions. 
*/ + @Test public void testRollback() throws Exception { File[] baseDirs; UpgradeUtilities.initialize(); @@ -299,8 +303,8 @@ public class TestDFSRollback extends TestCase { } } - @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws Exception { LOG.info("Shutting down MiniDFSCluster"); if (cluster != null) cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index 30cb991745a..ce402b1878b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.File; @@ -33,8 +37,6 @@ import java.util.Random; import java.util.Scanner; import java.util.zip.GZIPOutputStream; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -53,11 +55,12 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; /** * This class tests commands from DFSShell. */ -public class TestDFSShell extends TestCase { +public class TestDFSShell { private static final Log LOG = LogFactory.getLog(TestDFSShell.class); static final String TEST_ROOT_DIR = @@ -94,6 +97,7 @@ public class TestDFSShell extends TestCase { System.out.println(Thread.currentThread().getStackTrace()[2] + " " + s); } + @Test public void testZeroSizeFile() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); @@ -136,6 +140,7 @@ public class TestDFSShell extends TestCase { } } + @Test public void testRecrusiveRm() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); @@ -161,6 +166,7 @@ public class TestDFSShell extends TestCase { } } + @Test public void testDu() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); @@ -210,6 +216,7 @@ public class TestDFSShell extends TestCase { } } + @Test public void testPut() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); @@ -308,6 +315,7 @@ public class TestDFSShell extends TestCase { /** check command error outputs and exit statuses. 
*/ + @Test public void testErrOutPut() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; @@ -448,6 +456,7 @@ public class TestDFSShell extends TestCase { } } + @Test public void testURIPaths() throws Exception { Configuration srcConf = new HdfsConfiguration(); Configuration dstConf = new HdfsConfiguration(); @@ -540,6 +549,7 @@ public class TestDFSShell extends TestCase { } } + @Test public void testText() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; @@ -614,6 +624,7 @@ public class TestDFSShell extends TestCase { } } + @Test public void testCopyToLocal() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); @@ -711,6 +722,7 @@ public class TestDFSShell extends TestCase { return path; } + @Test public void testCount() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); @@ -877,6 +889,7 @@ public class TestDFSShell extends TestCase { } } + @Test public void testFilePermissions() throws IOException { Configuration conf = new HdfsConfiguration(); @@ -942,6 +955,7 @@ public class TestDFSShell extends TestCase { /** * Tests various options of DFSShell. */ + @Test public void testDFSShell() throws IOException { Configuration conf = new HdfsConfiguration(); /* This tests some properties of ChecksumFileSystem as well. @@ -1209,6 +1223,7 @@ public class TestDFSShell extends TestCase { String run(int exitcode, String... options) throws IOException; } + @Test public void testRemoteException() throws Exception { UserGroupInformation tmpUGI = UserGroupInformation.createUserForTesting("tmpname", new String[] {"mygroup"}); @@ -1252,6 +1267,7 @@ public class TestDFSShell extends TestCase { } } + @Test public void testGet() throws IOException { DFSTestUtil.setLogLevel2All(FSInputChecker.LOG); final Configuration conf = new HdfsConfiguration(); @@ -1312,6 +1328,7 @@ public class TestDFSShell extends TestCase { } } + @Test public void testLsr() throws Exception { final Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); @@ -1369,6 +1386,7 @@ public class TestDFSShell extends TestCase { * and return -1 exit code. 
* @throws Exception */ + @Test public void testInvalidShell() throws Exception { Configuration conf = new Configuration(); // default FS (non-DFS) DFSAdmin admin = new DFSAdmin(); @@ -1378,6 +1396,7 @@ public class TestDFSShell extends TestCase { } // force Copy Option is -f + @Test public void testCopyCommandsWithForceOption() throws Exception { Configuration conf = new Configuration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java index 402c6e804a4..2b37e2e3cf1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java @@ -17,22 +17,24 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.io.PrintWriter; -import junit.framework.TestCase; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; -public class TestDFSShellGenericOptions extends TestCase { +public class TestDFSShellGenericOptions { + @Test public void testDFSCommand() throws IOException { String namenode = null; MiniDFSCluster cluster = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java index 8a96c89a3d8..797d5ca38c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java @@ -19,25 +19,27 @@ package org.apache.hadoop.hdfs; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.File; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.server.common.Storage; -import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.common.StorageInfo; +import org.junit.After; +import org.junit.Test; /** * This test ensures the appropriate response (successful or failure) from * a Datanode when the system is started with differing version combinations. 
*/ -public class TestDFSStartupVersions extends TestCase { +public class TestDFSStartupVersions { private static final Log LOG = LogFactory.getLog( "org.apache.hadoop.hdfs.TestDFSStartupVersions"); @@ -235,6 +237,7 @@ public class TestDFSStartupVersions extends TestCase { * this iterations version 3-tuple * */ + @Test public void testVersions() throws Exception { UpgradeUtilities.initialize(); Configuration conf = UpgradeUtilities.initializeStorageStateConf(1, @@ -276,8 +279,8 @@ public class TestDFSStartupVersions extends TestCase { } } - @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws Exception { LOG.info("Shutting down MiniDFSCluster"); if (cluster != null) cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java index 80a415e4d4f..c0392ac525d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java @@ -17,25 +17,32 @@ */ package org.apache.hadoop.hdfs; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; -import junit.framework.TestCase; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; - -import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; -import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; /** * This test ensures the appropriate response (successful or failure) from * the system when the system is started under various storage state and * version conditions. */ -public class TestDFSStorageStateRecovery extends TestCase { +public class TestDFSStorageStateRecovery { private static final Log LOG = LogFactory.getLog( "org.apache.hadoop.hdfs.TestDFSStorageStateRecovery"); @@ -311,6 +318,7 @@ public class TestDFSStorageStateRecovery extends TestCase { * This test iterates over the testCases table and attempts * to startup the NameNode normally. */ + @Test public void testNNStorageStates() throws Exception { String[] baseDirs; @@ -354,6 +362,7 @@ public class TestDFSStorageStateRecovery extends TestCase { * This test iterates over the testCases table for Datanode storage and * attempts to startup the DataNode normally. */ + @Test public void testDNStorageStates() throws Exception { String[] baseDirs; @@ -394,6 +403,7 @@ public class TestDFSStorageStateRecovery extends TestCase { * This test iterates over the testCases table for block pool storage and * attempts to startup the DataNode normally. 
*/ + @Test public void testBlockPoolStorageStates() throws Exception { String[] baseDirs; @@ -431,14 +441,14 @@ public class TestDFSStorageStateRecovery extends TestCase { } // end numDirs loop } - @Override - protected void setUp() throws Exception { + @Before + public void setUp() throws Exception { LOG.info("Setting up the directory structures."); UpgradeUtilities.initialize(); } - @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws Exception { LOG.info("Shutting down MiniDFSCluster"); if (cluster != null) cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java index ad3e6d8c551..b0879683637 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java @@ -19,6 +19,13 @@ package org.apache.hadoop.hdfs; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; +import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; +import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; +import static org.apache.hadoop.test.GenericTestUtils.assertExists; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; @@ -27,14 +34,10 @@ import java.util.regex.Pattern; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite; -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; - -import static org.apache.hadoop.test.GenericTestUtils.assertExists; import org.apache.hadoop.util.StringUtils; import org.junit.BeforeClass; import org.junit.Ignore; @@ -43,8 +46,6 @@ import org.junit.Test; import com.google.common.base.Charsets; import com.google.common.base.Joiner; -import static org.junit.Assert.*; - /** * This test ensures the appropriate response (successful or failure) from * the system when the system is upgraded under various storage state and diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java index ba92c569d95..8db1741e82a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java @@ -18,13 +18,22 @@ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; -import java.io.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static 
org.junit.Assert.fail; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileOutputStream; +import java.io.FileReader; +import java.io.IOException; import java.util.Iterator; import java.util.LinkedList; import java.util.TreeMap; import java.util.zip.CRC32; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSInputStream; import org.apache.hadoop.fs.FileStatus; @@ -34,8 +43,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.util.StringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.junit.Test; /** * This tests data transfer protocol handling in the Datanode. It sends @@ -46,7 +54,7 @@ import org.apache.commons.logging.LogFactory; * 2) hadoop-dfs-dir.txt : checksums that are compared in this test. * Please read hadoop-dfs-dir.txt for more information. */ -public class TestDFSUpgradeFromImage extends TestCase { +public class TestDFSUpgradeFromImage { private static final Log LOG = LogFactory .getLog(TestDFSUpgradeFromImage.class); @@ -182,6 +190,7 @@ public class TestDFSUpgradeFromImage extends TestCase { * Test that sets up a fake image from Hadoop 0.3.0 and tries to start a * NN, verifying that the correct error message is thrown. */ + @Test public void testFailOnPreUpgradeImage() throws IOException { Configuration conf = new HdfsConfiguration(); @@ -225,6 +234,7 @@ public class TestDFSUpgradeFromImage extends TestCase { /** * Test upgrade from 0.22 image */ + @Test public void testUpgradeFromRel22Image() throws IOException { unpackStorage(HADOOP22_IMAGE); upgradeAndVerify(); @@ -234,6 +244,7 @@ public class TestDFSUpgradeFromImage extends TestCase { * Test upgrade from 0.22 image with corrupt md5, make sure it * fails to upgrade */ + @Test public void testUpgradeFromCorruptRel22Image() throws IOException { unpackStorage(HADOOP22_IMAGE); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java index 6a0f4e76ce8..af1f6d6ea57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java @@ -18,10 +18,22 @@ package org.apache.hadoop.hdfs; -import org.junit.Before; -import org.junit.Test; - -import static org.junit.Assert.*; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES; +import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.IOException; import java.net.InetSocketAddress; @@ -34,18 +46,18 @@ import java.util.List; import java.util.Map; import org.apache.hadoop.HadoopIllegalArgumentException; -import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CommonConfigurationKeys; - -import static org.apache.hadoop.hdfs.DFSConfigKeys.*; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.Before; +import org.junit.Test; public class TestDFSUtil { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java index 1ef4eac997e..5699c10171d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java @@ -20,7 +20,10 @@ package org.apache.hadoop.hdfs; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.InputStream; import java.io.PrintWriter; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java index 6db59d281ff..a766263707f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java @@ -18,6 +18,10 @@ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; @@ -28,8 +32,6 @@ import java.util.concurrent.TimeoutException; import java.util.regex.Matcher; import java.util.regex.Pattern; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import 
org.apache.hadoop.conf.Configuration; @@ -40,11 +42,12 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Time; +import org.junit.Test; /** * This test verifies that block verification occurs on the datanode */ -public class TestDatanodeBlockScanner extends TestCase { +public class TestDatanodeBlockScanner { private static final Log LOG = LogFactory.getLog(TestDatanodeBlockScanner.class); @@ -118,6 +121,7 @@ public class TestDatanodeBlockScanner extends TestCase { return verificationTime; } + @Test public void testDatanodeBlockScanner() throws IOException, TimeoutException { long startTime = Time.now(); @@ -168,6 +172,7 @@ public class TestDatanodeBlockScanner extends TestCase { return MiniDFSCluster.corruptReplica(replica, blk); } + @Test public void testBlockCorruptionPolicy() throws IOException { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L); @@ -232,12 +237,14 @@ public class TestDatanodeBlockScanner extends TestCase { * 4. Test again waits until the block is reported with expected number * of good replicas. */ + @Test public void testBlockCorruptionRecoveryPolicy1() throws Exception { // Test recovery of 1 corrupt replica LOG.info("Testing corrupt replica recovery for one corrupt replica"); blockCorruptionRecoveryPolicy(4, (short)3, 1); } + @Test public void testBlockCorruptionRecoveryPolicy2() throws Exception { // Test recovery of 2 corrupt replicas LOG.info("Testing corrupt replica recovery for two corrupt replicas"); @@ -302,6 +309,7 @@ public class TestDatanodeBlockScanner extends TestCase { } /** Test if NameNode handles truncated blocks in block report */ + @Test public void testTruncatedBlockReport() throws Exception { final Configuration conf = new HdfsConfiguration(); final short REPLICATION_FACTOR = (short)2; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java index ccf4f7685db..2d18124061d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java @@ -17,9 +17,10 @@ */ package org.apache.hadoop.hdfs; -import java.io.IOException; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; -import junit.framework.TestCase; +import java.io.IOException; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; @@ -31,18 +32,19 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.log4j.Level; +import org.junit.Test; /** * This class tests that a file need not be closed before its * data can be read by another client. 
*/ -public class TestDatanodeDeath extends TestCase { +public class TestDatanodeDeath { { ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL); ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL); @@ -411,11 +413,15 @@ public class TestDatanodeDeath { } } + @Test public void testSimple0() throws IOException {simpleTest(0);} + @Test public void testSimple1() throws IOException {simpleTest(1);} + @Test public void testSimple2() throws IOException {simpleTest(2);} + @Test public void testComplex() throws IOException {complexTest();} } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java index 6bb7b456666..6ebe0a4b52a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java @@ -19,7 +19,8 @@ package org.apache.hadoop.hdfs; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; -import static org.mockito.Mockito.*; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; import java.net.InetSocketAddress; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java index a27739ec766..d5802b0b54c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java @@ -17,27 +17,30 @@ */ package org.apache.hadoop.hdfs; +import static org.apache.hadoop.test.MetricsAsserts.assertGauge; +import static org.apache.hadoop.test.MetricsAsserts.getMetrics; +import static org.junit.Assert.assertEquals; + import java.net.InetSocketAddress; import java.util.ArrayList; -import junit.framework.TestCase; import org.apache.hadoop.conf.Configuration; - import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import static org.apache.hadoop.test.MetricsAsserts.*; +import org.junit.Test; /** * This test ensures that all types of data node report work correctly. */ -public class TestDatanodeReport extends TestCase { +public class TestDatanodeReport { final static private Configuration conf = new HdfsConfiguration(); final static private int NUM_OF_DATANODES = 4; /** * This test attempts different types of datanode report. 
*/ + @Test public void testDatanodeReport() throws Exception { conf.setInt( DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); // 0.5s diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java index 6a66e947e79..27f13e3d251 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java @@ -17,18 +17,20 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; + import java.net.InetSocketAddress; import java.net.URI; -import junit.framework.TestCase; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.junit.Test; /** Test NameNode port defaulting code. */ -public class TestDefaultNameNodePort extends TestCase { +public class TestDefaultNameNodePort { + @Test public void testGetAddressFromString() throws Exception { assertEquals(NameNode.getAddress("foo").getPort(), NameNode.DEFAULT_PORT); @@ -40,6 +42,7 @@ public class TestDefaultNameNodePort extends TestCase { 555); } + @Test public void testGetAddressFromConf() throws Exception { Configuration conf = new HdfsConfiguration(); FileSystem.setDefaultUri(conf, "hdfs://foo/"); @@ -50,6 +53,7 @@ public class TestDefaultNameNodePort extends TestCase { assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT); } + @Test public void testGetUri() { assertEquals(NameNode.getUri(new InetSocketAddress("foo", 555)), URI.create("hdfs://foo:555")); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java index 518adddf106..71520922458 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java @@ -18,13 +18,15 @@ package org.apache.hadoop.hdfs; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.conf.Configuration; -import junit.framework.TestCase; +import static org.junit.Assert.assertTrue; -public class TestDeprecatedKeys extends TestCase { +import org.apache.hadoop.conf.Configuration; +import org.junit.Test; + +public class TestDeprecatedKeys { //Tests a deprecated key + @Test public void testDeprecatedKeys() throws Exception { Configuration conf = new HdfsConfiguration(); conf.set("topology.script.file.name", "xyz"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java index fea024c2c75..1faff65727a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java @@ -17,14 +17,16 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.RandomAccessFile; import java.util.Random;
-import junit.framework.TestCase; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -35,11 +37,12 @@ import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.IOUtils; +import org.junit.Test; /** * This class tests if FSInputChecker works correctly. */ -public class TestFSInputChecker extends TestCase { +public class TestFSInputChecker { static final long seed = 0xDEADBEEFL; static final int BYTES_PER_SUM = 10; static final int BLOCK_SIZE = 2*BYTES_PER_SUM; @@ -291,6 +294,7 @@ public class TestFSInputChecker extends TestCase { in.close(); } + @Test public void testFSInputChecker() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java index da18bbe0cc7..a3b3f808eb9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java @@ -17,21 +17,24 @@ */ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; -import java.io.*; -import java.util.Random; -import org.apache.hadoop.conf.Configuration; -import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import java.io.IOException; +import java.util.Random; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.junit.Test; /** * This class tests if FSOutputSummer works correctly. */ -public class TestFSOutputSummer extends TestCase { +public class TestFSOutputSummer { private static final long seed = 0xDEADBEEFL; private static final int BYTES_PER_CHECKSUM = 10; private static final int BLOCK_SIZE = 2*BYTES_PER_CHECKSUM; @@ -111,6 +114,7 @@ public class TestFSOutputSummer extends TestCase { /** * Test write operation for output stream in DFS. 
*/ + @Test public void testFSOutputSummer() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java index 20a7b5b9834..f488040c492 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java @@ -17,12 +17,16 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import junit.framework.TestCase; - +import org.apache.commons.logging.LogFactory; +import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -36,16 +40,14 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; - -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.impl.Log4JLogger; import org.apache.log4j.Level; +import org.junit.Test; /** * This class tests the building blocks that are needed to * support HDFS appends. */ -public class TestFileAppend2 extends TestCase { +public class TestFileAppend2 { { ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL); @@ -79,6 +81,7 @@ public class TestFileAppend2 extends TestCase { * Verify that all data exists in file. * @throws IOException an exception might be thrown */ + @Test public void testSimpleAppend() throws IOException { final Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { @@ -329,6 +332,7 @@ public class TestFileAppend2 extends TestCase { * Test that appends to files at random offsets. * @throws IOException an exception might be thrown */ + @Test public void testComplexAppend() throws IOException { fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE); Configuration conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java index 038edd8d2bc..a2ab1edda2d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; @@ -42,7 +46,6 @@ import org.apache.log4j.Level; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; -import static org.junit.Assert.*; /** This class implements some of the tests posted in HADOOP-2658. 
*/ public class TestFileAppend3 { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java index 6b18965687b..d086c77a9bf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java @@ -23,7 +23,6 @@ import static org.junit.Assert.assertEquals; import java.io.File; import java.io.IOException; import java.util.EnumMap; -import java.util.Random; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -32,8 +31,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; -import org.apache.hadoop.hdfs.server.namenode.FSEditLog; -import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.hdfs.server.namenode.NNStorage; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java index 632d8cc35e3..fae302d5c70 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java @@ -17,6 +17,11 @@ */ package org.apache.hadoop.hdfs; +import java.io.IOException; +import java.util.Arrays; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; @@ -33,10 +38,6 @@ import org.apache.hadoop.util.StringUtils; import org.apache.log4j.Level; import org.apache.log4j.Logger; -import java.io.IOException; -import java.util.*; -import java.util.concurrent.atomic.*; - /** * This class tests cases of concurrent reads/writes to a file; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java index a8624400aa4..458880af566 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java @@ -18,14 +18,16 @@ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.File; import java.io.FileOutputStream; import java.util.ArrayList; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; @@ -42,11 +44,12 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.log4j.Level; 
+import org.junit.Test; /** * A JUnit test for corrupted file handling. */ -public class TestFileCorruption extends TestCase { +public class TestFileCorruption { { ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL); ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL); @@ -56,6 +59,7 @@ public class TestFileCorruption extends TestCase { static Log LOG = ((Log4JLogger)NameNode.stateChangeLog); /** check if DFS can handle corrupted blocks properly */ + @Test public void testFileCorruption() throws Exception { MiniDFSCluster cluster = null; DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFileCorruption"). @@ -88,6 +92,7 @@ } /** check if local FS can handle corrupted blocks properly */ + @Test public void testLocalFileCorruption() throws Exception { Configuration conf = new HdfsConfiguration(); Path file = new Path(System.getProperty("test.build.data"), "corruptFile"); @@ -114,6 +119,7 @@ * in blocksMap. Make sure that ArrayIndexOutOfBounds is not thrown. * See Hadoop-4351. */ + @Test public void testArrayOutOfBoundsException() throws Exception { MiniDFSCluster cluster = null; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java index c2f630c45b6..77eb3f86576 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java @@ -16,7 +16,6 @@ * limitations under the License. */ package org.apache.hadoop.hdfs; - import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT; @@ -31,6 +30,9 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHEC import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import static org.junit.Assume.assumeTrue; import java.io.BufferedReader; @@ -70,11 +72,12 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Time; import org.apache.log4j.Level; +import org.junit.Test; /** * This class tests various cases during file creation. 
*/ -public class TestFileCreation extends junit.framework.TestCase { +public class TestFileCreation { static final String DIR = "/" + TestFileCreation.class.getSimpleName() + "/"; { @@ -123,6 +126,7 @@ public class TestFileCreation extends junit.framework.TestCase { /** * Test that server default values can be retrieved on the client side */ + @Test public void testServerDefaults() throws IOException { Configuration conf = new HdfsConfiguration(); conf.setLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT); @@ -148,11 +152,13 @@ public class TestFileCreation extends junit.framework.TestCase { } } + @Test public void testFileCreation() throws IOException { checkFileCreation(null); } /** Same test but the client should bind to a local interface */ + @Test public void testFileCreationSetLocalInterface() throws IOException { assumeTrue(System.getProperty("os.name").startsWith("Linux")); @@ -255,6 +261,7 @@ public class TestFileCreation extends junit.framework.TestCase { /** * Test deleteOnExit */ + @Test public void testDeleteOnExit() throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { @@ -317,6 +324,7 @@ public class TestFileCreation extends junit.framework.TestCase { /** * Test that file data does not become corrupted even in the face of errors. */ + @Test public void testFileCreationError1() throws IOException { Configuration conf = new HdfsConfiguration(); conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000); @@ -389,6 +397,7 @@ public class TestFileCreation extends junit.framework.TestCase { * Test that the filesystem removes the last block from a file if its * lease expires. */ + @Test public void testFileCreationError2() throws IOException { long leasePeriod = 1000; System.out.println("testFileCreationError2 start"); @@ -454,6 +463,7 @@ public class TestFileCreation extends junit.framework.TestCase { } /** test addBlock(..) 
when replication map = new HashMap(); final Random RAN = new Random(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java index 9051fd2d774..036252ddc31 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java @@ -17,25 +17,26 @@ */ package org.apache.hadoop.hdfs; +import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; import java.net.UnknownHostException; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; -import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.namenode.BackupNode; import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.DNS; +import org.junit.Test; /** * This test checks correctness of port usage by hdfs components: @@ -47,7 +48,7 @@ import org.apache.hadoop.net.DNS; * - if the port = 0 (ephemeral) then the server should choose * a free port and start on it. */ -public class TestHDFSServerPorts extends TestCase { +public class TestHDFSServerPorts { public static final Log LOG = LogFactory.getLog(TestHDFSServerPorts.class); // reset default 0.0.0.0 addresses in order to avoid IPv6 problem @@ -250,6 +251,7 @@ public class TestHDFSServerPorts extends TestCase { return true; } + @Test public void testNameNodePorts() throws Exception { runTestNameNodePorts(false); runTestNameNodePorts(true); @@ -300,6 +302,7 @@ public class TestHDFSServerPorts extends TestCase { /** * Verify datanode port usage. */ + @Test public void testDataNodePorts() throws Exception { NameNode nn = null; try { @@ -335,6 +338,7 @@ public class TestHDFSServerPorts extends TestCase { /** * Verify secondary namenode port usage. */ + @Test public void testSecondaryNodePorts() throws Exception { NameNode nn = null; try { @@ -363,6 +367,7 @@ public class TestHDFSServerPorts extends TestCase { /** * Verify BackupNode port usage. 
*/ + @Test public void testBackupNodePorts() throws Exception { NameNode nn = null; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java index 2a34cf9d03e..feaca8c996c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java @@ -17,6 +17,13 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.io.InterruptedIOException; + import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; @@ -24,15 +31,8 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.log4j.Level; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; import org.junit.Test; -import java.io.InterruptedIOException; -import java.io.IOException; - /** Class contains a set of tests to verify the correctness of * newly introduced {@link FSDataOutputStream#hflush()} method */ public class TestHFlush { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java index 66e8e85e612..bdf4c02d4b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.IOException; import java.net.URI; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java index 8417a53853e..e7df0102130 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java @@ -18,15 +18,16 @@ package org.apache.hadoop.hdfs; -import static - org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; import java.io.IOException; import java.lang.reflect.Field; import java.net.URI; import java.security.PrivilegedExceptionAction; -import org.junit.Test; -import static org.junit.Assert.*; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -36,6 +37,7 @@ import org.apache.hadoop.security.SecurityUtilTestHelper; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import org.junit.Test; public class TestHftpDelegationToken {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java index 57bfe58c3f7..5e20d46e2c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java @@ -18,19 +18,18 @@ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.io.InputStream; -import java.net.URISyntaxException; -import java.net.URI; -import java.net.URL; import java.net.HttpURLConnection; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; import java.util.Random; -import org.junit.Test; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import static org.junit.Assert.*; - import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; @@ -38,12 +37,14 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.util.ServletUtil; import org.apache.log4j.Level; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; public class TestHftpFileSystem { private static final Random RAN = new Random(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java index 483d184d928..0f8d7d00e4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java @@ -27,7 +27,6 @@ import java.net.InetAddress; import java.net.ServerSocket; import java.net.SocketTimeoutException; import java.net.URI; -import java.net.URLConnection; import java.util.LinkedList; import java.util.List; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java index fa029409b7a..ab28ce27d6d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java @@ -17,12 +17,12 @@ */ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; -import java.io.*; +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.net.InetSocketAddress; import java.util.HashSet; import java.util.Set; -import java.net.*; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -37,12 +37,13 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.util.Time; +import org.junit.Test; /** * This class tests the replication and injection of blocks of a DFS file for simulated storage. */ -public class TestInjectionForSimulatedStorage extends TestCase { +public class TestInjectionForSimulatedStorage { private int checksumSize = 16; private int blockSize = checksumSize*2; private int numBlocks = 4; @@ -122,6 +123,7 @@ public class TestInjectionForSimulatedStorage extends TestCase { * The blocks are then injected in one of the DNs. The expected behaviour is * that the NN will arrange for the missing replica to be copied from a valid source. */ + @Test public void testInjection() throws IOException { MiniDFSCluster cluster = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java index 65a0465bd4d..1f42c0d4de1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java @@ -17,11 +17,12 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.net.InetSocketAddress; -import junit.framework.Assert; - import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB; @@ -78,16 +79,16 @@ public class TestIsMethodSupported { nnAddress, NamenodeProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy(); boolean exists = translator.isMethodSupported("rollEditLog"); - Assert.assertTrue(exists); + assertTrue(exists); exists = translator.isMethodSupported("bogusMethod"); - Assert.assertFalse(exists); + assertFalse(exists); } @Test public void testDatanodeProtocol() throws IOException { DatanodeProtocolClientSideTranslatorPB translator = new DatanodeProtocolClientSideTranslatorPB(nnAddress, conf); - Assert.assertTrue(translator.isMethodSupported("sendHeartbeat")); + assertTrue(translator.isMethodSupported("sendHeartbeat")); } @Test @@ -97,12 +98,12 @@ public class TestIsMethodSupported { UserGroupInformation.getCurrentUser(), conf, NetUtils.getDefaultSocketFactory(conf)); //Namenode doesn't implement ClientDatanodeProtocol - Assert.assertFalse(translator.isMethodSupported("refreshNamenodes")); + assertFalse(translator.isMethodSupported("refreshNamenodes")); translator = new ClientDatanodeProtocolTranslatorPB( dnAddress, UserGroupInformation.getCurrentUser(), conf, NetUtils.getDefaultSocketFactory(conf)); - Assert.assertTrue(translator.isMethodSupported("refreshNamenodes")); + assertTrue(translator.isMethodSupported("refreshNamenodes")); } @Test @@ -111,7 +112,7 @@ public class TestIsMethodSupported { (ClientNamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy( conf, nnAddress, ClientProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy(); - Assert.assertTrue(translator.isMethodSupported("mkdirs")); + assertTrue(translator.isMethodSupported("mkdirs")); } @Test @@ -120,7 +121,7 @@ public class TestIsMethodSupported { NameNodeProxies.createNonHAProxy(conf, nnAddress, JournalProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy(); //Namenode doesn't implement JournalProtocol
- Assert.assertFalse(translator.isMethodSupported("startLogSegment")); + assertFalse(translator.isMethodSupported("startLogSegment")); } @Test @@ -130,12 +131,12 @@ public class TestIsMethodSupported { nnAddress, UserGroupInformation.getCurrentUser(), conf, NetUtils.getDefaultSocketFactory(conf), 0); //Not supported at namenode - Assert.assertFalse(translator.isMethodSupported("initReplicaRecovery")); + assertFalse(translator.isMethodSupported("initReplicaRecovery")); translator = new InterDatanodeProtocolTranslatorPB( dnAddress, UserGroupInformation.getCurrentUser(), conf, NetUtils.getDefaultSocketFactory(conf), 0); - Assert.assertTrue(translator.isMethodSupported("initReplicaRecovery")); + assertTrue(translator.isMethodSupported("initReplicaRecovery")); } @Test @@ -145,7 +146,7 @@ public class TestIsMethodSupported { NameNodeProxies.createNonHAProxy(conf, nnAddress, GetUserMappingsProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy(); - Assert.assertTrue(translator.isMethodSupported("getGroupsForUser")); + assertTrue(translator.isMethodSupported("getGroupsForUser")); } @Test @@ -155,7 +156,7 @@ public class TestIsMethodSupported { NameNodeProxies.createNonHAProxy(conf, nnAddress, RefreshAuthorizationPolicyProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy(); - Assert.assertTrue(translator.isMethodSupported("refreshServiceAcl")); + assertTrue(translator.isMethodSupported("refreshServiceAcl")); } @Test @@ -165,7 +166,7 @@ public class TestIsMethodSupported { NameNodeProxies.createNonHAProxy(conf, nnAddress, RefreshUserMappingsProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy(); - Assert.assertTrue( + assertTrue( translator.isMethodSupported("refreshUserToGroupsMappings")); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java index b66bf37ab46..3ca1cf3ec26 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.Arrays; @@ -30,7 +32,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.junit.Test; -import static org.junit.Assert.assertTrue; /** * This class tests that blocks can be larger than 2GB diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java index d8d8dd7b45a..a718d6057fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java @@ -17,34 +17,31 @@ */ package org.apache.hadoop.hdfs; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.spy; + import java.io.DataOutputStream; import java.io.IOException; import java.security.PrivilegedExceptionAction; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; 
import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.util.Time; import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; -import static org.mockito.Matchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.doNothing; public class TestLease { static boolean hasLease(MiniDFSCluster cluster, Path src) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java index 98b8bdf5924..e739f61afa1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java @@ -16,6 +16,8 @@ * limitations under the License. */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.IOException; @@ -31,8 +33,9 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestInterDatanodeProtocol; import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; +import org.junit.Test; -public class TestLeaseRecovery extends junit.framework.TestCase { +public class TestLeaseRecovery { static final int BLOCK_SIZE = 1024; static final short REPLICATION_NUM = (short)3; private static final long LEASE_PERIOD = 300L; @@ -66,6 +69,7 @@ public class TestLeaseRecovery extends junit.framework.TestCase { * It randomly truncates the replica of the last block stored in each datanode. * Finally, it triggers block synchronization to synchronize all stored blocks. 
*/ + @Test public void testBlockSynchronization() throws Exception { final int ORG_FILE_SIZE = 3000; Configuration conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java index 61aeb506b0b..361e5c304df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java @@ -17,12 +17,11 @@ */ package org.apache.hadoop.hdfs; -import static org.junit.Assert.*; +import static org.junit.Assert.assertSame; import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java index 797130c1aa2..ec9e7e2e481 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java @@ -22,7 +22,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.TestListFiles; import org.apache.log4j.Level; - import org.junit.AfterClass; import org.junit.BeforeClass; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java index 2e3f6810ec3..557f3ac09c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.EnumSet; import java.util.Random; @@ -33,13 +37,10 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.log4j.Level; - -import static org.junit.Assert.*; - import org.junit.After; import org.junit.AfterClass; -import org.junit.Test; import org.junit.BeforeClass; +import org.junit.Test; /** * This class tests the FileStatus API. 
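Every conversion in this patch follows the same mechanical recipe, visible in the hunks above: drop the junit.framework.TestCase superclass, annotate each test method with org.junit.Test, and pull in the needed assertions as explicit static imports. A minimal sketch of the before and after shape (TestExample is a hypothetical class, not one touched by this patch):

// Before: JUnit 3. Tests are discovered by the "test" method-name prefix,
// and assertions are instance methods inherited from TestCase.
public class TestExample extends junit.framework.TestCase {
  public void testAddition() {
    assertEquals(4, 2 + 2);
  }
}

// After: JUnit 4. No superclass; tests are discovered via the @Test
// annotation, and assertions come from explicit static imports.
import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class TestExample {
  @Test
  public void testAddition() {
    assertEquals(4, 2 + 2);
  }
}

Fixture methods migrate the same way: setUp()/tearDown() overrides become methods annotated with @Before and @After, which is why several files above gain an org.junit.Before import. Under JUnit 4 a test method also no longer has to start with "test" to run, although this patch keeps the old names so the hunks stay small.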
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java index eef6bbed1f4..fb90ad2786a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java @@ -17,17 +17,23 @@ */ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; -import java.io.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.junit.Test; /** * This class tests the DFS class via the FileSystem interface in a single node * mini-cluster. */ -public class TestLocalDFS extends TestCase { +public class TestLocalDFS { private void writeFile(FileSystem fileSys, Path name) throws IOException { DataOutputStream stm = fileSys.create(name); @@ -59,6 +65,7 @@ public class TestLocalDFS extends TestCase { /** * Tests get/set working directory in DFS. */ + @Test public void testWorkingDirectory() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java index 0eec0d18774..a7fd82aea0b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java @@ -18,15 +18,17 @@ package org.apache.hadoop.hdfs; -import junit.framework.Assert; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.FSConstants; import org.junit.After; import org.junit.Before; import org.junit.Test; -import java.io.File; - /** * Tests MiniDFS cluster setup/teardown and isolation. 
* Every instance is brought up with a new data dir, to ensure that @@ -70,7 +72,7 @@ public class TestMiniDFSCluster { conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c1Path); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); try { - Assert.assertEquals(c1Path+"/data", cluster.getDataDirectory()); + assertEquals(c1Path+"/data", cluster.getDataDirectory()); } finally { cluster.shutdown(); } @@ -91,14 +93,14 @@ public class TestMiniDFSCluster { MiniDFSCluster cluster3 = null; try { String dataDir2 = cluster2.getDataDirectory(); - Assert.assertEquals(c2Path + "/data", dataDir2); + assertEquals(c2Path + "/data", dataDir2); //change the data dir conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataCluster3.getAbsolutePath()); MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf); cluster3 = builder.build(); String dataDir3 = cluster3.getDataDirectory(); - Assert.assertTrue("Clusters are bound to the same directory: " + dataDir2, + assertTrue("Clusters are bound to the same directory: " + dataDir2, !dataDir2.equals(dataDir3)); } finally { MiniDFSCluster.shutdownCluster(cluster3); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java index 26f42631fd8..5516ced5fe0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.net.URL; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -30,17 +32,19 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; +import org.junit.Test; /** * The test makes sure that NameNode detects the presence of blocks that do not have * any valid replicas. In addition, it verifies that HDFS front page displays * a warning in such a case. 
*/ -public class TestMissingBlocksAlert extends TestCase { +public class TestMissingBlocksAlert { private static final Log LOG = LogFactory.getLog(TestMissingBlocksAlert.class); + @Test public void testMissingBlocksAlert() throws IOException, InterruptedException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java index 5ba5681a581..f30de965fca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java @@ -17,23 +17,27 @@ */ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; -import java.io.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.net.InetSocketAddress; import java.util.Random; -import java.net.*; + import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.junit.Test; /** * This class tests the modification time in DFS. */ -public class TestModTime extends TestCase { +public class TestModTime { static final long seed = 0xDEADBEEFL; static final int blockSize = 8192; static final int fileSize = 16384; @@ -74,6 +78,7 @@ public class TestModTime extends TestCase { /** * Tests modification time in DFS. */ + @Test public void testModTime() throws IOException { Configuration conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java index 61c93fe6f0b..8d320903a89 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java @@ -17,9 +17,7 @@ */ package org.apache.hadoop.hdfs; -import org.junit.Test; - -import java.io.*; +import java.io.IOException; import java.util.ArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; @@ -28,15 +26,16 @@ import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.log4j.Level; +import org.junit.Test; /** * This class tests hflushing concurrently from many threads. 
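The other change repeated throughout these hunks is import normalization: wildcard imports such as import static org.junit.Assert.*; are expanded to one explicit import per name actually used, qualified junit.framework.Assert calls become unqualified org.junit.Assert static imports, and each import block is regrouped as static imports first, then java.*, then org.* and other libraries, alphabetized within each group. A sketch of the target layout under those assumptions (the class body and "example.key" are illustrative, not from this patch):

// Target import layout after the cleanup:
//   1. static imports, alphabetized;
//   2. java.* / javax.* imports;
//   3. third-party imports (org.apache.*, org.junit.*, ...), alphabetized;
// groups separated by blank lines, with no wildcard (.*) imports left.
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.junit.Test;

public class ImportLayoutExample {  // hypothetical class name
  @Test
  public void testLayout() {
    Configuration conf = new Configuration();
    conf.setInt("example.key", 1);                  // illustrative key
    assertEquals(1, conf.getInt("example.key", 0));
    assertTrue(new InetSocketAddress("localhost", 0).getPort() == 0);
  }
}

Explicit imports make it obvious which assertions a file depends on and keep unused names from hiding behind a star, which is why the patch also deletes a number of now-unused imports outright.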
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java index e3fce6e0faf..dacd4bca998 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java @@ -17,21 +17,22 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.IOException; import java.nio.ByteBuffer; import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.util.Time; import org.apache.log4j.Level; import org.apache.log4j.LogManager; -import static org.junit.Assert.*; - /** * Driver class for testing the use of DFSInputStream by multiple concurrent * readers, using the different read APIs. See subclasses for the actual test diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java index 6378d426448..55695415d88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java @@ -17,24 +17,25 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.DataOutputStream; import java.io.IOException; import java.util.Random; -import junit.framework.TestCase; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; +import org.junit.Test; /** * This class tests the DFS positional read functionality in a single node * mini-cluster. */ -public class TestPread extends TestCase { +public class TestPread { static final long seed = 0xDEADBEEFL; static final int blockSize = 4096; boolean simulatedStorage = false; @@ -196,6 +197,7 @@ public class TestPread extends TestCase { /** * Tests positional read in DFS. */ + @Test public void testPreadDFS() throws IOException { dfsPreadTest(false); //normal pread dfsPreadTest(true); //trigger read code path without transferTo. @@ -225,6 +227,7 @@ public class TestPread extends TestCase { } } + @Test public void testPreadDFSSimulated() throws IOException { simulatedStorage = true; testPreadDFS(); @@ -234,6 +237,7 @@ public class TestPread extends TestCase { /** * Tests positional read in LocalFS. 
*/ + @Test public void testPreadLocalFS() throws IOException { Configuration conf = new HdfsConfiguration(); FileSystem fileSys = FileSystem.getLocal(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java index 98cddb4eb40..f6e09283224 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.OutputStream; import java.security.PrivilegedExceptionAction; @@ -24,18 +28,15 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; -import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; - import org.junit.Test; -import static org.junit.Assert.*; /** A class for testing quota-related commands */ public class TestQuota { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java index 6189a96f81c..ee5d50aa024 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java @@ -16,6 +16,7 @@ * limitations under the License. 
*/ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertTrue; import java.io.IOException; @@ -29,8 +30,9 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.log4j.Level; +import org.junit.Test; -public class TestRenameWhileOpen extends junit.framework.TestCase { +public class TestRenameWhileOpen { { ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL); ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL); @@ -47,6 +49,7 @@ public class TestRenameWhileOpen extends junit.framework.TestCase { * mkdir /user/dir3 * move /user/dir1 /user/dir3 */ + @Test public void testWhileOpenRenameParent() throws IOException { Configuration conf = new HdfsConfiguration(); final int MAX_IDLE_TIME = 2000; // 2s @@ -132,6 +135,7 @@ * open /user/dir1/file1 /user/dir2/file2 * move /user/dir1 /user/dir3 */ + @Test public void testWhileOpenRenameParentToNonexistentDir() throws IOException { Configuration conf = new HdfsConfiguration(); final int MAX_IDLE_TIME = 2000; // 2s @@ -206,6 +210,7 @@ * mkdir /user/dir2 * move /user/dir1/file1 /user/dir2/ */ + @Test public void testWhileOpenRenameToExistentDirectory() throws IOException { Configuration conf = new HdfsConfiguration(); final int MAX_IDLE_TIME = 2000; // 2s @@ -270,6 +275,7 @@ * open /user/dir1/file1 * move /user/dir1/file1 /user/dir2/ */ + @Test public void testWhileOpenRenameToNonExistentDirectory() throws IOException { Configuration conf = new HdfsConfiguration(); final int MAX_IDLE_TIME = 2000; // 2s diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java index 9a7504a0508..ab8df438dd9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java @@ -28,7 +28,6 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; -import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java index 994da701bc0..e819e023b14 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; import java.io.OutputStream; @@ -25,8 +28,6 @@ import java.net.InetSocketAddress; import java.util.Iterator; import java.util.Random;
-import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -39,16 +40,17 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.util.Time; +import org.junit.Test; /** * This class tests the replication of a DFS file. */ -public class TestReplication extends TestCase { +public class TestReplication { private static final long seed = 0xDEADBEEFL; private static final int blockSize = 8192; private static final int fileSize = 16384; @@ -149,6 +151,7 @@ public class TestReplication extends TestCase { /* * Test if Datanode reports bad blocks during replication request */ + @Test public void testBadBlockReportOnTransfer() throws Exception { Configuration conf = new HdfsConfiguration(); FileSystem fs = null; @@ -240,11 +243,13 @@ public class TestReplication extends TestCase { } + @Test public void testReplicationSimulatedStorag() throws IOException { runReplication(true); } + @Test public void testReplication() throws IOException { runReplication(false); } @@ -298,6 +303,7 @@ public class TestReplication extends TestCase { * two of the blocks and removes one of the replicas. Expected behavior is * that the missing replica will be copied from one valid source. */ + @Test public void testPendingReplicationRetry() throws IOException { MiniDFSCluster cluster = null; @@ -400,6 +406,7 @@ public class TestReplication extends TestCase { * Test if replication can detect mismatched length on-disk blocks * @throws Exception */ + @Test public void testReplicateLenMismatchedBlock() throws Exception { MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(2).build(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java index 7e130c98525..f6345a3edac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java @@ -18,17 +18,19 @@ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.junit.Test; /** * A JUnit test for checking if restarting DFS preserves integrity. */ -public class TestRestartDFS extends TestCase { +public class TestRestartDFS { public class runTests
@@ -110,6 +112,7 @@ public class TestRestartDFS extends TestCase { } } /** check if DFS remains in proper condition after a restart */ + @Test public void testRestartDFS() throws Exception { final Configuration conf = new HdfsConfiguration(); runTests(conf, false); @@ -118,6 +121,7 @@ public class TestRestartDFS extends TestCase { /** check if DFS remains in proper condition after a restart * this rerun is with 2 ports enabled for RPC in the namenode */ + @Test public void testRestartDualPortDFS() throws Exception { final Configuration conf = new HdfsConfiguration(); runTests(conf, true); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java index 1c3ada5318a..6eab01090c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java @@ -18,30 +18,33 @@ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.IOException; import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.test.GenericTestUtils; - -import static org.junit.Assert.*; -import org.junit.Before; import org.junit.After; +import org.junit.Before; import org.junit.Test; import com.google.common.base.Supplier; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java index a34e00a03c9..c2f1cf39c3c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java @@ -17,24 +17,26 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.DataOutputStream; import java.io.IOException; import java.util.Random; -import junit.framework.TestCase; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ChecksumFileSystem; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; +import org.junit.Test; /** * This class tests the 
presence of seek bug as described * in HADOOP-508 */ -public class TestSeekBug extends TestCase { +public class TestSeekBug { static final long seed = 0xDEADBEEFL; static final int ONEMB = 1 << 20; @@ -123,6 +125,7 @@ public class TestSeekBug extends TestCase { /** * Test if the seek bug exists in FSDataInputStream in DFS. */ + @Test public void testSeekBugDFS() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); @@ -142,6 +145,7 @@ public class TestSeekBug extends TestCase { /** * Tests if the seek bug exists in FSDataInputStream in LocalFS. */ + @Test public void testSeekBugLocalFS() throws IOException { Configuration conf = new HdfsConfiguration(); FileSystem fileSys = FileSystem.getLocal(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java index f7ea537be11..18341ca9558 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java @@ -17,27 +17,33 @@ */ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; -import java.io.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.text.SimpleDateFormat; +import java.util.Date; import java.util.Random; -import java.net.*; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.util.Time; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileStatus; -import java.text.SimpleDateFormat; -import java.util.Date; +import org.junit.Test; /** * This class tests the access time on files. * */ -public class TestSetTimes extends TestCase { +public class TestSetTimes { static final long seed = 0xDEADBEEFL; static final int blockSize = 8192; static final int fileSize = 16384; @@ -78,6 +84,7 @@ public class TestSetTimes extends TestCase { /** * Tests mod & access time in DFS. */ + @Test public void testTimes() throws IOException { Configuration conf = new HdfsConfiguration(); final int MAX_IDLE_TIME = 2000; // 2s @@ -209,6 +216,7 @@ public class TestSetTimes extends TestCase { /** * Tests mod time change at close in DFS. 
*/ + @Test public void testTimesAtClose() throws IOException { Configuration conf = new HdfsConfiguration(); final int MAX_IDLE_TIME = 2000; // 2s diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java index de6c8a47e4f..fb810863776 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java @@ -19,9 +19,10 @@ package org.apache.hadoop.hdfs; import java.io.IOException; -import junit.framework.TestCase; +import org.junit.Test; -public class TestSetrepDecreasing extends TestCase { +public class TestSetrepDecreasing { + @Test public void testSetrepDecreasing() throws IOException { TestSetrepIncreasing.setrep(5, 3, false); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java index 29c1aa221e0..9824064c717 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java @@ -17,14 +17,21 @@ */ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; -import java.io.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsShell; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; +import org.junit.Test; -public class TestSetrepIncreasing extends TestCase { +public class TestSetrepIncreasing { static void setrep(int fromREP, int toREP, boolean simulatedStorage) throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { @@ -68,9 +75,11 @@ public class TestSetrepIncreasing extends TestCase { } } + @Test public void testSetrepIncreasing() throws IOException { setrep(3, 7, false); } + @Test public void testSetrepIncreasingSimulatedStorage() throws IOException { setrep(3, 7, true); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java index 60e8f0487c9..8cbb4fd3172 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java @@ -17,23 +17,27 @@ */ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; -import java.io.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; import java.util.Random; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.BlockLocation; import 
org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; +import org.junit.Test; /** * This class tests the creation of files with block-size * smaller than the default buffer size of 4K. */ -public class TestSmallBlock extends TestCase { +public class TestSmallBlock { static final long seed = 0xDEADBEEFL; static final int blockSize = 1; static final int fileSize = 20; @@ -90,6 +94,7 @@ public class TestSmallBlock extends TestCase { /** * Tests small block size in in DFS. */ + @Test public void testSmallBlock() throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { @@ -108,6 +113,7 @@ public class TestSmallBlock extends TestCase { cluster.shutdown(); } } + @Test public void testSmallBlockSimulatedStorage() throws IOException { simulatedStorage = true; testSmallBlock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java index 68c593b2e86..5503238330a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java @@ -17,12 +17,12 @@ */ package org.apache.hadoop.hdfs; +import java.io.OutputStream; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; - -import java.io.OutputStream; import org.junit.Test; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java index 0b6bceafafc..7f792f43b54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java @@ -20,6 +20,9 @@ package org.apache.hadoop.hdfs; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; + import java.io.File; import java.io.FileInputStream; import java.io.IOException; @@ -28,24 +31,21 @@ import java.net.URI; import java.util.Arrays; import java.util.Collections; import java.util.zip.CRC32; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; - -import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; -import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; - import org.apache.hadoop.hdfs.server.common.Storage; -import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import 
org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage; import org.apache.hadoop.hdfs.server.datanode.DataStorage; import org.apache.hadoop.hdfs.server.namenode.NNStorage; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java index 602f016dac4..4c117a992e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs.protocol; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java index d329f493768..89e8b0eb625 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hdfs.protocol; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + import java.util.EnumSet; -import static org.junit.Assert.*; -import org.junit.Test; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; +import org.junit.Test; /** * Test for {@link LayoutVersion} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java index f58c8636a1d..c279d6a9365 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs.protocolPB; -import static junit.framework.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.util.ArrayList; import java.util.Arrays; @@ -27,9 +28,9 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto; @@ -57,10 +58,10 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature; import org.apache.hadoop.hdfs.server.protocol.BlockCommand; +import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; import 
org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; -import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java index 5cf64bae3f3..0c6bb2103e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java @@ -20,7 +20,8 @@ package org.apache.hadoop.hdfs.security; -import static org.junit.Assert.*; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.ByteArrayInputStream; import java.io.DataInputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/SecurityTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/SecurityTestUtil.java index 4f08a7e5e3c..cb55854f1ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/SecurityTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/SecurityTestUtil.java @@ -20,8 +20,6 @@ package org.apache.hadoop.hdfs.security.token.block; import java.io.IOException; -import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; -import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.security.token.Token; /** Utilities for security tests */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index 2374b785f2d..9ce8bf448c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hdfs.server.balancer; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + import java.io.IOException; import java.net.URI; import java.util.ArrayList; @@ -41,13 +44,11 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.util.Time; import org.junit.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; /** * This class tests if a balancer schedules tasks correctly. 
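The hunks above all apply one mechanical conversion: drop the junit.framework.TestCase superclass, annotate each test method with org.junit.Test, and pull the needed assertions in as explicit static imports. A minimal before/after sketch of that pattern, using a hypothetical ExampleTest rather than any class touched by this patch:

// JUnit 3 style, as removed throughout this patch: tests are discovered
// by the test* naming convention and assertions come from the superclass.
//
//   public class ExampleTest extends junit.framework.TestCase {
//     public void testAddition() {
//       assertEquals(4, 2 + 2);
//     }
//   }

// JUnit 4 style, as added throughout this patch: no superclass, explicit
// static imports, and @Test marking each test method.
import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class ExampleTest {
  @Test
  public void testAddition() {
    assertEquals(4, 2 + 2);
  }
}

Because JUnit 4 finds tests by annotation rather than by name, a method that loses its test prefix still runs, while a method that keeps the prefix but never gets @Test silently stops running; that is why every converted method in these hunks gains an explicit annotation.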
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java index 9d13a2b6190..1a309910eb9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java @@ -29,8 +29,8 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; -import org.apache.hadoop.hdfs.NameNodeProxies; import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf; +import org.apache.hadoop.hdfs.NameNodeProxies; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java index b130e027b04..dfd0b947c1d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java @@ -40,8 +40,8 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.hdfs.server.namenode.NameNode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java index f1991345da3..b8eb6855fbb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import org.junit.Test; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import java.util.ArrayList; import java.util.Iterator; @@ -29,7 +29,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.GenerationStamp; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.junit.Test; /** * This class provides tests for BlockInfo class, which is used in BlocksMap. 
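Several of these hunks also narrow wildcard assertion imports, for example replacing import static org.junit.Assert.*; in TestBlockInfo with only the assertEquals and assertNotNull it uses. The explicit form documents which assertions a file actually relies on and avoids any ambiguity with the older junit.framework.Assert, whose methods carry the same names. A short sketch of the resulting style (BlockInfoStyleExample is a made-up name, not part of this patch):

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;

import org.junit.Test;

public class BlockInfoStyleExample {
  @Test
  public void explicitStaticImports() {
    Object head = new Object();
    // Both calls resolve unambiguously to org.junit.Assert.
    assertNotNull(head);
    assertEquals(head, head);
  }
}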
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java index 2d676578ffe..b22383469f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java @@ -17,25 +17,25 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import junit.framework.TestCase; +import static org.junit.Assert.assertEquals; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; -import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.junit.Test; /** * Test if FSNamesystem handles heartbeat right */ -public class TestComputeInvalidateWork extends TestCase { +public class TestComputeInvalidateWork { /** * Test if {@link FSNamesystem#computeInvalidateWork(int)} * can schedule invalidate work correctly */ + @Test public void testCompInvalidate() throws Exception { final Configuration conf = new HdfsConfiguration(); final int NUM_OF_DATANODES = 3; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java index ee6a26026c6..0912ad90238 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java @@ -17,6 +17,11 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.Arrays; import java.util.HashMap; @@ -24,12 +29,11 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.protocol.Block; +import org.junit.Test; /** @@ -38,7 +42,7 @@ import org.apache.hadoop.hdfs.protocol.Block; * CorruptReplicasMap::getCorruptReplicaBlockIds * return the correct values */ -public class TestCorruptReplicaInfo extends TestCase { +public class TestCorruptReplicaInfo { private static final Log LOG = LogFactory.getLog(TestCorruptReplicaInfo.class); @@ -60,6 +64,7 @@ public class TestCorruptReplicaInfo extends TestCase { return getBlock((long)block_id); } + @Test public void testCorruptReplicaInfo() throws IOException, InterruptedException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java index e43310cb437..33369720c11 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java @@ -17,21 +17,25 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.util.ArrayList; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.GenerationStamp; - -import junit.framework.TestCase; +import org.junit.Test; /** * This class tests that methods in DatanodeDescriptor */ -public class TestDatanodeDescriptor extends TestCase { +public class TestDatanodeDescriptor { /** * Test that getInvalidateBlocks observes the maxlimit. */ + @Test public void testGetInvalidateBlocks() throws Exception { final int MAX_BLOCKS = 10; final int REMAINING_BLOCKS = 2; @@ -49,6 +53,7 @@ public class TestDatanodeDescriptor extends TestCase { assertEquals(bc.length, REMAINING_BLOCKS); } + @Test public void testBlocksCounter() throws Exception { DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor(); assertEquals(0, dd.numBlocks()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java index 2d7a122c465..7448da776ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import java.util.ArrayList; +import static org.junit.Assert.assertEquals; -import junit.framework.TestCase; +import java.util.ArrayList; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -34,17 +34,19 @@ import org.apache.hadoop.hdfs.server.protocol.BlockCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.junit.Test; /** * Test if FSNamesystem handles heartbeat right */ -public class TestHeartbeatHandling extends TestCase { +public class TestHeartbeatHandling { /** * Test if * {@link FSNamesystem#handleHeartbeat} * can pick up replication and/or invalidate requests and observes the max * limit */ + @Test public void testHeartbeat() throws Exception { final Configuration conf = new HdfsConfiguration(); final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java index 081438075c1..151d035135f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java @@ -18,13 +18,15 @@ package org.apache.hadoop.hdfs.server.blockmanagement; -import org.apache.hadoop.hdfs.DFSTestUtil; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import org.apache.hadoop.hdfs.DFSTestUtil; import org.junit.Before; import org.junit.Test; -import static org.junit.Assert.*; - public class TestHost2NodesMap { private Host2NodesMap map = new Host2NodesMap(); private DatanodeDescriptor dataNodes[]; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java index d4be55660cf..80c8eb50083 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java @@ -17,12 +17,12 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; +import static org.junit.Assert.assertTrue; + import java.util.Collection; import java.util.Iterator; import java.util.concurrent.TimeoutException; -import junit.framework.TestCase; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -32,11 +32,9 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; -import org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager; -import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.util.Time; +import org.junit.Test; /** * Test if live nodes count per node is correct @@ -45,7 +43,7 @@ import org.apache.hadoop.util.Time; * Two of the "while" loops below use "busy wait" * because they are detecting transient states. 
*/ -public class TestNodeCount extends TestCase { +public class TestNodeCount { final short REPLICATION_FACTOR = (short)2; final long TIMEOUT = 20000L; long timeout = 0; @@ -53,6 +51,7 @@ public class TestNodeCount extends TestCase { Block lastBlock = null; NumberReplicas lastNum = null; + @Test public void testNodeCount() throws Exception { // start a mini dfs cluster of 2 nodes final Configuration conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java index 68b3f3ec976..dc8578e13ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java @@ -18,12 +18,13 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import static org.apache.hadoop.util.Time.now; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; import java.util.Collection; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FSDataOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java index bc6c4dfb452..3c7ad8ca021 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; import java.util.Queue; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java index 0d138e05d1a..dc390d2e593 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java @@ -17,17 +17,19 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import junit.framework.TestCase; -import java.lang.System; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import org.apache.hadoop.hdfs.protocol.Block; +import org.junit.Test; /** * This class tests the internals of PendingReplicationBlocks.java */ -public class TestPendingReplication extends TestCase { 
+public class TestPendingReplication { final static int TIMEOUT = 3; // 3 seconds + @Test public void testPendingReplication() { PendingReplicationBlocks pendingReplications; pendingReplications = new PendingReplicationBlocks(TIMEOUT * 1000); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java index eacd09b374a..8c8674dbeba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; @@ -34,8 +37,6 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.junit.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; /** * Test when RBW block is removed. Invalidation of the corrupted block happens diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index ce570f7eba2..520ed092c71 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -17,7 +17,9 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.File; import java.util.ArrayList; @@ -34,7 +36,6 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.net.NetworkTopology; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java index 7f208a4e34a..979ab888f03 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import junit.framework.TestCase; +import static org.junit.Assert.assertEquals; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -27,8 +27,10 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import 
org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.junit.Test; -public class TestUnderReplicatedBlocks extends TestCase { +public class TestUnderReplicatedBlocks { + @Test public void testSetrepIncWithUnderReplicatedBlocks() throws Exception { Configuration conf = new HdfsConfiguration(); final short REPLICATION_FACTOR = 2; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestGetUriFromString.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestGetUriFromString.java index b1ffca82271..e4f9697f465 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestGetUriFromString.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestGetUriFromString.java @@ -17,19 +17,21 @@ */ package org.apache.hadoop.hdfs.server.common; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + import java.io.IOException; import java.net.URI; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.junit.Test; /** * This is a unit test, which tests {@link Util#stringAsURI(String)} * for Windows and Unix style file paths. */ -public class TestGetUriFromString extends TestCase { +public class TestGetUriFromString { private static final Log LOG = LogFactory.getLog(TestGetUriFromString.class); private static final String RELATIVE_FILE_PATH = "relativeFilePath"; @@ -49,6 +51,7 @@ public class TestGetUriFromString extends TestCase { * Test for a relative path, os independent * @throws IOException */ + @Test public void testRelativePathAsURI() throws IOException { URI u = Util.stringAsURI(RELATIVE_FILE_PATH); LOG.info("Uri: " + u); @@ -59,6 +62,7 @@ public class TestGetUriFromString extends TestCase { * Test for an OS dependent absolute paths. 
* @throws IOException */ + @Test public void testAbsolutePathAsURI() throws IOException { URI u = null; u = Util.stringAsURI(ABSOLUTE_PATH_WINDOWS); @@ -74,6 +78,7 @@ public class TestGetUriFromString extends TestCase { * Test for a URI * @throws IOException */ + @Test public void testURI() throws IOException { LOG.info("Testing correct Unix URI: " + URI_UNIX); URI u = Util.stringAsURI(URI_UNIX); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java index b10d27e4350..3b07fe7978a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; import java.io.IOException; import java.net.InetSocketAddress; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java index ee6a7b55901..b9f58baef35 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdfs.server.datanode; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; @@ -47,15 +51,10 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; -import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.util.Time; - import org.junit.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; /** * This class tests if block replacement request to data nodes work correctly. 
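In the TestDeleteBlockPool hunks below, calls such as Assert.fail("Must not delete a running block pool") become bare fail(...) through a static import; the surrounding try/fail/catch idiom for asserting that a call throws is left unchanged. A compact sketch of that idiom under JUnit 4, with deleteBlockPool as a hypothetical stand-in for the real operation under test:

import static org.junit.Assert.fail;

import java.io.IOException;

import org.junit.Test;

public class ExpectedExceptionExample {
  @Test
  public void deletingRunningPoolMustThrow() {
    try {
      deleteBlockPool();
      fail("Must not delete a running block pool");
    } catch (IOException expected) {
      // Expected: reaching here means the call failed as required.
    }
  }

  // Stand-in that always fails, so the test above passes.
  private void deleteBlockPool() throws IOException {
    throw new IOException("pool is in use");
  }
}

JUnit 4 also offers @Test(expected = IOException.class) for the simple case, but the try/fail/catch form used in these tests preserves the failure message and allows further assertions after the exception is caught.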
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java index e2954855ea1..b461e3a9f3a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java @@ -23,10 +23,10 @@ import java.util.List; import javax.management.MBeanServer; import javax.management.ObjectName; -import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.conf.Configuration; -import org.junit.Test; +import org.apache.hadoop.hdfs.MiniDFSCluster; import org.junit.Assert; +import org.junit.Test; /** * Class for testing {@link DataNodeMXBean} implementation diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java index 35e83fa1b24..81748ba2cff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java @@ -20,7 +20,8 @@ package org.apache.hadoop.hdfs.server.datanode; import static org.apache.hadoop.test.MetricsAsserts.assertCounter; import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.util.List; import java.util.Random; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java index fe6e8b7973c..4e4df657ded 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java @@ -19,12 +19,13 @@ package org.apache.hadoop.hdfs.server.datanode; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; -import junit.framework.Assert; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -74,7 +75,7 @@ public class TestDeleteBlockPool { // Although namenode is shutdown, the bp offerservice is still running try { dn1.deleteBlockPool(bpid1, true); - Assert.fail("Must not delete a running block pool"); + fail("Must not delete a running block pool"); } catch (IOException expected) { } @@ -85,7 +86,7 @@ public class TestDeleteBlockPool { try { dn1.deleteBlockPool(bpid1, false); - Assert.fail("Must not delete if any block files exist unless " + fail("Must not delete if any block files exist unless " + "force is true"); } catch (IOException expected) { } @@ -115,7 +116,7 @@ public class TestDeleteBlockPool { // on dn2 try { dn2.deleteBlockPool(bpid1, true); - Assert.fail("Must not delete a running block pool"); + fail("Must not delete a running block 
pool"); } catch (IOException expected) { } @@ -180,21 +181,21 @@ public class TestDeleteBlockPool { Configuration nn1Conf = cluster.getConfiguration(0); nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1"); dn1.refreshNamenodes(nn1Conf); - Assert.assertEquals(1, dn1.getAllBpOs().length); + assertEquals(1, dn1.getAllBpOs().length); DFSAdmin admin = new DFSAdmin(nn1Conf); String dn1Address = dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort(); String[] args = { "-deleteBlockPool", dn1Address, bpid2 }; int ret = admin.run(args); - Assert.assertFalse(0 == ret); + assertFalse(0 == ret); verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2); verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2); String[] forceArgs = { "-deleteBlockPool", dn1Address, bpid2, "force" }; ret = admin.run(forceArgs); - Assert.assertEquals(0, ret); + assertEquals(0, ret); verifyBlockPoolDirectories(false, dn1StorageDir1, bpid2); verifyBlockPoolDirectories(false, dn1StorageDir2, bpid2); @@ -216,7 +217,7 @@ public class TestDeleteBlockPool { + bpid); if (shouldExist == false) { - Assert.assertFalse(bpDir.exists()); + assertFalse(bpDir.exists()); } else { File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); File finalizedDir = new File(bpCurrentDir, @@ -224,9 +225,9 @@ public class TestDeleteBlockPool { File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW); File versionFile = new File(bpCurrentDir, "VERSION"); - Assert.assertTrue(finalizedDir.isDirectory()); - Assert.assertTrue(rbwDir.isDirectory()); - Assert.assertTrue(versionFile.exists()); + assertTrue(finalizedDir.isDirectory()); + assertTrue(rbwDir.isDirectory()); + assertTrue(versionFile.exists()); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java index ea0143eacdf..045df0bbb80 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java @@ -17,6 +17,12 @@ */ package org.apache.hadoop.hdfs.server.datanode; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.FileOutputStream; import java.io.IOException; @@ -25,8 +31,6 @@ import java.util.LinkedList; import java.util.List; import java.util.Random; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -38,15 +42,16 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.GenerationStamp; -import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; +import org.junit.Test; /** * Tests {@link DirectoryScanner} handling of differences * between blocks on the disk and block in memory. 
*/ -public class TestDirectoryScanner extends TestCase { +public class TestDirectoryScanner { private static final Log LOG = LogFactory.getLog(TestDirectoryScanner.class); private static final Configuration CONF = new HdfsConfiguration(); private static final int DEFAULT_GEN_STAMP = 9999; @@ -218,6 +223,7 @@ public class TestDirectoryScanner extends TestCase { assertEquals(mismatchBlocks, stats.mismatchBlocks); } + @Test public void testDirectoryScanner() throws Exception { // Run the test with and without parallel scanning for (int parallelism = 1; parallelism < 3; parallelism++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java index cf2e4483de1..b293075ddd3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import static org.apache.hadoop.test.MetricsAsserts.*; +import static org.apache.hadoop.test.MetricsAsserts.assertCounter; +import static org.apache.hadoop.test.MetricsAsserts.getMetrics; import java.util.EnumSet; import java.util.Random; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java index 2d6f2103796..b3719ad63bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java @@ -18,7 +18,8 @@ package org.apache.hadoop.hdfs.server.datanode; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.IOException; import java.net.InetSocketAddress; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java index a5c85510743..8ff6cb88866 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java @@ -17,7 +17,9 @@ */ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import static org.junit.Assert.*; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.IOException; import java.net.InetSocketAddress; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java index e35447391c1..db1cbbc8cb5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java +++ 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
index 4ac7060436c..16e5e7791a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
@@ -17,6 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
@@ -37,33 +43,29 @@ import java.util.Set;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
+import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.util.Holder;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.mockito.Mockito;
 import org.mockito.Matchers;
+import org.mockito.Mockito;
 
 import com.google.common.base.Joiner;
-import com.google.common.collect.Lists;
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import com.google.common.io.Files;
 
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-
 /**
  * Utility functions for testing fsimage storage.
  */
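FSImageTestUtil shows the full import layout the patch appears to normalize toward. The grouping below is an inference from the hunks, not a rule stated anywhere in the patch: static imports first, then java.*/javax.*, then other packages alphabetically, with com.google.* last, each group separated by a blank line:

    import static org.junit.Assert.assertTrue;    // 1. static imports

    import java.io.File;                          // 2. java.* / javax.*

    import org.apache.hadoop.conf.Configuration;  // 3. other packages, A-Z
    import org.junit.Test;

    import com.google.common.base.Joiner;         // 4. com.google.* last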
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
index a3f84533cf9..fbe48bd2e64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
@@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Options.Rename;
@@ -46,7 +47,6 @@ import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 
 /**
  * OfflineEditsViewerHelper is a helper class for TestOfflineEditsViewer,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
index d7bb0f7cbc7..59b6cc21c85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
@@ -18,9 +18,10 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
-import org.junit.Before;
-import org.junit.After;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.BufferedReader;
 import java.io.File;
@@ -44,6 +45,8 @@ import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.log4j.PatternLayout;
 import org.apache.log4j.RollingFileAppender;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
index 5d93b8cf45f..5c6b36493ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
index 20d4c720dec..8536a2cb66e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
@@ -17,9 +17,11 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
-import junit.framework.Assert;
-import java.io.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -120,7 +122,7 @@ public class TestCheckPointForSecurityTokens {
         renewToken(token1);
         renewToken(token2);
       } catch (IOException e) {
-        Assert.fail("Could not renew or cancel the token");
+        fail("Could not renew or cancel the token");
       }
 
       namesystem = cluster.getNamesystem();
@@ -148,7 +150,7 @@ public class TestCheckPointForSecurityTokens {
         renewToken(token5);
       } catch (IOException e) {
-        Assert.fail("Could not renew or cancel the token");
+        fail("Could not renew or cancel the token");
       }
 
       // restart cluster again
@@ -171,7 +173,7 @@ public class TestCheckPointForSecurityTokens {
         renewToken(token5);
         cancelToken(token5);
       } catch (IOException e) {
-        Assert.fail("Could not renew or cancel the token");
+        fail("Could not renew or cancel the token");
       }
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index af285cc3ebb..b2496f92b15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -18,24 +18,26 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
-import junit.framework.TestCase;
+import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.assertNNHasCheckpoints;
+import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.getNameNodeCurrentDirs;
 
-import java.lang.management.ManagementFactory;
-import java.net.InetSocketAddress;
 import java.io.File;
 import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Random;
 
+import junit.framework.TestCase;
+
 import org.apache.commons.cli.ParseException;
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -44,6 +46,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -78,9 +81,6 @@ import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 import com.google.common.primitives.Ints;
 
-import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.assertNNHasCheckpoints;
-import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.getNameNodeCurrentDirs;
-
 /**
  * This class tests the creation and validation of a checkpoint.
  */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
index 445b7447784..4330317d6ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.ByteArrayInputStream;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java
index dacd03bbfc7..d399ddf856a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java
@@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.junit.Assert.assertTrue;
+
 import java.net.URL;
 import java.util.Collection;
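From here on a second recurring substitution appears: calls through the JUnit 3 junit.framework.Assert class become unqualified calls backed by static imports of org.junit.Assert. The JUnit 3 class was superseded by org.junit.Assert in JUnit 4 (and eventually deprecated), and the replacement methods behave identically for these uses, so the rewrite is mechanical:

    // Before: JUnit 3 helper class, qualified at the call site
    Assert.fail("Could not renew or cancel the token");

    // After: JUnit 4 static import
    import static org.junit.Assert.fail;
    // ... at the call site:
    fail("Could not renew or cancel the token");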
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index 73550b38b9f..d78198ab402 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -17,11 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 import java.util.concurrent.TimeoutException;
 
-import junit.framework.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -36,9 +37,9 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
@@ -121,7 +122,7 @@ public class TestDeadDatanode {
     // Ensure blockReceived call from dead datanode is rejected with IOException
     try {
       dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks);
-      Assert.fail("Expected IOException is not thrown");
+      fail("Expected IOException is not thrown");
     } catch (IOException ex) {
       // Expected
     }
@@ -132,7 +133,7 @@ public class TestDeadDatanode {
         new long[] { 0L, 0L, 0L }) };
     try {
       dnp.blockReport(reg, poolId, report);
-      Assert.fail("Expected IOException is not thrown");
+      fail("Expected IOException is not thrown");
     } catch (IOException ex) {
       // Expected
     }
@@ -142,8 +143,8 @@ public class TestDeadDatanode {
     StorageReport[] rep = { new StorageReport(reg.getStorageID(), false, 0, 0, 0, 0) };
     DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0, 0, 0).getCommands();
-    Assert.assertEquals(1, cmd.length);
-    Assert.assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
+    assertEquals(1, cmd.length);
+    assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
         .getAction());
   }
 }
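The try/fail/catch blocks in TestDeadDatanode are left intact; that idiom still works under JUnit 4. Where an entire test method should end in one exception type, JUnit 4 also offers a declarative form; a sketch of that alternative (hypothetical method name, not a change this patch makes):

    @Test(expected = IOException.class)
    public void testBlockReportFromDeadDatanode() throws Exception {
      // passes only if this call throws IOException
      dnp.blockReport(reg, poolId, report);
    }

Keeping try/fail/catch is arguably the better choice here anyway: each test exercises several calls in sequence, and expected= can only assert one exception per method.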
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index aa60bcc5673..13bae8e048d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -17,20 +17,34 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import junit.framework.TestCase;
-import java.io.*;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.BufferedInputStream;
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.PrintWriter;
+import java.io.RandomAccessFile;
+import java.io.StringWriter;
 import java.net.URI;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Random;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
-import java.util.Random;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -41,18 +55,15 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.*;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.common.Util;
-import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
-import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -60,19 +71,16 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.aspectj.util.FileUtil;
-
-import org.mockito.Mockito;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 
-import static org.apache.hadoop.test.MetricsAsserts.*;
-
 /**
  * This class tests the creation and validation of a checkpoint.
  */
-public class TestEditLog extends TestCase {
+public class TestEditLog {
 
   static {
     ((Log4JLogger)FSEditLog.LOG).getLogger().setLevel(Level.ALL);
@@ -164,6 +172,7 @@ public class TestEditLog extends TestCase {
   /**
    * Test case for an empty edit log from a prior version of Hadoop.
    */
+  @Test
   public void testPreTxIdEditLogNoEdits() throws Exception {
     FSNamesystem namesys = Mockito.mock(FSNamesystem.class);
     namesys.dir = Mockito.mock(FSDirectory.class);
@@ -177,6 +186,7 @@
    * Test case for loading a very simple edit log from a format
    * prior to the inclusion of edit transaction IDs in the log.
    */
+  @Test
   public void testPreTxidEditLogWithEdits() throws Exception {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
@@ -205,6 +215,7 @@
   /**
    * Simple test for writing to and rolling the edit log.
    */
+  @Test
   public void testSimpleEditLog() throws IOException {
     // start a cluster
     Configuration conf = new HdfsConfiguration();
@@ -249,6 +260,7 @@
   /**
    * Tests transaction logging in dfs.
    */
+  @Test
   public void testMultiThreadedEditLog() throws IOException {
     testEditLog(2048);
     // force edit buffer to automatically sync on each log of edit log entry
@@ -398,6 +410,7 @@
     }).get();
   }
 
+  @Test
   public void testSyncBatching() throws Exception {
     // start a cluster
     Configuration conf = new HdfsConfiguration();
@@ -460,6 +473,7 @@
    * This sequence is legal and can occur if enterSafeMode() is closely
    * followed by saveNamespace.
    */
+  @Test
   public void testBatchedSyncWithClosedLogs() throws Exception {
     // start a cluster
     Configuration conf = new HdfsConfiguration();
@@ -499,6 +513,7 @@
     }
   }
 
+  @Test
   public void testEditChecksum() throws Exception {
     // start a cluster
     Configuration conf = new HdfsConfiguration();
@@ -542,6 +557,7 @@
    * Test what happens if the NN crashes when it has has started but
    * had no transactions written.
    */
+  @Test
   public void testCrashRecoveryNoTransactions() throws Exception {
     testCrashRecovery(0);
   }
@@ -550,6 +566,7 @@
    * Test what happens if the NN crashes when it has has started and
    * had a few transactions written
    */
+  @Test
   public void testCrashRecoveryWithTransactions() throws Exception {
     testCrashRecovery(150);
   }
@@ -659,22 +676,26 @@
   }
 
   // should succeed - only one corrupt log dir
+  @Test
   public void testCrashRecoveryEmptyLogOneDir() throws Exception {
     doTestCrashRecoveryEmptyLog(false, true, true);
   }
 
   // should fail - seen_txid updated to 3, but no log dir contains txid 3
+  @Test
   public void testCrashRecoveryEmptyLogBothDirs() throws Exception {
     doTestCrashRecoveryEmptyLog(true, true, false);
   }
 
   // should succeed - only one corrupt log dir
+  @Test
   public void testCrashRecoveryEmptyLogOneDirNoUpdateSeenTxId()
       throws Exception {
     doTestCrashRecoveryEmptyLog(false, false, true);
   }
 
   // should succeed - both log dirs corrupt, but seen_txid never updated
+  @Test
   public void testCrashRecoveryEmptyLogBothDirsNoUpdateSeenTxId()
       throws Exception {
     doTestCrashRecoveryEmptyLog(true, false, true);
@@ -822,6 +843,7 @@
     }
   }
 
+  @Test
   public void testFailedOpen() throws Exception {
     File logDir = new File(TEST_DIR, "testFailedOpen");
     logDir.mkdirs();
@@ -843,6 +865,7 @@
    * Regression test for HDFS-1112/HDFS-3020. Ensures that, even if
    * logSync isn't called periodically, the edit log will sync itself.
    */
+  @Test
   public void testAutoSync() throws Exception {
     File logDir = new File(TEST_DIR, "testAutoSync");
     logDir.mkdirs();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
index d39df4030d8..de4b471110b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
@@ -26,15 +26,15 @@ import java.io.File;
 import java.io.IOException;
 import java.nio.channels.FileChannel;
 
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DU;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
+import org.apache.hadoop.util.StringUtils;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
index 819614993d4..17aacaac885 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
@@ -17,41 +17,41 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.apache.commons.logging.Log;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.spy;
 
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
-
-import java.io.*;
+import java.io.File;
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicReference;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.permission.*;
-
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
-
-import static org.junit.Assert.*;
 import org.junit.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
-import static org.mockito.Mockito.*;
-
 /**
  * This class tests various synchronization bugs in FSEditLog rolling
  * and namespace saving.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
index 5e828b65e14..9feeada276a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index a54df2ca818..c59c4128039 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.hdfs.server.namenode;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
 
 import java.io.BufferedInputStream;
 import java.io.File;
@@ -29,8 +31,6 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.channels.FileChannel;
-import java.util.Map;
-import java.util.Set;
 import java.util.SortedMap;
 
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -44,19 +44,12 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.log4j.Level;
 import org.junit.Test;
 
 import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import com.google.common.io.Files;
-
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.spy;
 
 public class TestFSEditLogLoader {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java
index 649c4152871..01d54b814db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java
@@ -17,27 +17,22 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
-
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.spy;
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
-import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
-
-import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.junit.Test;
-import org.mockito.Mockito;
 
 public class TestFSImageStorageInspector {
   private static final Log LOG = LogFactory.getLog(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index de3a89c0834..f0c5c688f43 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-import static org.junit.Assert.*;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
+import static org.junit.Assert.assertEquals;
 
 import java.io.IOException;
 import java.net.URI;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
index 84bb8f0ed9a..c03dd3f96e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
@@ -17,47 +17,36 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_FAIL;
+import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_ROLL;
+import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.setupEdits;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Arrays;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Iterator;
-
-import java.io.RandomAccessFile;
 import java.io.File;
 import java.io.FilenameFilter;
-import java.io.BufferedInputStream;
-import java.io.DataInputStream;
 import java.io.IOException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.security.SecurityUtil;
-import org.junit.Test;
+import java.io.RandomAccessFile;
+import java.net.URI;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.JournalManager.CorruptionException;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.setupEdits;
-import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec;
-import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_ROLL;
-import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_FAIL;
+import org.junit.Test;
 
+import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.TreeMultiset;
-import com.google.common.base.Joiner;
-
-import java.util.zip.CheckedInputStream;
-import java.util.zip.Checksum;
 
 public class TestFileJournalManager {
   static final Log LOG = LogFactory.getLog(TestFileJournalManager.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java
index 43e1c157b5a..7fd6f47ad5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java
@@ -17,27 +17,28 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.Random;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.junit.Test;
 
 /**
  * This class tests that a file system adheres to the limit of
  * maximum number of files that is configured.
  */
-public class TestFileLimit extends TestCase {
+public class TestFileLimit {
   static final long seed = 0xDEADBEEFL;
   static final int blockSize = 8192;
   boolean simulatedStorage = false;
@@ -75,6 +76,7 @@ public class TestFileLimit extends TestCase {
   /**
    * Test that file data becomes available before file is closed.
   */
+  @Test
   public void testFileLimit() throws IOException {
     Configuration conf = new HdfsConfiguration();
     int maxObjects = 5;
@@ -166,6 +168,7 @@ public class TestFileLimit extends TestCase {
     }
   }
 
+  @Test
   public void testFileLimitSimulated() throws IOException {
     simulatedStorage = true;
     testFileLimit();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
index cef0a0db879..fd79ad151f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
 import static org.junit.Assert.assertEquals;
 import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.mock;
@@ -30,12 +31,11 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
 import org.junit.Before;
 import org.junit.Test;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index aec35a7914e..b79904518ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -18,7 +18,11 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 import java.io.BufferedReader;
 import java.io.ByteArrayOutputStream;
@@ -57,7 +61,6 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
-import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
index f21f65ee013..62b75621f06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
@@ -17,17 +17,17 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.junit.Test;
-
+import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
-import static org.junit.Assert.*;
 
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import java.io.IOException;
 import java.net.URI;
 import java.util.Collection;
-import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.Test;
 
 public class TestGenericJournalConf {
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
index 62fac1998f0..d040278c5ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
index e87cad63b0c..0882d18386c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
@@ -25,10 +25,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
index 3443fa814f4..f124879b1a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.assertTrue;
+
 import java.io.File;
 import java.io.IOException;
 import java.io.RandomAccessFile;
@@ -25,9 +27,6 @@ import java.nio.channels.FileChannel;
 import java.util.Collection;
 import java.util.Random;
 
-import org.junit.Test;
-import static org.junit.Assert.assertTrue;
-
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -35,13 +34,14 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.BlockMissingException;
 import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.util.StringUtils;
+import org.junit.Test;
 
 /**
  * This class tests the listCorruptFileBlocks API.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
index 9ec5d95ba6c..2f2b6888fca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
@@ -17,27 +17,26 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.junit.Test;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import java.io.BufferedReader;
-import java.io.FileInputStream;
-import java.io.DataInputStream;
-import java.io.InputStreamReader;
-import java.io.IOException;
-import java.lang.InterruptedException;
-import java.util.Random;
 import static org.junit.Assert.assertTrue;
+
+import java.io.BufferedReader;
+import java.io.DataInputStream;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.Random;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
 
 /**
  * This class tests the creation and validation of metasave
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
index e7a9cc1d49a..d25676891a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
@@ -17,6 +17,11 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
+import static org.apache.hadoop.test.GenericTestUtils.assertGlobEquals;
+
 import java.io.File;
 import java.io.IOException;
 
@@ -31,11 +36,6 @@
 import org.junit.Test;
 
 import com.google.common.base.Joiner;
 
-import static org.apache.hadoop.test.GenericTestUtils.assertGlobEquals;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
-
 /**
  * Functional tests for NNStorageRetentionManager. This differs from
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
index 4c6334f53ad..df704998889 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
+
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
@@ -25,13 +29,9 @@ import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
+import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
-
 import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
 import org.junit.Assert;
 import org.junit.Before;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java
index 89716910d9e..8d635f63bc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 import org.junit.Test;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index fcbc4890178..ab013b5fbfb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 import java.lang.management.ManagementFactory;
@@ -28,17 +29,15 @@ import java.util.Map;
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 
+import junit.framework.Assert;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.util.VersionInfo;
-
 import org.junit.Test;
 import org.mortbay.util.ajax.JSON;
 
-import junit.framework.Assert;
-
 /**
  * Class for testing {@link NameNodeMXBean} implementation
 */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
index f25e0c1888c..128bd5f085c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
@@ -18,16 +18,18 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
+
 import java.io.File;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.util.HashSet;
 import java.util.Set;
 
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.spy;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -37,10 +39,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
-import org.apache.hadoop.hdfs.server.namenode.FSImage;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.StringUtils;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
index 53eb88402d2..e73d71aff70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.File;
 import java.io.IOException;
 import java.net.URISyntaxException;
@@ -34,10 +38,6 @@ import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
 public class TestNameNodeResourceChecker {
   private Configuration conf;
   private File baseDir;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
index 49a96e9b66d..6e0657c8d6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
index 239ff781232..2883f99f692 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
@@ -18,12 +18,12 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.assertTrue;
+
 import java.io.File;
 import java.util.ArrayList;
 import java.util.List;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -34,13 +34,14 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.junit.Test;
 
 /**
  * This tests InterDataNodeProtocol for block handling.
 */
-public class TestNamenodeCapacityReport extends TestCase {
+public class TestNamenodeCapacityReport {
   private static final Log LOG = LogFactory.getLog(TestNamenodeCapacityReport.class);
 
   /**
@@ -48,6 +49,7 @@ public class TestNamenodeCapacityReport extends TestCase {
    * It verifies the block information from a datanode.
    * Then, it updates the block with new information and verifies again.
   */
+  @Test
   public void testVolumeSize() throws Exception {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
index 4b8409535c1..f9ba34e15f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
@@ -18,32 +18,37 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
-import junit.framework.TestCase;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.util.Collections;
+import java.util.List;
+
+import junit.framework.AssertionFailedError;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
-
-import java.util.Collections;
-import java.util.List;
-
-import java.io.File;
+import org.junit.Test;
 
 /**
  * A JUnit test for checking if restarting DFS preserves integrity.
  * Specifically with FSImage being written in parallel
 */
-public class TestParallelImageWrite extends TestCase {
+public class TestParallelImageWrite {
   private static final int NUM_DATANODES = 4;
 
   /** check if DFS remains in proper condition after a restart */
+  @Test
   public void testRestartDFS() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
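One detail worth flagging in TestParallelImageWrite: the class drops TestCase but its new import block still pulls in junit.framework.AssertionFailedError, presumably because the test throws it explicitly when image directories differ. That compiles fine under JUnit 4, but the JUnit 4-native equivalent would be java.lang.AssertionError, which is what org.junit.Assert itself throws; the message below is illustrative, not from the patch:

    // JUnit 3 leftover kept by this patch:
    throw new junit.framework.AssertionFailedError("fsimage differs across storage dirs");

    // Plain JUnit 4 style:
    throw new AssertionError("fsimage differs across storage dirs");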
* Specifically with FSImage being written in parallel */ -public class TestParallelImageWrite extends TestCase { +public class TestParallelImageWrite { private static final int NUM_DATANODES = 4; /** check if DFS remains in proper condition after a restart */ + @Test public void testRestartDFS() throws Exception { final Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java index 506630fcb3c..9a712ef28b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java @@ -17,15 +17,13 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.hadoop.hdfs.server.namenode.INode; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.fs.Path; - -import org.junit.Test; +import static org.junit.Assert.assertTrue; import java.util.Arrays; -import static org.junit.Assert.*; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSUtil; +import org.junit.Test; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java index ce887007084..d0764b98da6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java @@ -23,7 +23,6 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Matchers.anyObject; -import static org.mockito.Matchers.anyLong; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.spy; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java index 22857c902e7..e3056e9b0bf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java @@ -17,28 +17,30 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import junit.framework.TestCase; -import java.io.*; +import static org.junit.Assert.assertEquals; + +import java.io.File; +import java.io.IOException; import java.net.URI; import java.util.Iterator; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; - import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.io.Text; import 
org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; +import org.junit.Test; /** * This class tests the creation and validation of a checkpoint. */ -public class TestSecurityTokenEditLog extends TestCase { +public class TestSecurityTokenEditLog { static final int NUM_DATA_NODES = 1; // This test creates NUM_THREADS threads and each thread does @@ -85,6 +87,7 @@ public class TestSecurityTokenEditLog extends TestCase { /** * Tests transaction logging in dfs. */ + @Test public void testEditLog() throws IOException { // start a cluster diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index 717dc74627d..90fa4d475f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -19,6 +19,10 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption.IMPORT; import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; @@ -29,8 +33,6 @@ import java.util.Iterator; import java.util.List; import java.util.Random; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -41,7 +43,6 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; - import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -56,13 +57,15 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.util.MD5FileUtils; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.util.StringUtils; -import org.junit.Assert; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; /** * Startup and checkpoint tests * */ -public class TestStartup extends TestCase { +public class TestStartup { public static final String NAME_NODE_HOST = "localhost:"; public static final String WILDCARD_HTTP_HOST = "0.0.0.0:"; private static final Log LOG = @@ -88,8 +91,8 @@ public class TestStartup extends TestCase { } - @Override - protected void setUp() throws Exception { + @Before + public void setUp() throws Exception { config = new HdfsConfiguration(); hdfsDir = new File(MiniDFSCluster.getBaseDirectory()); @@ -115,7 +118,7 @@ public class TestStartup extends TestCase { /** * clean up */ - @Override + @After public void tearDown() throws Exception { if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) { throw new IOException("Could not delete hdfs directory in tearDown '" + hdfsDir + "'"); @@ -258,6 +261,7 @@ public class TestStartup extends TestCase { * checkpoint for edits and image is the same directory * @throws IOException */ + @Test public void testChkpointStartup2() throws IOException{ LOG.info("--starting checkpointStartup2 - same directory for checkpoint"); // different name dirs @@ -283,6 +287,7 
@@ public class TestStartup extends TestCase { * checkpoint for edits and image are different directories * @throws IOException */ + @Test public void testChkpointStartup1() throws IOException{ //setUpConfig(); LOG.info("--starting testStartup Recovery"); @@ -307,6 +312,7 @@ public class TestStartup extends TestCase { * secondary node copies fsimage and edits into correct separate directories. * @throws IOException */ + @Test public void testSNNStartup() throws IOException{ //setUpConfig(); LOG.info("--starting SecondNN startup test"); @@ -370,6 +376,7 @@ public class TestStartup extends TestCase { } } + @Test public void testCompression() throws IOException { LOG.info("Test compressing image."); Configuration conf = new Configuration(); @@ -426,6 +433,7 @@ public class TestStartup extends TestCase { namenode.join(); } + @Test public void testImageChecksum() throws Exception { LOG.info("Test uncompressed image checksum"); testImageChecksum(false); @@ -493,6 +501,7 @@ public class TestStartup extends TestCase { * restarts, the still alive datanodes should not have any trouble in getting * registrant again. */ + @Test public void testNNRestart() throws IOException, InterruptedException { MiniDFSCluster cluster = null; FileSystem localFileSys; @@ -527,7 +536,7 @@ public class TestStartup extends TestCase { cluster.restartNameNode(); NamenodeProtocols nn = cluster.getNameNodeRpc(); assertNotNull(nn); - Assert.assertTrue(cluster.isDataNodeUp()); + assertTrue(cluster.isDataNodeUp()); DatanodeInfo[] info = nn.getDatanodeReport(DatanodeReportType.LIVE); for (int i = 0 ; i < 5 && info.length != numDatanodes; i++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java index 1a40159bc9c..2fb784d46f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.net.URI; import java.util.Collections; -import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -67,7 +69,7 @@ public class TestStartupOptionUpgrade { public void testStartupOptUpgradeFrom204() throws Exception { layoutVersion = Feature.RESERVED_REL20_204.getLayoutVersion(); storage.processStartupOptionsForUpgrade(startOpt, layoutVersion); - Assert.assertTrue("Clusterid should start with CID", storage.getClusterID() + assertTrue("Clusterid should start with CID", storage.getClusterID() .startsWith("CID")); } @@ -83,7 +85,7 @@ public class TestStartupOptionUpgrade { startOpt.setClusterId("cid"); layoutVersion = Feature.RESERVED_REL22.getLayoutVersion(); storage.processStartupOptionsForUpgrade(startOpt, layoutVersion); - Assert.assertEquals("Clusterid should match with the given clusterid", + assertEquals("Clusterid should match with the given clusterid", "cid", storage.getClusterID()); } @@ -101,7 +103,7 @@ public class TestStartupOptionUpgrade { storage.setClusterID("currentcid"); layoutVersion = Feature.FEDERATION.getLayoutVersion(); storage.processStartupOptionsForUpgrade(startOpt, layoutVersion); - Assert.assertEquals("Clusterid 
should match with the existing one", + assertEquals("Clusterid should match with the existing one", "currentcid", storage.getClusterID()); } @@ -119,7 +121,7 @@ public class TestStartupOptionUpgrade { storage.setClusterID("currentcid"); layoutVersion = Feature.FEDERATION.getLayoutVersion(); storage.processStartupOptionsForUpgrade(startOpt, layoutVersion); - Assert.assertEquals("Clusterid should match with the existing one", + assertEquals("Clusterid should match with the existing one", "currentcid", storage.getClusterID()); } @@ -137,7 +139,7 @@ public class TestStartupOptionUpgrade { storage.setClusterID("currentcid"); layoutVersion = Feature.FEDERATION.getLayoutVersion(); storage.processStartupOptionsForUpgrade(startOpt, layoutVersion); - Assert.assertEquals("Clusterid should match with the existing one", + assertEquals("Clusterid should match with the existing one", "currentcid", storage.getClusterID()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java index 412ab4a4611..de5d1d0c1f8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java @@ -18,9 +18,14 @@ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName; +import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; +import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.spy; import java.io.File; import java.io.IOException; @@ -28,9 +33,6 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.Set; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.spy; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.cli.CLITestCmdDFS; @@ -45,11 +47,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream; - -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName; -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; - import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java index 2ac39151bb1..ae3f1a9459e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.*; +import 
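The convention applied across every hunk in this patch: wildcard static imports such as import static org.junit.Assert.*; are expanded into one explicit import per assertion the file actually uses. A minimal self-contained sketch of the resulting style (the class below is hypothetical, purely for illustration, not part of the patch):

    import static org.junit.Assert.assertEquals;
    import static org.junit.Assert.assertTrue;

    import org.junit.Test;

    // Hypothetical test class illustrating the explicit-import convention:
    // each assertion used in the file appears exactly once in the import
    // list, so unused assertions cannot linger and the dependency on each
    // Assert method stays visible and greppable.
    public class ExplicitAssertImportSketch {
      @Test
      public void additionWorks() {
        assertEquals(4, 2 + 2);
        assertTrue("sum should be positive", 2 + 2 > 0);
      }
    }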
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
index 2ac39151bb1..ae3f1a9459e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
index 37e29679659..4e9efd5a57e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
@@ -17,19 +17,21 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
-import org.junit.Test;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.File;
 import java.io.IOException;
-import junit.framework.Assert;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Test;
 
 /**
  * This class tests the validation of the configuration object when passed
@@ -53,7 +55,7 @@ public class TestValidateConfigurationSettings {
     DFSTestUtil.formatNameNode(conf);
     try {
       NameNode nameNode = new NameNode(conf);
-      Assert.fail("Should have throw the exception since the ports match");
+      fail("Should have thrown the exception since the ports match");
     } catch (IOException e) {
       // verify we're getting the right IOException
       assertTrue(e.toString().contains("dfs.namenode.rpc-address ("));
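The TestValidateConfigurationSettings hunk above keeps JUnit's classic try/fail/catch idiom for expected exceptions while switching to static imports. A minimal runnable sketch of that idiom under the new import style (the class and helper below are hypothetical stand-ins, not code from the patch):

    import static org.junit.Assert.assertTrue;
    import static org.junit.Assert.fail;

    import java.io.IOException;

    import org.junit.Test;

    public class ExpectedExceptionSketch {
      @Test
      public void rejectsMatchingPorts() {
        try {
          startNameNodeWithClashingPorts(); // stand-in for the real constructor call
          fail("Should have thrown the exception since the ports match");
        } catch (IOException e) {
          // verify we're getting the right IOException
          assertTrue(e.toString().contains("dfs.namenode.rpc-address ("));
        }
      }

      // Hypothetical helper that simulates the failure the real test expects.
      private void startNameNodeWithClashingPorts() throws IOException {
        throw new IOException("dfs.namenode.rpc-address (placeholder) clashes");
      }
    }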
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
index 4f93f4f43b5..2dc3d1d616a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
@@ -17,11 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import java.io.ByteArrayInputStream;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
-import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 
@@ -36,20 +38,16 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import com.google.common.base.Suppliers;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 
-import static org.junit.Assert.*;
-
 public class TestBootstrapStandby {
   private static final Log LOG = LogFactory.getLog(TestBootstrapStandby.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java
index ccc46a204b3..4f213b24055 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java
@@ -17,18 +17,20 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
index 52e136940de..98ec33e5d24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.util.concurrent.TimeoutException;
 
@@ -26,11 +27,11 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.ha.ClientBaseWithFixes;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.HealthMonitor;
+import org.apache.hadoop.ha.TestNodeFencer.AlwaysSucceedFencer;
 import org.apache.hadoop.ha.ZKFCTestUtil;
 import org.apache.hadoop.ha.ZKFailoverController;
-import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
-import org.apache.hadoop.ha.TestNodeFencer.AlwaysSucceedFencer;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
@@ -44,7 +45,6 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-
 import com.google.common.base.Supplier;
 
 public class TestDFSZKFailoverController extends ClientBaseWithFixes {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
index 2a144b88c9c..96a890e2228 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
@@ -41,12 +41,12 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
index f79ab109ce4..cd090cbe6cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
@@ -17,7 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
@@ -32,6 +37,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -56,7 +62,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 
 import com.google.common.base.Joiner;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 
 /**
  * Test case for client support of delegation tokens in an HA cluster.
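Every converted file in this patch follows the same JUnit 3 to JUnit 4 migration: drop extends TestCase, annotate each test method with @Test, and turn the protected setUp()/tearDown() overrides into public @Before/@After methods. A compact sketch of the before/after shape, assuming only JUnit 4 on the classpath (TestFoo is a made-up name, not a class in the patch):

    // JUnit 3 (before): the runner found test* methods through TestCase inheritance.
    //
    //   public class TestFoo extends junit.framework.TestCase {
    //     protected void setUp() throws Exception { ... }
    //     protected void tearDown() throws Exception { ... }
    //     public void testSomething() { ... }
    //   }
    //
    // JUnit 4 (after): annotations replace inheritance. An unannotated test*
    // method is silently skipped, which is why each converted test method in
    // the hunks above gains a "+  @Test" line.
    import static org.junit.Assert.assertTrue;

    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class TestFoo {
      private StringBuilder fixture;

      @Before // replaces the protected setUp() override
      public void setUp() {
        fixture = new StringBuilder("ready");
      }

      @Test // required, or JUnit 4 will not run this method
      public void testSomething() {
        assertTrue(fixture.toString().startsWith("ready"));
      }

      @After // replaces the protected tearDown() override
      public void tearDown() {
        fixture = null;
      }
    }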
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java index bc5c487a766..8675fa3fc6f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java @@ -22,8 +22,6 @@ import static org.junit.Assert.assertTrue; import java.io.File; import java.io.IOException; import java.net.URI; -import java.util.List; -import java.util.concurrent.TimeoutException; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java index 794a3b6bf35..dd5c1bab75c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import static org.junit.Assert.*; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import java.io.File; import java.io.FileOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java index ed5b8e76e23..9506a2f399a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java @@ -19,7 +19,10 @@ package org.apache.hadoop.hdfs.server.namenode.ha; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java index 9aa01221b3e..e8647fdde1a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; -import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; import org.apache.hadoop.hdfs.tools.DFSck; import org.apache.hadoop.util.ToolRunner; import org.junit.Test; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java index abd7c72f17d..ab3f3ca4f75 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java @@ -17,8 +17,10 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import static org.apache.hadoop.hdfs.DFSConfigKeys.*; -import static org.junit.Assert.*; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; import java.io.IOException; import java.net.URI; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java index 10218f218ec..49d89592b8a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java @@ -17,13 +17,12 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.ByteArrayOutputStream; import java.io.PrintStream; -import junit.framework.Assert; - import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; @@ -96,7 +95,7 @@ public class TestHAFsck { new String[]{"/", "-files"}); String result = bStream.toString(); System.out.println("output from fsck:\n" + result); - Assert.assertEquals(0, errCode); + assertEquals(0, errCode); assertTrue(result.contains("/test1")); assertTrue(result.contains("/test2")); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java index c4abfd53f46..b147f4fd1b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.List; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java index e44ebc9b4d9..83bd7bf13b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java @@ -37,13 +37,12 @@ import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; import org.apache.hadoop.ha.HAServiceProtocol.RequestSource; +import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAWebUI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAWebUI.java index be014301179..da6ec3d872c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAWebUI.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAWebUI.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import static org.junit.Assert.*; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.net.URL; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java index 7bc49f7d9d7..72110b29a84 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java @@ -17,6 +17,12 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.File; import java.io.IOException; import java.net.URISyntaxException; @@ -42,8 +48,6 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; -import static org.junit.Assert.*; - public class TestInitializeSharedEdits { private static final Log LOG = LogFactory.getLog(TestInitializeSharedEdits.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java index e5b53ba3cfc..ab2a8dd0614 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java @@ -22,8 +22,6 @@ import static org.junit.Assert.fail; import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; -import org.apache.hadoop.ha.HAServiceProtocol.RequestSource; import org.apache.hadoop.ha.HealthCheckFailedException; import org.apache.hadoop.hdfs.MiniDFSCluster; import 
org.apache.hadoop.hdfs.MiniDFSNNTopology; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestQuotasWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestQuotasWithHA.java index 5800d3a351d..6ceecc79bb6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestQuotasWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestQuotasWithHA.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java index 3fa89105a52..61016c9540e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.File; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java index 45b3b02997c..c029a673ad3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java @@ -17,25 +17,28 @@ */ package org.apache.hadoop.hdfs.server.namenode.metrics; +import static org.apache.hadoop.test.MetricsAsserts.assertCounter; +import static org.apache.hadoop.test.MetricsAsserts.getMetrics; + import java.io.IOException; import java.util.Random; -import junit.framework.TestCase; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.HdfsConfiguration; -import static org.apache.hadoop.test.MetricsAsserts.*; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; /** * Test case for FilesInGetListingOps metric in Namenode */ -public class TestNNMetricFilesInGetListingOps extends TestCase { +public class TestNNMetricFilesInGetListingOps { private static final Configuration CONF = new HdfsConfiguration(); private static final String NN_METRICS = "NameNodeActivity"; static { @@ -49,16 +52,16 @@ public class TestNNMetricFilesInGetListingOps extends TestCase { private DistributedFileSystem fs; private Random rand = new Random(); - @Override - protected void setUp() throws Exception { + @Before + public void setUp() 
throws Exception { cluster = new MiniDFSCluster.Builder(CONF).build(); cluster.waitActive(); cluster.getNameNode(); fs = (DistributedFileSystem) cluster.getFileSystem(); } - @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws Exception { cluster.shutdown(); } @@ -69,6 +72,7 @@ public class TestNNMetricFilesInGetListingOps extends TestCase { } + @Test public void testFilesInGetListingOps() throws Exception { createFile("/tmp1/t1", 3200, (short)3); createFile("/tmp1/t2", 3200, (short)3); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java index 20234e57068..9e2d7c3f58f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java @@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode.metrics; import static org.apache.hadoop.test.MetricsAsserts.assertCounter; import static org.apache.hadoop.test.MetricsAsserts.assertGauge; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; -import static org.junit.Assert.*; +import static org.junit.Assert.assertTrue; import java.io.DataInputStream; import java.io.IOException; @@ -37,8 +37,8 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java index 71dcce49e2d..61e8ebef5c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java @@ -18,29 +18,30 @@ package org.apache.hadoop.hdfs.tools; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; -import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; -import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; import org.apache.hadoop.ha.HAServiceProtocol.RequestSource; +import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; import org.apache.hadoop.ha.HAServiceStatus; import 
org.apache.hadoop.ha.HAServiceTarget; import org.apache.hadoop.ha.HealthCheckFailedException; import org.apache.hadoop.ha.ZKFCProtocol; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.test.MockitoUtil; - import org.junit.Before; import org.junit.Test; import org.mockito.ArgumentCaptor; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java index a2f4791a8c3..38380b55f98 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java @@ -17,26 +17,26 @@ */ package org.apache.hadoop.hdfs.tools; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; import java.io.PrintStream; -import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ha.HAAdmin; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; -import org.apache.hadoop.ha.HAAdmin; -import org.apache.hadoop.ha.NodeFencer; import org.apache.log4j.Level; - import org.junit.After; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java index d55a2583b99..652979eab48 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java @@ -17,6 +17,16 @@ */ package org.apache.hadoop.hdfs.tools; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; @@ -27,15 +37,9 @@ import java.util.List; import java.util.Map; import java.util.StringTokenizer; -import static org.junit.Assert.*; - -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.*; - import org.apache.hadoop.hdfs.DFSUtil; import 
org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.tools.GetConf; import org.apache.hadoop.hdfs.tools.GetConf.Command; import org.apache.hadoop.hdfs.tools.GetConf.CommandHandler; import org.apache.hadoop.net.NetUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java index a6746a249b6..aabd5f685e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java @@ -18,29 +18,25 @@ package org.apache.hadoop.hdfs.tools.offlineEditsViewer; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.File; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.util.Map; -import java.util.HashMap; - -import org.junit.Test; -import org.junit.Before; - import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.util.HashMap; +import java.util.Map; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - -import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes; -import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer; -import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer.Flags; import org.apache.hadoop.hdfs.DFSTestUtil; - +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes; import org.apache.hadoop.hdfs.server.namenode.OfflineEditsViewerHelper; +import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer.Flags; +import org.junit.Before; +import org.junit.Test; public class TestOfflineEditsViewer { private static final Log LOG = LogFactory.getLog(TestOfflineEditsViewer.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java index 5417c5387bb..a5501d97547 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java @@ -17,20 +17,23 @@ */ package org.apache.hadoop.hdfs.tools.offlineImageViewer; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.fail; + import java.io.BufferedReader; import java.io.File; import java.io.FileReader; import java.io.IOException; -import junit.framework.TestCase; - import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement; +import org.junit.Test; /** * Test that the DelimitedImageVisistor gives the expected output based * on predetermined inputs */ -public class TestDelimitedImageVisitor extends TestCase { +public class TestDelimitedImageVisitor { private static String ROOT = System.getProperty("test.build.data","/tmp"); private static final String 
delim = "--"; @@ -44,6 +47,7 @@ public class TestDelimitedImageVisitor extends TestCase { sb.append(delim); } + @Test public void testDelimitedImageVisistor() { String filename = ROOT + "/testDIV"; File f = new File(filename); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOIVCanReadOldVersions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOIVCanReadOldVersions.java index 4cfed3c3bba..dcdc6225630 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOIVCanReadOldVersions.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOIVCanReadOldVersions.java @@ -17,16 +17,18 @@ */ package org.apache.hadoop.hdfs.tools.offlineImageViewer; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + import java.io.IOException; import java.util.Collections; import java.util.HashSet; import java.util.Set; import org.apache.hadoop.hdfs.tools.offlineImageViewer.SpotCheckImageVisitor.ImageInfo; +import org.junit.Test; -import junit.framework.TestCase; - -public class TestOIVCanReadOldVersions extends TestCase { +public class TestOIVCanReadOldVersions { // Location of fsimage files during testing. public static final String TEST_CACHE_DATA_DIR = System.getProperty("test.cache.data", "build/test/cache"); @@ -35,6 +37,7 @@ public class TestOIVCanReadOldVersions extends TestCase { // layout versions. These fsimages were previously generated and stored // with the test. Test success indicates that no changes have been made // to the OIV that causes older fsimages to be incorrectly processed. + @Test public void testOldFSImages() { // Define the expected values from the prior versions, as they were created // and verified at time of creation diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java index 2a5b1b671de..4f4cecd5007 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java @@ -17,11 +17,12 @@ */ package org.apache.hadoop.hdfs.tools.offlineImageViewer; -import java.io.BufferedReader; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.security.token.Token; +import java.io.BufferedReader; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.EOFException; @@ -37,20 +38,23 @@ import java.util.LinkedList; import java.util.List; import java.util.Set; -import org.junit.*; -import static org.junit.Assert.*; - +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import 
org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; -import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.security.token.Token; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java index 0180732af0e..9549356a7b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java @@ -17,7 +17,10 @@ */ package org.apache.hadoop.hdfs.util; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.File; import java.io.FileNotFoundException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCyclicIteration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCyclicIteration.java index 60451eb1ff4..2aba515550f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCyclicIteration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCyclicIteration.java @@ -16,6 +16,7 @@ * limitations under the License. 
*/ package org.apache.hadoop.hdfs.util; +import static org.junit.Assert.assertEquals; import java.util.ArrayList; import java.util.Arrays; @@ -24,7 +25,10 @@ import java.util.Map; import java.util.NavigableMap; import java.util.TreeMap; -public class TestCyclicIteration extends junit.framework.TestCase { +import org.junit.Test; + +public class TestCyclicIteration { + @Test public void testCyclicIteration() throws Exception { for(int n = 0; n < 5; n++) { checkCyclicIteration(n); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java index 8d2edf3e2e6..8d9a3f9bbf8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java @@ -17,7 +17,9 @@ */ package org.apache.hadoop.hdfs.util; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertSame; import java.nio.ByteBuffer; import java.util.List; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestExactSizeInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestExactSizeInputStream.java index c64427f77e6..02787be69fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestExactSizeInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestExactSizeInputStream.java @@ -17,14 +17,15 @@ */ package org.apache.hadoop.hdfs.util; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.fail; import java.io.ByteArrayInputStream; import java.io.EOFException; import java.io.IOException; import java.io.InputStream; -import org.apache.hadoop.hdfs.util.ExactSizeInputStream; import org.junit.Test; public class TestExactSizeInputStream { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java index 9465e46971a..d8d9c7379ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java @@ -17,20 +17,24 @@ */ package org.apache.hadoop.hdfs.util; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + import java.util.ArrayList; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Random; -import org.junit.Test; -import org.junit.Before; -import static org.junit.Assert.*; - - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.util.Time; +import org.junit.Before; +import org.junit.Test; public class TestLightWeightHashSet{ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java 
index 5182f164b89..1ccbccf53f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java
@@ -17,19 +17,21 @@
  */
 package org.apache.hadoop.hdfs.util;

+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Random;

-import org.junit.Test;
-import org.junit.Before;
-import static org.junit.Assert.*;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.util.Time;
+import org.junit.Before;
+import org.junit.Test;

 public class TestLightWeightLinkedSet {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
index a43774ab33c..6f5b1613360 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs.util;

+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.FileWriter;
@@ -24,13 +28,10 @@ import java.io.IOException;

 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.junit.Before;
 import org.junit.Test;

-import static org.junit.Assert.*;
-
 public class TestMD5FileUtils {
   private static final File TEST_DIR_ROOT = new File(
       System.getProperty("test.build.data","build/test/data"));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
index 015ce4dde2b..aef467a0ef2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
@@ -18,6 +18,11 @@
 package org.apache.hadoop.hdfs.web;

+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.mockito.Mockito.mock;
+
 import java.io.IOException;
 import java.net.URI;
 import java.net.URL;
@@ -30,9 +35,9 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.web.resources.DelegationParam;
-import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
+import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.SecurityUtilTestHelper;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -41,11 +46,6 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.junit.Assert;
 import org.junit.Test;

-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.mockito.Mockito.mock;
-
 public class TestWebHdfsUrl {

   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
index a18af908fc6..2c4721b6204 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
@@ -18,17 +18,18 @@
 package org.apache.hadoop.net;

+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.util.HashMap;
 import java.util.Map;

 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-
-import org.junit.Test;
 import org.junit.Before;
-
-import static org.junit.Assert.*;
+import org.junit.Test;

 public class TestNetworkTopology {
   private final static NetworkTopology cluster = new NetworkTopology();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
index 843f4224482..6de0c6952f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
@@ -17,24 +17,32 @@
  */
 package org.apache.hadoop.security;

+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.Random;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.fs.*;
-import org.apache.hadoop.fs.permission.*;
 import org.apache.hadoop.util.StringUtils;
-
-import junit.framework.TestCase;
+import org.junit.Test;

 /** Unit tests for permission */
-public class TestPermission extends TestCase {
+public class TestPermission {
   public static final Log LOG = LogFactory.getLog(TestPermission.class);

   final private static Path ROOT_PATH = new Path("/data");
@@ -65,6 +73,7 @@ public class TestPermission extends TestCase {
    * either set with old param dfs.umask that takes decimal umasks
    * or dfs.umaskmode that takes symbolic or octal umask.
    */
+  @Test
   public void testBackwardCompatibility() {
     // Test 1 - old configuration key with decimal
     // umask value should be handled when set using
@@ -93,6 +102,7 @@ public class TestPermission extends TestCase {
     assertEquals(18, FsPermission.getUMask(conf).toShort());
   }

+  @Test
   public void testCreate() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
@@ -155,6 +165,7 @@ public class TestPermission extends TestCase {
     }
   }

+  @Test
   public void testFilePermision() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
index bd6e524f7c5..7029f42926a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
@@ -34,8 +34,8 @@ import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.mortbay.util.ajax.JSON;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
index c66d0e48b08..6901f6439e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
@@ -18,12 +18,14 @@
 package org.apache.hadoop.tools;

+import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+
 import java.io.File;
 import java.io.IOException;
 import java.util.Random;

-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -33,14 +35,16 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.tools.JMXGet;
-import static org.apache.hadoop.test.MetricsAsserts.*;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;

 /**
  * Startup and checkpoint tests
  *
  */
-public class TestJMXGet extends TestCase {
+public class TestJMXGet {

   private Configuration config;
   private MiniDFSCluster cluster;
@@ -62,15 +66,15 @@

   }

-  @Override
-  protected void setUp() throws Exception {
+  @Before
+  public void setUp() throws Exception {
     config = new HdfsConfiguration();
   }

   /**
    * clean up
    */
-  @Override
+  @After
   public void tearDown() throws Exception {
     if(cluster.isClusterUp())
       cluster.shutdown();
@@ -86,6 +90,7 @@
    * test JMX connection to NameNode..
    * @throws Exception
    */
+  @Test
   public void testNameNode() throws Exception {
     int numDatanodes = 2;
     cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();
@@ -114,6 +119,7 @@
    * test JMX connection to DataNode..
    * @throws Exception
    */
+  @Test
   public void testDataNode() throws Exception {
     int numDatanodes = 2;
     cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();