HDFS-3583. Convert remaining tests to Junit4. Contributed by Andrew Wang.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1362753 13f79535-47bb-0310-9956-ffa450edef68
Aaron Myers 2012-07-18 03:46:28 +00:00
parent c700642308
commit e2253b539e
260 changed files with 2120 additions and 1563 deletions
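Most hunks in this commit apply the same mechanical change: the JUnit 3 junit.framework.Assert import goes away, the needed assertion methods are statically imported from org.junit.Assert, and the calls lose their Assert. qualifier; the remaining imports are regrouped with the static imports first, then java.*, then org.* packages. A minimal sketch of the target style, using a hypothetical test class rather than any file from this change:

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import org.junit.Test;

public class ExampleConvertedTest {

  @Test
  public void sum() {
    int total = 2 + 2;
    // the JUnit 3 style removed by this commit would read: Assert.assertEquals(4, total);
    assertEquals(4, total);
    assertTrue(total > 0);
  }
}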


@ -18,6 +18,18 @@
package org.apache.hadoop.fs.http.client;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Writer;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.ContentSummary;
@ -44,18 +56,6 @@
import org.mortbay.jetty.Server;
import org.mortbay.jetty.webapp.WebAppContext;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Writer;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.Collection;
@RunWith(value = Parameterized.class)
public class TestHttpFSFileSystem extends HFSTestCase {

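TestHttpFSFileSystem (like TestWebhdfsFileSystem and TestServerConstructor further down) runs under @RunWith(value = Parameterized.class), which is why java.util.Arrays and java.util.Collection sit in its import block. As a reminder of the shape such a test takes, here is a minimal JUnit 4 parameterized sketch; the class name, data, and assertion are hypothetical and not taken from the HttpFS tests:

import static org.junit.Assert.assertFalse;

import java.util.Arrays;
import java.util.Collection;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

@RunWith(value = Parameterized.class)
public class ExampleParameterizedTest {

  @Parameters
  public static Collection<Object[]> operations() {
    // each Object[] becomes one constructor call, i.e. one run of every @Test method
    return Arrays.asList(new Object[][]{{"open"}, {"create"}, {"append"}});
  }

  private final String operation;

  public ExampleParameterizedTest(String operation) {
    this.operation = operation;
  }

  @Test
  public void operationNameIsNotEmpty() {
    assertFalse(operation.isEmpty());
  }
}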

@ -18,6 +18,8 @@
package org.apache.hadoop.fs.http.client;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
@ -26,8 +28,6 @@
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.net.URI;
@RunWith(value = Parameterized.class)
public class TestWebhdfsFileSystem extends TestHttpFSFileSystem {


@ -18,15 +18,15 @@
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.junit.Test;
import org.mockito.Mockito;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.junit.Test;
import org.mockito.Mockito;
public class TestCheckUploadContentTypeFilter {
@Test


@ -18,7 +18,23 @@
package org.apache.hadoop.fs.http.server;
import junit.framework.Assert;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.URL;
import java.text.MessageFormat;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
@ -38,20 +54,6 @@
import org.mortbay.jetty.Server;
import org.mortbay.jetty.webapp.WebAppContext;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.URL;
import java.text.MessageFormat;
import java.util.Arrays;
import java.util.List;
public class TestHttpFSServer extends HFSTestCase {
@Test
@ -103,9 +105,9 @@ public List<String> getGroups(String user) throws IOException {
}
private void createHttpFSServer() throws Exception {
File homeDir = TestDirHelper.getTestDir();
Assert.assertTrue(new File(homeDir, "conf").mkdir());
Assert.assertTrue(new File(homeDir, "log").mkdir());
Assert.assertTrue(new File(homeDir, "temp").mkdir());
assertTrue(new File(homeDir, "conf").mkdir());
assertTrue(new File(homeDir, "log").mkdir());
assertTrue(new File(homeDir, "temp").mkdir());
HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
File secretFile = new File(new File(homeDir, "conf"), "secret");
@ -157,23 +159,23 @@ public void instrumentation() throws Exception {
URL url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "nobody"));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_UNAUTHORIZED);
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_UNAUTHORIZED);
url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation",
HadoopUsersConfTestHelper.getHadoopUsers()[0]));
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
String line = reader.readLine();
reader.close();
Assert.assertTrue(line.contains("\"counters\":{"));
assertTrue(line.contains("\"counters\":{"));
url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation",
HadoopUsersConfTestHelper.getHadoopUsers()[0]));
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
}
@Test
@ -187,7 +189,7 @@ public void testHdfsAccess() throws Exception {
URL url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1/?user.name={0}&op=liststatus", user));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
reader.readLine();
reader.close();
@ -208,7 +210,7 @@ public void testGlobFilter() throws Exception {
URL url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
reader.readLine();
reader.close();
@ -228,7 +230,7 @@ public void testPutNoOperation() throws Exception {
conn.setDoInput(true);
conn.setDoOutput(true);
conn.setRequestMethod("PUT");
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
}
}


@ -19,12 +19,14 @@
package org.apache.hadoop.lib.lang;
import junit.framework.Assert;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.concurrent.Callable;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
public class TestRunnableCallable extends HTestCase {
public static class R implements Runnable {
@ -59,14 +61,14 @@ public void runnable() throws Exception {
R r = new R();
RunnableCallable rc = new RunnableCallable(r);
rc.run();
Assert.assertTrue(r.RUN);
assertTrue(r.RUN);
r = new R();
rc = new RunnableCallable(r);
rc.call();
Assert.assertTrue(r.RUN);
assertTrue(r.RUN);
Assert.assertEquals(rc.toString(), "R");
assertEquals(rc.toString(), "R");
}
@Test
@ -74,14 +76,14 @@ public void callable() throws Exception {
C c = new C();
RunnableCallable rc = new RunnableCallable(c);
rc.run();
Assert.assertTrue(c.RUN);
assertTrue(c.RUN);
c = new C();
rc = new RunnableCallable(c);
rc.call();
Assert.assertTrue(c.RUN);
assertTrue(c.RUN);
Assert.assertEquals(rc.toString(), "C");
assertEquals(rc.toString(), "C");
}
@Test(expected = RuntimeException.class)


@ -19,7 +19,9 @@
package org.apache.hadoop.lib.lang;
import junit.framework.Assert;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
@ -37,26 +39,26 @@ public String getTemplate() {
@Test
public void testXException() throws Exception {
XException ex = new XException(TestERROR.TC);
Assert.assertEquals(ex.getError(), TestERROR.TC);
Assert.assertEquals(ex.getMessage(), "TC: {0}");
Assert.assertNull(ex.getCause());
assertEquals(ex.getError(), TestERROR.TC);
assertEquals(ex.getMessage(), "TC: {0}");
assertNull(ex.getCause());
ex = new XException(TestERROR.TC, "msg");
Assert.assertEquals(ex.getError(), TestERROR.TC);
Assert.assertEquals(ex.getMessage(), "TC: msg");
Assert.assertNull(ex.getCause());
assertEquals(ex.getError(), TestERROR.TC);
assertEquals(ex.getMessage(), "TC: msg");
assertNull(ex.getCause());
Exception cause = new Exception();
ex = new XException(TestERROR.TC, cause);
Assert.assertEquals(ex.getError(), TestERROR.TC);
Assert.assertEquals(ex.getMessage(), "TC: " + cause.toString());
Assert.assertEquals(ex.getCause(), cause);
assertEquals(ex.getError(), TestERROR.TC);
assertEquals(ex.getMessage(), "TC: " + cause.toString());
assertEquals(ex.getCause(), cause);
XException xcause = ex;
ex = new XException(xcause);
Assert.assertEquals(ex.getError(), TestERROR.TC);
Assert.assertEquals(ex.getMessage(), xcause.getMessage());
Assert.assertEquals(ex.getCause(), xcause);
assertEquals(ex.getError(), TestERROR.TC);
assertEquals(ex.getMessage(), xcause.getMessage());
assertEquals(ex.getCause(), xcause);
}
}


@ -18,7 +18,10 @@
package org.apache.hadoop.lib.server;
import junit.framework.Assert;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
@ -47,9 +50,9 @@ public Class getInterface() {
@Test
public void baseService() throws Exception {
BaseService service = new MyService();
Assert.assertNull(service.getInterface());
Assert.assertEquals(service.getPrefix(), "myservice");
Assert.assertEquals(service.getServiceDependencies().length, 0);
assertNull(service.getInterface());
assertEquals(service.getPrefix(), "myservice");
assertEquals(service.getServiceDependencies().length, 0);
Server server = Mockito.mock(Server.class);
Configuration conf = new Configuration(false);
@ -60,9 +63,9 @@ public void baseService() throws Exception {
Mockito.when(server.getPrefixedName("myservice.")).thenReturn("server.myservice.");
service.init(server);
Assert.assertEquals(service.getPrefixedName("foo"), "server.myservice.foo");
Assert.assertEquals(service.getServiceConfig().size(), 1);
Assert.assertEquals(service.getServiceConfig().get("foo"), "FOO");
Assert.assertTrue(MyService.INIT);
assertEquals(service.getPrefixedName("foo"), "server.myservice.foo");
assertEquals(service.getServiceConfig().size(), 1);
assertEquals(service.getServiceConfig().get("foo"), "FOO");
assertTrue(MyService.INIT);
}
}


@ -18,16 +18,12 @@
package org.apache.hadoop.lib.server;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.lib.lang.XException;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.test.TestException;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.FileOutputStream;
@ -39,50 +35,60 @@
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.lib.lang.XException;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.test.TestException;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
public class TestServer extends HTestCase {
@Test
@TestDir
public void constructorsGetters() throws Exception {
Server server = new Server("server", "/a", "/b", "/c", "/d", new Configuration(false));
Assert.assertEquals(server.getHomeDir(), "/a");
Assert.assertEquals(server.getConfigDir(), "/b");
Assert.assertEquals(server.getLogDir(), "/c");
Assert.assertEquals(server.getTempDir(), "/d");
Assert.assertEquals(server.getName(), "server");
Assert.assertEquals(server.getPrefix(), "server");
Assert.assertEquals(server.getPrefixedName("name"), "server.name");
Assert.assertNotNull(server.getConfig());
assertEquals(server.getHomeDir(), "/a");
assertEquals(server.getConfigDir(), "/b");
assertEquals(server.getLogDir(), "/c");
assertEquals(server.getTempDir(), "/d");
assertEquals(server.getName(), "server");
assertEquals(server.getPrefix(), "server");
assertEquals(server.getPrefixedName("name"), "server.name");
assertNotNull(server.getConfig());
server = new Server("server", "/a", "/b", "/c", "/d");
Assert.assertEquals(server.getHomeDir(), "/a");
Assert.assertEquals(server.getConfigDir(), "/b");
Assert.assertEquals(server.getLogDir(), "/c");
Assert.assertEquals(server.getTempDir(), "/d");
Assert.assertEquals(server.getName(), "server");
Assert.assertEquals(server.getPrefix(), "server");
Assert.assertEquals(server.getPrefixedName("name"), "server.name");
Assert.assertNull(server.getConfig());
assertEquals(server.getHomeDir(), "/a");
assertEquals(server.getConfigDir(), "/b");
assertEquals(server.getLogDir(), "/c");
assertEquals(server.getTempDir(), "/d");
assertEquals(server.getName(), "server");
assertEquals(server.getPrefix(), "server");
assertEquals(server.getPrefixedName("name"), "server.name");
assertNull(server.getConfig());
server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
Assert.assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
Assert.assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
Assert.assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
Assert.assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
Assert.assertEquals(server.getName(), "server");
Assert.assertEquals(server.getPrefix(), "server");
Assert.assertEquals(server.getPrefixedName("name"), "server.name");
Assert.assertNotNull(server.getConfig());
assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
assertEquals(server.getName(), "server");
assertEquals(server.getPrefix(), "server");
assertEquals(server.getPrefixedName("name"), "server.name");
assertNotNull(server.getConfig());
server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath());
Assert.assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
Assert.assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
Assert.assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
Assert.assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
Assert.assertEquals(server.getName(), "server");
Assert.assertEquals(server.getPrefix(), "server");
Assert.assertEquals(server.getPrefixedName("name"), "server.name");
Assert.assertNull(server.getConfig());
assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
assertEquals(server.getName(), "server");
assertEquals(server.getPrefix(), "server");
assertEquals(server.getPrefixedName("name"), "server.name");
assertNull(server.getConfig());
}
@Test
@ -113,9 +119,9 @@ public void initHomeDirNotDir() throws Exception {
@TestDir
public void initNoConfigDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
Assert.assertTrue(homeDir.mkdir());
Assert.assertTrue(new File(homeDir, "log").mkdir());
Assert.assertTrue(new File(homeDir, "temp").mkdir());
assertTrue(homeDir.mkdir());
assertTrue(new File(homeDir, "log").mkdir());
assertTrue(new File(homeDir, "temp").mkdir());
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
@ -127,9 +133,9 @@ public void initNoConfigDir() throws Exception {
@TestDir
public void initConfigDirNotDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
Assert.assertTrue(homeDir.mkdir());
Assert.assertTrue(new File(homeDir, "log").mkdir());
Assert.assertTrue(new File(homeDir, "temp").mkdir());
assertTrue(homeDir.mkdir());
assertTrue(new File(homeDir, "log").mkdir());
assertTrue(new File(homeDir, "temp").mkdir());
File configDir = new File(homeDir, "conf");
new FileOutputStream(configDir).close();
Configuration conf = new Configuration(false);
@ -143,9 +149,9 @@ public void initConfigDirNotDir() throws Exception {
@TestDir
public void initNoLogDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
Assert.assertTrue(homeDir.mkdir());
Assert.assertTrue(new File(homeDir, "conf").mkdir());
Assert.assertTrue(new File(homeDir, "temp").mkdir());
assertTrue(homeDir.mkdir());
assertTrue(new File(homeDir, "conf").mkdir());
assertTrue(new File(homeDir, "temp").mkdir());
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
@ -157,9 +163,9 @@ public void initNoLogDir() throws Exception {
@TestDir
public void initLogDirNotDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
Assert.assertTrue(homeDir.mkdir());
Assert.assertTrue(new File(homeDir, "conf").mkdir());
Assert.assertTrue(new File(homeDir, "temp").mkdir());
assertTrue(homeDir.mkdir());
assertTrue(new File(homeDir, "conf").mkdir());
assertTrue(new File(homeDir, "temp").mkdir());
File logDir = new File(homeDir, "log");
new FileOutputStream(logDir).close();
Configuration conf = new Configuration(false);
@ -173,9 +179,9 @@ public void initLogDirNotDir() throws Exception {
@TestDir
public void initNoTempDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
Assert.assertTrue(homeDir.mkdir());
Assert.assertTrue(new File(homeDir, "conf").mkdir());
Assert.assertTrue(new File(homeDir, "log").mkdir());
assertTrue(homeDir.mkdir());
assertTrue(new File(homeDir, "conf").mkdir());
assertTrue(new File(homeDir, "log").mkdir());
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
@ -187,9 +193,9 @@ public void initNoTempDir() throws Exception {
@TestDir
public void initTempDirNotDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
Assert.assertTrue(homeDir.mkdir());
Assert.assertTrue(new File(homeDir, "conf").mkdir());
Assert.assertTrue(new File(homeDir, "log").mkdir());
assertTrue(homeDir.mkdir());
assertTrue(new File(homeDir, "conf").mkdir());
assertTrue(new File(homeDir, "log").mkdir());
File tempDir = new File(homeDir, "temp");
new FileOutputStream(tempDir).close();
Configuration conf = new Configuration(false);
@ -204,7 +210,7 @@ public void initTempDirNotDir() throws Exception {
public void siteFileNotAFile() throws Exception {
String homeDir = TestDirHelper.getTestDir().getAbsolutePath();
File siteFile = new File(homeDir, "server-site.xml");
Assert.assertTrue(siteFile.mkdir());
assertTrue(siteFile.mkdir());
Server server = new Server("server", homeDir, homeDir, homeDir, homeDir);
server.init();
}
@ -234,12 +240,12 @@ public LifeCycleService() {
@Override
protected void init() throws ServiceException {
Assert.assertEquals(getServer().getStatus(), Server.Status.BOOTING);
assertEquals(getServer().getStatus(), Server.Status.BOOTING);
}
@Override
public void destroy() {
Assert.assertEquals(getServer().getStatus(), Server.Status.SHUTTING_DOWN);
assertEquals(getServer().getStatus(), Server.Status.SHUTTING_DOWN);
super.destroy();
}
@ -255,12 +261,12 @@ public void lifeCycle() throws Exception {
Configuration conf = new Configuration(false);
conf.set("server.services", LifeCycleService.class.getName());
Server server = createServer(conf);
Assert.assertEquals(server.getStatus(), Server.Status.UNDEF);
assertEquals(server.getStatus(), Server.Status.UNDEF);
server.init();
Assert.assertNotNull(server.get(LifeCycleService.class));
Assert.assertEquals(server.getStatus(), Server.Status.NORMAL);
assertNotNull(server.get(LifeCycleService.class));
assertEquals(server.getStatus(), Server.Status.NORMAL);
server.destroy();
Assert.assertEquals(server.getStatus(), Server.Status.SHUTDOWN);
assertEquals(server.getStatus(), Server.Status.SHUTDOWN);
}
@Test
@ -270,7 +276,7 @@ public void startWithStatusNotNormal() throws Exception {
conf.set("server.startup.status", "ADMIN");
Server server = createServer(conf);
server.init();
Assert.assertEquals(server.getStatus(), Server.Status.ADMIN);
assertEquals(server.getStatus(), Server.Status.ADMIN);
server.destroy();
}
@ -334,7 +340,7 @@ public void changeStatus() throws Exception {
Server server = createServer(conf);
server.init();
server.setStatus(Server.Status.ADMIN);
Assert.assertTrue(TestService.LIFECYCLE.contains("serverStatusChange"));
assertTrue(TestService.LIFECYCLE.contains("serverStatusChange"));
}
@Test
@ -357,7 +363,7 @@ public void setSameStatus() throws Exception {
server.init();
TestService.LIFECYCLE.clear();
server.setStatus(server.getStatus());
Assert.assertFalse(TestService.LIFECYCLE.contains("serverStatusChange"));
assertFalse(TestService.LIFECYCLE.contains("serverStatusChange"));
}
@Test
@ -368,9 +374,9 @@ public void serviceLifeCycle() throws Exception {
conf.set("server.services", TestService.class.getName());
Server server = createServer(conf);
server.init();
Assert.assertNotNull(server.get(TestService.class));
assertNotNull(server.get(TestService.class));
server.destroy();
Assert.assertEquals(TestService.LIFECYCLE, Arrays.asList("init", "postInit", "serverStatusChange", "destroy"));
assertEquals(TestService.LIFECYCLE, Arrays.asList("init", "postInit", "serverStatusChange", "destroy"));
}
@Test
@ -379,7 +385,7 @@ public void loadingDefaultConfig() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Server server = new Server("testserver", dir, dir, dir, dir);
server.init();
Assert.assertEquals(server.getConfig().get("testserver.a"), "default");
assertEquals(server.getConfig().get("testserver.a"), "default");
}
@Test
@ -392,7 +398,7 @@ public void loadingSiteConfig() throws Exception {
w.close();
Server server = new Server("testserver", dir, dir, dir, dir);
server.init();
Assert.assertEquals(server.getConfig().get("testserver.a"), "site");
assertEquals(server.getConfig().get("testserver.a"), "site");
}
@Test
@ -407,7 +413,7 @@ public void loadingSysPropConfig() throws Exception {
w.close();
Server server = new Server("testserver", dir, dir, dir, dir);
server.init();
Assert.assertEquals(server.getConfig().get("testserver.a"), "sysprop");
assertEquals(server.getConfig().get("testserver.a"), "sysprop");
} finally {
System.getProperties().remove("testserver.a");
}
@ -633,7 +639,7 @@ public void services() throws Exception {
conf = new Configuration(false);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Assert.assertEquals(ORDER.size(), 0);
assertEquals(ORDER.size(), 0);
// 2 services init/destroy
ORDER.clear();
@ -643,17 +649,17 @@ public void services() throws Exception {
conf.set("server.services", services);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Assert.assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
Assert.assertEquals(server.get(MyService3.class).getInterface(), MyService3.class);
Assert.assertEquals(ORDER.size(), 4);
Assert.assertEquals(ORDER.get(0), "s1.init");
Assert.assertEquals(ORDER.get(1), "s3.init");
Assert.assertEquals(ORDER.get(2), "s1.postInit");
Assert.assertEquals(ORDER.get(3), "s3.postInit");
assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
assertEquals(server.get(MyService3.class).getInterface(), MyService3.class);
assertEquals(ORDER.size(), 4);
assertEquals(ORDER.get(0), "s1.init");
assertEquals(ORDER.get(1), "s3.init");
assertEquals(ORDER.get(2), "s1.postInit");
assertEquals(ORDER.get(3), "s3.postInit");
server.destroy();
Assert.assertEquals(ORDER.size(), 6);
Assert.assertEquals(ORDER.get(4), "s3.destroy");
Assert.assertEquals(ORDER.get(5), "s1.destroy");
assertEquals(ORDER.size(), 6);
assertEquals(ORDER.get(4), "s3.destroy");
assertEquals(ORDER.get(5), "s1.destroy");
// 3 services, 2nd one fails on init
ORDER.clear();
@ -665,16 +671,16 @@ public void services() throws Exception {
server = new Server("server", dir, dir, dir, dir, conf);
try {
server.init();
Assert.fail();
fail();
} catch (ServerException ex) {
Assert.assertEquals(MyService2.class, ex.getError().getClass());
assertEquals(MyService2.class, ex.getError().getClass());
} catch (Exception ex) {
Assert.fail();
fail();
}
Assert.assertEquals(ORDER.size(), 3);
Assert.assertEquals(ORDER.get(0), "s1.init");
Assert.assertEquals(ORDER.get(1), "s2.init");
Assert.assertEquals(ORDER.get(2), "s1.destroy");
assertEquals(ORDER.size(), 3);
assertEquals(ORDER.get(0), "s1.init");
assertEquals(ORDER.get(1), "s2.init");
assertEquals(ORDER.get(2), "s1.destroy");
// 2 services one fails on destroy
ORDER.clear();
@ -683,15 +689,15 @@ public void services() throws Exception {
conf.set("server.services", services);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Assert.assertEquals(ORDER.size(), 4);
Assert.assertEquals(ORDER.get(0), "s1.init");
Assert.assertEquals(ORDER.get(1), "s5.init");
Assert.assertEquals(ORDER.get(2), "s1.postInit");
Assert.assertEquals(ORDER.get(3), "s5.postInit");
assertEquals(ORDER.size(), 4);
assertEquals(ORDER.get(0), "s1.init");
assertEquals(ORDER.get(1), "s5.init");
assertEquals(ORDER.get(2), "s1.postInit");
assertEquals(ORDER.get(3), "s5.postInit");
server.destroy();
Assert.assertEquals(ORDER.size(), 6);
Assert.assertEquals(ORDER.get(4), "s5.destroy");
Assert.assertEquals(ORDER.get(5), "s1.destroy");
assertEquals(ORDER.size(), 6);
assertEquals(ORDER.get(4), "s5.destroy");
assertEquals(ORDER.get(5), "s1.destroy");
// service override via ext
@ -705,16 +711,16 @@ public void services() throws Exception {
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Assert.assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
Assert.assertEquals(ORDER.size(), 4);
Assert.assertEquals(ORDER.get(0), "s1a.init");
Assert.assertEquals(ORDER.get(1), "s3.init");
Assert.assertEquals(ORDER.get(2), "s1a.postInit");
Assert.assertEquals(ORDER.get(3), "s3.postInit");
assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
assertEquals(ORDER.size(), 4);
assertEquals(ORDER.get(0), "s1a.init");
assertEquals(ORDER.get(1), "s3.init");
assertEquals(ORDER.get(2), "s1a.postInit");
assertEquals(ORDER.get(3), "s3.postInit");
server.destroy();
Assert.assertEquals(ORDER.size(), 6);
Assert.assertEquals(ORDER.get(4), "s3.destroy");
Assert.assertEquals(ORDER.get(5), "s1a.destroy");
assertEquals(ORDER.size(), 6);
assertEquals(ORDER.get(4), "s3.destroy");
assertEquals(ORDER.get(5), "s1a.destroy");
// service override via setService
ORDER.clear();
@ -725,16 +731,16 @@ public void services() throws Exception {
server.init();
server.setService(MyService1a.class);
Assert.assertEquals(ORDER.size(), 6);
Assert.assertEquals(ORDER.get(4), "s1.destroy");
Assert.assertEquals(ORDER.get(5), "s1a.init");
assertEquals(ORDER.size(), 6);
assertEquals(ORDER.get(4), "s1.destroy");
assertEquals(ORDER.get(5), "s1a.init");
Assert.assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
server.destroy();
Assert.assertEquals(ORDER.size(), 8);
Assert.assertEquals(ORDER.get(6), "s3.destroy");
Assert.assertEquals(ORDER.get(7), "s1a.destroy");
assertEquals(ORDER.size(), 8);
assertEquals(ORDER.get(6), "s3.destroy");
assertEquals(ORDER.get(7), "s1a.destroy");
// service add via setService
ORDER.clear();
@ -745,16 +751,16 @@ public void services() throws Exception {
server.init();
server.setService(MyService5.class);
Assert.assertEquals(ORDER.size(), 5);
Assert.assertEquals(ORDER.get(4), "s5.init");
assertEquals(ORDER.size(), 5);
assertEquals(ORDER.get(4), "s5.init");
Assert.assertEquals(server.get(MyService5.class).getClass(), MyService5.class);
assertEquals(server.get(MyService5.class).getClass(), MyService5.class);
server.destroy();
Assert.assertEquals(ORDER.size(), 8);
Assert.assertEquals(ORDER.get(5), "s5.destroy");
Assert.assertEquals(ORDER.get(6), "s3.destroy");
Assert.assertEquals(ORDER.get(7), "s1.destroy");
assertEquals(ORDER.size(), 8);
assertEquals(ORDER.get(5), "s5.destroy");
assertEquals(ORDER.get(6), "s3.destroy");
assertEquals(ORDER.get(7), "s1.destroy");
// service add via setService exception
ORDER.clear();
@ -765,15 +771,15 @@ public void services() throws Exception {
server.init();
try {
server.setService(MyService7.class);
Assert.fail();
fail();
} catch (ServerException ex) {
Assert.assertEquals(ServerException.ERROR.S09, ex.getError());
assertEquals(ServerException.ERROR.S09, ex.getError());
} catch (Exception ex) {
Assert.fail();
fail();
}
Assert.assertEquals(ORDER.size(), 6);
Assert.assertEquals(ORDER.get(4), "s3.destroy");
Assert.assertEquals(ORDER.get(5), "s1.destroy");
assertEquals(ORDER.size(), 6);
assertEquals(ORDER.get(4), "s3.destroy");
assertEquals(ORDER.get(5), "s1.destroy");
// service with dependency
ORDER.clear();
@ -782,8 +788,8 @@ public void services() throws Exception {
conf.set("server.services", services);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Assert.assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
Assert.assertEquals(server.get(MyService6.class).getInterface(), MyService6.class);
assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
assertEquals(server.get(MyService6.class).getInterface(), MyService6.class);
server.destroy();
}

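TestServer above, like TestFileSystemAccessService and TestInstrumentationService below, keeps the try/fail/catch idiom for expected exceptions and only switches Assert.fail() to the statically imported fail(); classes such as TestRunnableCallable and TestServerWebApp rely on @Test(expected = ...) instead. Both idioms, sketched with a hypothetical example in which NumberFormatException stands in for the exceptions the real tests expect:

import static org.junit.Assert.fail;

import org.junit.Test;

public class ExampleExpectedExceptionTest {

  // try/fail/catch: fail unless the expected exception is thrown
  @Test
  public void tryFailCatch() {
    try {
      Integer.parseInt("not a number");
      fail();
    } catch (NumberFormatException ex) {
      // expected
    } catch (Exception ex) {
      fail();
    }
  }

  // JUnit 4 annotation form: the test passes only if the exception escapes the method
  @Test(expected = NumberFormatException.class)
  public void expectedAnnotation() {
    Integer.parseInt("not a number");
  }
}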

@ -18,15 +18,15 @@
package org.apache.hadoop.lib.server;
import java.util.Arrays;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.util.Arrays;
import java.util.Collection;
@RunWith(value = Parameterized.class)
public class TestServerConstructor extends HTestCase {


@ -18,7 +18,16 @@
package org.apache.hadoop.lib.service.hadoop;
import junit.framework.Assert;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
@ -38,12 +47,6 @@
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays;
public class TestFileSystemAccessService extends HFSTestCase {
private void createHadoopConf(Configuration hadoopConf) throws Exception {
@ -71,7 +74,7 @@ public void simpleSecurity() throws Exception {
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Assert.assertNotNull(server.get(FileSystemAccess.class));
assertNotNull(server.get(FileSystemAccess.class));
server.destroy();
}
@ -148,7 +151,7 @@ public void serviceHadoopConf() throws Exception {
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "FOO");
assertEquals(fsAccess.serviceHadoopConf.get("foo"), "FOO");
server.destroy();
}
@ -174,7 +177,7 @@ public void serviceHadoopConfCustomDir() throws Exception {
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "BAR");
assertEquals(fsAccess.serviceHadoopConf.get("foo"), "BAR");
server.destroy();
}
@ -245,15 +248,15 @@ public void createFileSystem() throws Exception {
server.init();
FileSystemAccess hadoop = server.get(FileSystemAccess.class);
FileSystem fs = hadoop.createFileSystem("u", hadoop.getFileSystemConfiguration());
Assert.assertNotNull(fs);
assertNotNull(fs);
fs.mkdirs(new Path("/tmp/foo"));
hadoop.releaseFileSystem(fs);
try {
fs.mkdirs(new Path("/tmp/foo"));
Assert.fail();
fail();
} catch (IOException ex) {
} catch (Exception ex) {
Assert.fail();
fail();
}
server.destroy();
}
@ -288,10 +291,10 @@ public Void execute(FileSystem fs) throws IOException {
});
try {
fsa[0].mkdirs(new Path("/tmp/foo"));
Assert.fail();
fail();
} catch (IOException ex) {
} catch (Exception ex) {
Assert.fail();
fail();
}
server.destroy();
}
@ -351,19 +354,19 @@ public Void execute(FileSystem fs) throws IOException {
throw new IOException();
}
});
Assert.fail();
fail();
} catch (FileSystemAccessException ex) {
Assert.assertEquals(ex.getError(), FileSystemAccessException.ERROR.H03);
assertEquals(ex.getError(), FileSystemAccessException.ERROR.H03);
} catch (Exception ex) {
Assert.fail();
fail();
}
try {
fsa[0].mkdirs(new Path("/tmp/foo"));
Assert.fail();
fail();
} catch (IOException ex) {
} catch (Exception ex) {
Assert.fail();
fail();
}
server.destroy();
}


@ -18,7 +18,16 @@
package org.apache.hadoop.lib.service.instrumentation;
import junit.framework.Assert;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.StringWriter;
import java.util.Arrays;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.service.Instrumentation;
@ -32,11 +41,6 @@
import org.json.simple.parser.JSONParser;
import org.junit.Test;
import java.io.StringWriter;
import java.util.Arrays;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
public class TestInstrumentationService extends HTestCase {
@Override
@ -47,51 +51,51 @@ protected float getWaitForRatio() {
@Test
public void cron() {
InstrumentationService.Cron cron = new InstrumentationService.Cron();
Assert.assertEquals(cron.start, 0);
Assert.assertEquals(cron.lapStart, 0);
Assert.assertEquals(cron.own, 0);
Assert.assertEquals(cron.total, 0);
assertEquals(cron.start, 0);
assertEquals(cron.lapStart, 0);
assertEquals(cron.own, 0);
assertEquals(cron.total, 0);
long begin = Time.now();
Assert.assertEquals(cron.start(), cron);
Assert.assertEquals(cron.start(), cron);
Assert.assertEquals(cron.start, begin, 20);
Assert.assertEquals(cron.start, cron.lapStart);
assertEquals(cron.start(), cron);
assertEquals(cron.start(), cron);
assertEquals(cron.start, begin, 20);
assertEquals(cron.start, cron.lapStart);
sleep(100);
Assert.assertEquals(cron.stop(), cron);
assertEquals(cron.stop(), cron);
long end = Time.now();
long delta = end - begin;
Assert.assertEquals(cron.own, delta, 20);
Assert.assertEquals(cron.total, 0);
Assert.assertEquals(cron.lapStart, 0);
assertEquals(cron.own, delta, 20);
assertEquals(cron.total, 0);
assertEquals(cron.lapStart, 0);
sleep(100);
long reStart = Time.now();
cron.start();
Assert.assertEquals(cron.start, begin, 20);
Assert.assertEquals(cron.lapStart, reStart, 20);
assertEquals(cron.start, begin, 20);
assertEquals(cron.lapStart, reStart, 20);
sleep(100);
cron.stop();
long reEnd = Time.now();
delta += reEnd - reStart;
Assert.assertEquals(cron.own, delta, 20);
Assert.assertEquals(cron.total, 0);
Assert.assertEquals(cron.lapStart, 0);
assertEquals(cron.own, delta, 20);
assertEquals(cron.total, 0);
assertEquals(cron.lapStart, 0);
cron.end();
Assert.assertEquals(cron.total, reEnd - begin, 20);
assertEquals(cron.total, reEnd - begin, 20);
try {
cron.start();
Assert.fail();
fail();
} catch (IllegalStateException ex) {
} catch (Exception ex) {
Assert.fail();
fail();
}
try {
cron.stop();
Assert.fail();
fail();
} catch (IllegalStateException ex) {
} catch (Exception ex) {
Assert.fail();
fail();
}
}
@ -135,10 +139,10 @@ public void timer() throws Exception {
timer.addCron(cron);
long[] values = timer.getValues();
Assert.assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
Assert.assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
Assert.assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
Assert.assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
cron = new InstrumentationService.Cron();
@ -168,10 +172,10 @@ public void timer() throws Exception {
timer.addCron(cron);
values = timer.getValues();
Assert.assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
Assert.assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
Assert.assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
Assert.assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
avgTotal = totalDelta;
avgOwn = ownDelta;
@ -205,27 +209,27 @@ public void timer() throws Exception {
cron.stop();
timer.addCron(cron);
values = timer.getValues();
Assert.assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
Assert.assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
Assert.assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
Assert.assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
JSONObject json = (JSONObject) new JSONParser().parse(timer.toJSONString());
Assert.assertEquals(json.size(), 4);
Assert.assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]);
Assert.assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]);
Assert.assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]);
Assert.assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]);
assertEquals(json.size(), 4);
assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]);
assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]);
assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]);
assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]);
StringWriter writer = new StringWriter();
timer.writeJSONString(writer);
writer.close();
json = (JSONObject) new JSONParser().parse(writer.toString());
Assert.assertEquals(json.size(), 4);
Assert.assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]);
Assert.assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]);
Assert.assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]);
Assert.assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]);
assertEquals(json.size(), 4);
assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]);
assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]);
assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]);
assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]);
}
@Test
@ -240,34 +244,34 @@ public Long getValue() {
InstrumentationService.Sampler sampler = new InstrumentationService.Sampler();
sampler.init(4, var);
Assert.assertEquals(sampler.getRate(), 0f, 0.0001);
assertEquals(sampler.getRate(), 0f, 0.0001);
sampler.sample();
Assert.assertEquals(sampler.getRate(), 0f, 0.0001);
assertEquals(sampler.getRate(), 0f, 0.0001);
value[0] = 1;
sampler.sample();
Assert.assertEquals(sampler.getRate(), (0d + 1) / 2, 0.0001);
assertEquals(sampler.getRate(), (0d + 1) / 2, 0.0001);
value[0] = 2;
sampler.sample();
Assert.assertEquals(sampler.getRate(), (0d + 1 + 2) / 3, 0.0001);
assertEquals(sampler.getRate(), (0d + 1 + 2) / 3, 0.0001);
value[0] = 3;
sampler.sample();
Assert.assertEquals(sampler.getRate(), (0d + 1 + 2 + 3) / 4, 0.0001);
assertEquals(sampler.getRate(), (0d + 1 + 2 + 3) / 4, 0.0001);
value[0] = 4;
sampler.sample();
Assert.assertEquals(sampler.getRate(), (4d + 1 + 2 + 3) / 4, 0.0001);
assertEquals(sampler.getRate(), (4d + 1 + 2 + 3) / 4, 0.0001);
JSONObject json = (JSONObject) new JSONParser().parse(sampler.toJSONString());
Assert.assertEquals(json.size(), 2);
Assert.assertEquals(json.get("sampler"), sampler.getRate());
Assert.assertEquals(json.get("size"), 4L);
assertEquals(json.size(), 2);
assertEquals(json.get("sampler"), sampler.getRate());
assertEquals(json.get("size"), 4L);
StringWriter writer = new StringWriter();
sampler.writeJSONString(writer);
writer.close();
json = (JSONObject) new JSONParser().parse(writer.toString());
Assert.assertEquals(json.size(), 2);
Assert.assertEquals(json.get("sampler"), sampler.getRate());
Assert.assertEquals(json.get("size"), 4L);
assertEquals(json.size(), 2);
assertEquals(json.get("sampler"), sampler.getRate());
assertEquals(json.get("size"), 4L);
}
@Test
@ -283,15 +287,15 @@ public String getValue() {
};
JSONObject json = (JSONObject) new JSONParser().parse(variableHolder.toJSONString());
Assert.assertEquals(json.size(), 1);
Assert.assertEquals(json.get("value"), "foo");
assertEquals(json.size(), 1);
assertEquals(json.get("value"), "foo");
StringWriter writer = new StringWriter();
variableHolder.writeJSONString(writer);
writer.close();
json = (JSONObject) new JSONParser().parse(writer.toString());
Assert.assertEquals(json.size(), 1);
Assert.assertEquals(json.get("value"), "foo");
assertEquals(json.size(), 1);
assertEquals(json.get("value"), "foo");
}
@Test
@ -306,7 +310,7 @@ public void service() throws Exception {
server.init();
Instrumentation instrumentation = server.get(Instrumentation.class);
Assert.assertNotNull(instrumentation);
assertNotNull(instrumentation);
instrumentation.incr("g", "c", 1);
instrumentation.incr("g", "c", 2);
instrumentation.incr("g", "c1", 2);
@ -339,27 +343,27 @@ public Long getValue() {
instrumentation.addSampler("g", "s", 10, varToSample);
Map<String, ?> snapshot = instrumentation.getSnapshot();
Assert.assertNotNull(snapshot.get("os-env"));
Assert.assertNotNull(snapshot.get("sys-props"));
Assert.assertNotNull(snapshot.get("jvm"));
Assert.assertNotNull(snapshot.get("counters"));
Assert.assertNotNull(snapshot.get("timers"));
Assert.assertNotNull(snapshot.get("variables"));
Assert.assertNotNull(snapshot.get("samplers"));
Assert.assertNotNull(((Map<String, String>) snapshot.get("os-env")).get("PATH"));
Assert.assertNotNull(((Map<String, String>) snapshot.get("sys-props")).get("java.version"));
Assert.assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("free.memory"));
Assert.assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("max.memory"));
Assert.assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("total.memory"));
Assert.assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("counters")).get("g"));
Assert.assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("timers")).get("g"));
Assert.assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("variables")).get("g"));
Assert.assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("samplers")).get("g"));
Assert.assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("counters")).get("g").get("c"));
Assert.assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("counters")).get("g").get("c1"));
Assert.assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("timers")).get("g").get("t"));
Assert.assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("variables")).get("g").get("v"));
Assert.assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("samplers")).get("g").get("s"));
assertNotNull(snapshot.get("os-env"));
assertNotNull(snapshot.get("sys-props"));
assertNotNull(snapshot.get("jvm"));
assertNotNull(snapshot.get("counters"));
assertNotNull(snapshot.get("timers"));
assertNotNull(snapshot.get("variables"));
assertNotNull(snapshot.get("samplers"));
assertNotNull(((Map<String, String>) snapshot.get("os-env")).get("PATH"));
assertNotNull(((Map<String, String>) snapshot.get("sys-props")).get("java.version"));
assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("free.memory"));
assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("max.memory"));
assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("total.memory"));
assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("counters")).get("g"));
assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("timers")).get("g"));
assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("variables")).get("g"));
assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("samplers")).get("g"));
assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("counters")).get("g").get("c"));
assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("counters")).get("g").get("c1"));
assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("timers")).get("g").get("t"));
assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("variables")).get("g").get("v"));
assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("samplers")).get("g").get("s"));
StringWriter writer = new StringWriter();
JSONObject.writeJSONString(snapshot, writer);
@ -392,12 +396,12 @@ public Long getValue() {
sleep(2000);
int i = count.get();
Assert.assertTrue(i > 0);
assertTrue(i > 0);
Map<String, Map<String, ?>> snapshot = instrumentation.getSnapshot();
Map<String, Map<String, Object>> samplers = (Map<String, Map<String, Object>>) snapshot.get("samplers");
InstrumentationService.Sampler sampler = (InstrumentationService.Sampler) samplers.get("g").get("s");
Assert.assertTrue(sampler.getRate() > 0);
assertTrue(sampler.getRate() > 0);
server.destroy();
}


@ -18,7 +18,10 @@
package org.apache.hadoop.lib.service.scheduler;
import junit.framework.Assert;
import static org.junit.Assert.assertNotNull;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.service.Scheduler;
@ -29,8 +32,6 @@
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import java.util.Arrays;
public class TestSchedulerService extends HTestCase {
@Test
@ -42,7 +43,7 @@ public void service() throws Exception {
SchedulerService.class.getName())));
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Assert.assertNotNull(server.get(Scheduler.class));
assertNotNull(server.get(Scheduler.class));
server.destroy();
}


@ -17,14 +17,14 @@
*/
package org.apache.hadoop.lib.service.security;
import org.apache.hadoop.security.GroupMappingServiceProvider;
import org.apache.hadoop.test.HadoopUsersConfTestHelper;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.security.GroupMappingServiceProvider;
import org.apache.hadoop.test.HadoopUsersConfTestHelper;
public class DummyGroupMapping implements GroupMappingServiceProvider {
@Override


@ -18,7 +18,12 @@
package org.apache.hadoop.lib.service.security;
import junit.framework.Assert;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.service.Groups;
@ -28,9 +33,6 @@
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import java.util.Arrays;
import java.util.List;
public class TestGroupsService extends HTestCase {
@Test
@ -42,9 +44,9 @@ public void service() throws Exception {
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Groups groups = server.get(Groups.class);
Assert.assertNotNull(groups);
assertNotNull(groups);
List<String> g = groups.getGroups(System.getProperty("user.name"));
Assert.assertNotSame(g.size(), 0);
assertNotSame(g.size(), 0);
server.destroy();
}


@ -18,7 +18,12 @@
package org.apache.hadoop.lib.service.security;
import junit.framework.Assert;
import static org.junit.Assert.assertNotNull;
import java.security.AccessControlException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.server.ServiceException;
@ -31,10 +36,6 @@
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import java.security.AccessControlException;
import java.util.Arrays;
import java.util.List;
public class TestProxyUserService extends HTestCase {
@Test
@ -47,7 +48,7 @@ public void service() throws Exception {
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
Assert.assertNotNull(proxyUser);
assertNotNull(proxyUser);
server.destroy();
}
@ -103,7 +104,7 @@ public void validateAnyHostAnyUser() throws Exception {
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
Assert.assertNotNull(proxyUser);
assertNotNull(proxyUser);
proxyUser.validate("foo", "localhost", "bar");
server.destroy();
}
@ -120,7 +121,7 @@ public void invalidProxyUser() throws Exception {
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
Assert.assertNotNull(proxyUser);
assertNotNull(proxyUser);
proxyUser.validate("bar", "localhost", "foo");
server.destroy();
}
@ -137,7 +138,7 @@ public void validateHost() throws Exception {
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
Assert.assertNotNull(proxyUser);
assertNotNull(proxyUser);
proxyUser.validate("foo", "localhost", "bar");
server.destroy();
}
@ -166,7 +167,7 @@ public void validateGroup() throws Exception {
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
Assert.assertNotNull(proxyUser);
assertNotNull(proxyUser);
proxyUser.validate("foo", "localhost", System.getProperty("user.name"));
server.destroy();
}
@ -184,7 +185,7 @@ public void unknownHost() throws Exception {
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
Assert.assertNotNull(proxyUser);
assertNotNull(proxyUser);
proxyUser.validate("foo", "unknownhost.bar.foo", "bar");
server.destroy();
}
@ -201,7 +202,7 @@ public void invalidHost() throws Exception {
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
Assert.assertNotNull(proxyUser);
assertNotNull(proxyUser);
proxyUser.validate("foo", "www.yahoo.com", "bar");
server.destroy();
}
@ -218,7 +219,7 @@ public void invalidGroup() throws Exception {
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
Assert.assertNotNull(proxyUser);
assertNotNull(proxyUser);
proxyUser.validate("foo", "localhost", System.getProperty("user.name"));
server.destroy();
}


@ -18,18 +18,21 @@
package org.apache.hadoop.lib.servlet;
import junit.framework.Assert;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
import org.mockito.Mockito;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
import org.mockito.Mockito;
public class TestHostnameFilter extends HTestCase {
@ -47,17 +50,17 @@ public void hostname() throws Exception {
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
throws IOException, ServletException {
Assert.assertTrue(HostnameFilter.get().contains("localhost"));
assertTrue(HostnameFilter.get().contains("localhost"));
invoked.set(true);
}
};
Filter filter = new HostnameFilter();
filter.init(null);
Assert.assertNull(HostnameFilter.get());
assertNull(HostnameFilter.get());
filter.doFilter(request, response, chain);
Assert.assertTrue(invoked.get());
Assert.assertNull(HostnameFilter.get());
assertTrue(invoked.get());
assertNull(HostnameFilter.get());
filter.destroy();
}


@ -18,11 +18,13 @@
package org.apache.hadoop.lib.servlet;
import junit.framework.Assert;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
import org.mockito.Mockito;
import org.slf4j.MDC;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.security.Principal;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
@ -30,9 +32,11 @@
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.security.Principal;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
import org.mockito.Mockito;
import org.slf4j.MDC;
public class TestMDCFilter extends HTestCase {
@ -52,10 +56,10 @@ public void mdc() throws Exception {
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
throws IOException, ServletException {
Assert.assertEquals(MDC.get("hostname"), null);
Assert.assertEquals(MDC.get("user"), null);
Assert.assertEquals(MDC.get("method"), "METHOD");
Assert.assertEquals(MDC.get("path"), "/pathinfo");
assertEquals(MDC.get("hostname"), null);
assertEquals(MDC.get("user"), null);
assertEquals(MDC.get("method"), "METHOD");
assertEquals(MDC.get("path"), "/pathinfo");
invoked.set(true);
}
};
@ -65,11 +69,11 @@ public void doFilter(ServletRequest servletRequest, ServletResponse servletRespo
filter.init(null);
filter.doFilter(request, response, chain);
Assert.assertTrue(invoked.get());
Assert.assertNull(MDC.get("hostname"));
Assert.assertNull(MDC.get("user"));
Assert.assertNull(MDC.get("method"));
Assert.assertNull(MDC.get("path"));
assertTrue(invoked.get());
assertNull(MDC.get("hostname"));
assertNull(MDC.get("user"));
assertNull(MDC.get("method"));
assertNull(MDC.get("path"));
Mockito.when(request.getUserPrincipal()).thenReturn(new Principal() {
@Override
@ -83,15 +87,15 @@ public String getName() {
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
throws IOException, ServletException {
Assert.assertEquals(MDC.get("hostname"), null);
Assert.assertEquals(MDC.get("user"), "name");
Assert.assertEquals(MDC.get("method"), "METHOD");
Assert.assertEquals(MDC.get("path"), "/pathinfo");
assertEquals(MDC.get("hostname"), null);
assertEquals(MDC.get("user"), "name");
assertEquals(MDC.get("method"), "METHOD");
assertEquals(MDC.get("path"), "/pathinfo");
invoked.set(true);
}
};
filter.doFilter(request, response, chain);
Assert.assertTrue(invoked.get());
assertTrue(invoked.get());
HostnameFilter.HOSTNAME_TL.set("HOST");
@ -100,15 +104,15 @@ public void doFilter(ServletRequest servletRequest, ServletResponse servletRespo
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
throws IOException, ServletException {
Assert.assertEquals(MDC.get("hostname"), "HOST");
Assert.assertEquals(MDC.get("user"), "name");
Assert.assertEquals(MDC.get("method"), "METHOD");
Assert.assertEquals(MDC.get("path"), "/pathinfo");
assertEquals(MDC.get("hostname"), "HOST");
assertEquals(MDC.get("user"), "name");
assertEquals(MDC.get("method"), "METHOD");
assertEquals(MDC.get("path"), "/pathinfo");
invoked.set(true);
}
};
filter.doFilter(request, response, chain);
Assert.assertTrue(invoked.get());
assertTrue(invoked.get());
HostnameFilter.HOSTNAME_TL.remove();

View File

@ -18,7 +18,8 @@
package org.apache.hadoop.lib.servlet;
import junit.framework.Assert;
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.TestDir;
@ -35,10 +36,10 @@ public void getHomeDirNotDef() {
@Test
public void getHomeDir() {
System.setProperty("TestServerWebApp0.home.dir", "/tmp");
Assert.assertEquals(ServerWebApp.getHomeDir("TestServerWebApp0"), "/tmp");
Assert.assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmp/log");
assertEquals(ServerWebApp.getHomeDir("TestServerWebApp0"), "/tmp");
assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmp/log");
System.setProperty("TestServerWebApp0.log.dir", "/tmplog");
Assert.assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmplog");
assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmplog");
}
@Test
@ -52,11 +53,11 @@ public void lifecycle() throws Exception {
ServerWebApp server = new ServerWebApp("TestServerWebApp1") {
};
Assert.assertEquals(server.getStatus(), Server.Status.UNDEF);
assertEquals(server.getStatus(), Server.Status.UNDEF);
server.contextInitialized(null);
Assert.assertEquals(server.getStatus(), Server.Status.NORMAL);
assertEquals(server.getStatus(), Server.Status.NORMAL);
server.contextDestroyed(null);
Assert.assertEquals(server.getStatus(), Server.Status.SHUTDOWN);
assertEquals(server.getStatus(), Server.Status.SHUTDOWN);
}
@Test(expected = RuntimeException.class)

View File

@ -19,18 +19,19 @@
package org.apache.hadoop.lib.util;
import junit.framework.Assert;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import java.util.ArrayList;
import java.util.Arrays;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
public class TestCheck extends HTestCase {
@Test
public void notNullNotNull() {
Assert.assertEquals(Check.notNull("value", "name"), "value");
assertEquals(Check.notNull("value", "name"), "value");
}
@Test(expected = IllegalArgumentException.class)
@ -79,7 +80,7 @@ public void notEmptyElementsEmptyElements() {
@Test
public void notEmptyNotEmtpy() {
Assert.assertEquals(Check.notEmpty("value", "name"), "value");
assertEquals(Check.notEmpty("value", "name"), "value");
}
@Test(expected = IllegalArgumentException.class)
@ -94,10 +95,10 @@ public void notEmptyEmpty() {
@Test
public void validIdentifierValid() throws Exception {
Assert.assertEquals(Check.validIdentifier("a", 1, ""), "a");
Assert.assertEquals(Check.validIdentifier("a1", 2, ""), "a1");
Assert.assertEquals(Check.validIdentifier("a_", 3, ""), "a_");
Assert.assertEquals(Check.validIdentifier("_", 1, ""), "_");
assertEquals(Check.validIdentifier("a", 1, ""), "a");
assertEquals(Check.validIdentifier("a1", 2, ""), "a1");
assertEquals(Check.validIdentifier("a_", 3, ""), "a_");
assertEquals(Check.validIdentifier("_", 1, ""), "_");
}
@Test(expected = IllegalArgumentException.class)
@ -117,7 +118,7 @@ public void validIdentifierInvalid3() throws Exception {
@Test
public void checkGTZeroGreater() {
Assert.assertEquals(Check.gt0(120, "test"), 120);
assertEquals(Check.gt0(120, "test"), 120);
}
@Test(expected = IllegalArgumentException.class)
@ -132,8 +133,8 @@ public void checkGTZeroLessThanZero() {
@Test
public void checkGEZero() {
Assert.assertEquals(Check.ge0(120, "test"), 120);
Assert.assertEquals(Check.ge0(0, "test"), 0);
assertEquals(Check.ge0(120, "test"), 120);
assertEquals(Check.ge0(0, "test"), 0);
}
@Test(expected = IllegalArgumentException.class)

View File

@ -18,27 +18,29 @@
package org.apache.hadoop.lib.util;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
public class TestConfigurationUtils {
@Test
public void constructors() throws Exception {
Configuration conf = new Configuration(false);
Assert.assertEquals(conf.size(), 0);
assertEquals(conf.size(), 0);
byte[] bytes = "<configuration><property><name>a</name><value>A</value></property></configuration>".getBytes();
InputStream is = new ByteArrayInputStream(bytes);
conf = new Configuration(false);
ConfigurationUtils.load(conf, is);
Assert.assertEquals(conf.size(), 1);
Assert.assertEquals(conf.get("a"), "A");
assertEquals(conf.size(), 1);
assertEquals(conf.get("a"), "A");
}
@ -62,9 +64,9 @@ public void copy() throws Exception {
ConfigurationUtils.copy(srcConf, targetConf);
Assert.assertEquals("valueFromSource", targetConf.get("testParameter1"));
Assert.assertEquals("valueFromSource", targetConf.get("testParameter2"));
Assert.assertEquals("valueFromTarget", targetConf.get("testParameter3"));
assertEquals("valueFromSource", targetConf.get("testParameter1"));
assertEquals("valueFromSource", targetConf.get("testParameter2"));
assertEquals("valueFromTarget", targetConf.get("testParameter3"));
}
@Test
@ -80,13 +82,13 @@ public void injectDefaults() throws Exception {
ConfigurationUtils.injectDefaults(srcConf, targetConf);
Assert.assertEquals("valueFromSource", targetConf.get("testParameter1"));
Assert.assertEquals("originalValueFromTarget", targetConf.get("testParameter2"));
Assert.assertEquals("originalValueFromTarget", targetConf.get("testParameter3"));
assertEquals("valueFromSource", targetConf.get("testParameter1"));
assertEquals("originalValueFromTarget", targetConf.get("testParameter2"));
assertEquals("originalValueFromTarget", targetConf.get("testParameter3"));
Assert.assertEquals("valueFromSource", srcConf.get("testParameter1"));
Assert.assertEquals("valueFromSource", srcConf.get("testParameter2"));
Assert.assertNull(srcConf.get("testParameter3"));
assertEquals("valueFromSource", srcConf.get("testParameter1"));
assertEquals("valueFromSource", srcConf.get("testParameter2"));
assertNull(srcConf.get("testParameter3"));
}
@ -95,11 +97,11 @@ public void resolve() {
Configuration conf = new Configuration(false);
conf.set("a", "A");
conf.set("b", "${a}");
Assert.assertEquals(conf.getRaw("a"), "A");
Assert.assertEquals(conf.getRaw("b"), "${a}");
assertEquals(conf.getRaw("a"), "A");
assertEquals(conf.getRaw("b"), "${a}");
conf = ConfigurationUtils.resolve(conf);
Assert.assertEquals(conf.getRaw("a"), "A");
Assert.assertEquals(conf.getRaw("b"), "A");
assertEquals(conf.getRaw("a"), "A");
assertEquals(conf.getRaw("b"), "A");
}
@Test
@ -110,16 +112,16 @@ public void testVarResolutionAndSysProps() {
conf.set("b", "${a}");
conf.set("c", "${user.name}");
conf.set("d", "${aaa}");
Assert.assertEquals(conf.getRaw("a"), "A");
Assert.assertEquals(conf.getRaw("b"), "${a}");
Assert.assertEquals(conf.getRaw("c"), "${user.name}");
Assert.assertEquals(conf.get("a"), "A");
Assert.assertEquals(conf.get("b"), "A");
Assert.assertEquals(conf.get("c"), userName);
Assert.assertEquals(conf.get("d"), "${aaa}");
assertEquals(conf.getRaw("a"), "A");
assertEquals(conf.getRaw("b"), "${a}");
assertEquals(conf.getRaw("c"), "${user.name}");
assertEquals(conf.get("a"), "A");
assertEquals(conf.get("b"), "A");
assertEquals(conf.get("c"), userName);
assertEquals(conf.get("d"), "${aaa}");
conf.set("user.name", "foo");
Assert.assertEquals(conf.get("user.name"), "foo");
assertEquals(conf.get("user.name"), "foo");
}
}

View File

@ -18,13 +18,14 @@
package org.apache.hadoop.lib.wsrs;
import junit.framework.Assert;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import org.junit.Test;
public class TestInputStreamEntity {
@Test
@ -34,14 +35,14 @@ public void test() throws Exception {
InputStreamEntity i = new InputStreamEntity(is);
i.write(baos);
baos.close();
Assert.assertEquals(new String(baos.toByteArray()), "abc");
assertEquals(new String(baos.toByteArray()), "abc");
is = new ByteArrayInputStream("abc".getBytes());
baos = new ByteArrayOutputStream();
i = new InputStreamEntity(is, 1, 1);
i.write(baos);
baos.close();
Assert.assertEquals(baos.toByteArray()[0], 'b');
assertEquals(baos.toByteArray()[0], 'b');
}
}

View File

@ -18,28 +18,31 @@
package org.apache.hadoop.lib.wsrs;
import junit.framework.Assert;
import org.json.simple.JSONObject;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.util.Map;
import org.json.simple.JSONObject;
import org.junit.Test;
public class TestJSONMapProvider {
@Test
@SuppressWarnings("unchecked")
public void test() throws Exception {
JSONMapProvider p = new JSONMapProvider();
Assert.assertTrue(p.isWriteable(Map.class, null, null, null));
Assert.assertFalse(p.isWriteable(this.getClass(), null, null, null));
Assert.assertEquals(p.getSize(null, null, null, null, null), -1);
assertTrue(p.isWriteable(Map.class, null, null, null));
assertFalse(p.isWriteable(this.getClass(), null, null, null));
assertEquals(p.getSize(null, null, null, null, null), -1);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
JSONObject json = new JSONObject();
json.put("a", "A");
p.writeTo(json, JSONObject.class, null, null, null, null, baos);
baos.close();
Assert.assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}");
assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}");
}
}

View File

@ -18,27 +18,30 @@
package org.apache.hadoop.lib.wsrs;
import junit.framework.Assert;
import org.json.simple.JSONObject;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import org.json.simple.JSONObject;
import org.junit.Test;
public class TestJSONProvider {
@Test
@SuppressWarnings("unchecked")
public void test() throws Exception {
JSONProvider p = new JSONProvider();
Assert.assertTrue(p.isWriteable(JSONObject.class, null, null, null));
Assert.assertFalse(p.isWriteable(this.getClass(), null, null, null));
Assert.assertEquals(p.getSize(null, null, null, null, null), -1);
assertTrue(p.isWriteable(JSONObject.class, null, null, null));
assertFalse(p.isWriteable(this.getClass(), null, null, null));
assertEquals(p.getSize(null, null, null, null, null), -1);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
JSONObject json = new JSONObject();
json.put("a", "A");
p.writeTo(json, JSONObject.class, null, null, null, null, baos);
baos.close();
Assert.assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}");
assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}");
}
}

View File

@ -18,41 +18,43 @@
package org.apache.hadoop.lib.wsrs;
import junit.framework.Assert;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.util.regex.Pattern;
import org.junit.Test;
public class TestParam {
private <T> void test(Param<T> param, String name,
String domain, T defaultValue, T validValue,
String invalidStrValue, String outOfRangeValue) throws Exception {
Assert.assertEquals(name, param.getName());
Assert.assertEquals(domain, param.getDomain());
Assert.assertEquals(defaultValue, param.value());
Assert.assertEquals(defaultValue, param.parseParam(""));
Assert.assertEquals(defaultValue, param.parseParam(null));
Assert.assertEquals(validValue, param.parseParam(validValue.toString()));
assertEquals(name, param.getName());
assertEquals(domain, param.getDomain());
assertEquals(defaultValue, param.value());
assertEquals(defaultValue, param.parseParam(""));
assertEquals(defaultValue, param.parseParam(null));
assertEquals(validValue, param.parseParam(validValue.toString()));
if (invalidStrValue != null) {
try {
param.parseParam(invalidStrValue);
Assert.fail();
fail();
} catch (IllegalArgumentException ex) {
//NOP
} catch (Exception ex) {
Assert.fail();
fail();
}
}
if (outOfRangeValue != null) {
try {
param.parseParam(outOfRangeValue);
Assert.fail();
fail();
} catch (IllegalArgumentException ex) {
//NOP
} catch (Exception ex) {
Assert.fail();
fail();
}
}
}
@ -81,7 +83,7 @@ public void testShort() throws Exception {
param = new ShortParam("S", (short) 1, 8) {
};
Assert.assertEquals(new Short((short)01777), param.parse("01777"));
assertEquals(new Short((short)01777), param.parse("01777"));
}
@Test
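
TestParam above validates bad input with the manual try/fail/catch idiom inside its shared helper, while other converted tests in this commit use the declarative @Test(expected = ...) form. The sketch below contrasts the two styles; the parsePositive helper and the class name are made up for illustration and are not part of the patch.

package org.apache.hadoop.example;

import static org.junit.Assert.fail;

import org.junit.Test;

public class TestExpectedExceptionStyles {

  // Hypothetical helper: rejects values that are not strictly positive.
  private static int parsePositive(String s) {
    int v = Integer.parseInt(s);
    if (v <= 0) {
      throw new IllegalArgumentException("not positive: " + v);
    }
    return v;
  }

  // Declarative JUnit 4 style: the test passes only if the exception is thrown.
  @Test(expected = IllegalArgumentException.class)
  public void rejectsZero() {
    parsePositive("0");
  }

  // Manual style, as in TestParam's helper, useful when the test has to keep
  // running after the failing call or inspect the exception itself.
  @Test
  public void rejectsZeroManually() {
    try {
      parsePositive("0");
      fail("Expected IllegalArgumentException");
    } catch (IllegalArgumentException expected) {
      // expected: nothing more to verify
    }
  }
}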

View File

@ -18,16 +18,20 @@
package org.apache.hadoop.lib.wsrs;
import com.sun.jersey.api.core.HttpContext;
import com.sun.jersey.api.core.HttpRequestContext;
import com.sun.jersey.core.spi.component.ComponentScope;
import junit.framework.Assert;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import java.security.Principal;
import javax.ws.rs.core.MultivaluedMap;
import org.junit.Test;
import org.mockito.Mockito;
import org.slf4j.MDC;
import javax.ws.rs.core.MultivaluedMap;
import java.security.Principal;
import com.sun.jersey.api.core.HttpContext;
import com.sun.jersey.api.core.HttpRequestContext;
import com.sun.jersey.core.spi.component.ComponentScope;
public class TestUserProvider {
@ -43,8 +47,8 @@ public void noUser() {
HttpContext context = Mockito.mock(HttpContext.class);
Mockito.when(context.getRequest()).thenReturn(request);
UserProvider up = new UserProvider();
Assert.assertNull(up.getValue(context));
Assert.assertNull(MDC.get("user"));
assertNull(up.getValue(context));
assertNull(MDC.get("user"));
}
@Test
@ -59,8 +63,8 @@ public void queryStringUser() {
HttpContext context = Mockito.mock(HttpContext.class);
Mockito.when(context.getRequest()).thenReturn(request);
UserProvider up = new UserProvider();
Assert.assertEquals(up.getValue(context).getName(), "foo");
Assert.assertEquals(MDC.get("user"), "foo");
assertEquals(up.getValue(context).getName(), "foo");
assertEquals(MDC.get("user"), "foo");
}
@Test
@ -77,15 +81,15 @@ public String getName() {
HttpContext context = Mockito.mock(HttpContext.class);
Mockito.when(context.getRequest()).thenReturn(request);
UserProvider up = new UserProvider();
Assert.assertEquals(up.getValue(context).getName(), "bar");
Assert.assertEquals(MDC.get("user"), "bar");
assertEquals(up.getValue(context).getName(), "bar");
assertEquals(MDC.get("user"), "bar");
}
@Test
public void getters() {
UserProvider up = new UserProvider();
Assert.assertEquals(up.getScope(), ComponentScope.PerRequest);
Assert.assertEquals(up.getInjectable(null, null, Principal.class), up);
Assert.assertNull(up.getInjectable(null, null, String.class));
assertEquals(up.getScope(), ComponentScope.PerRequest);
assertEquals(up.getInjectable(null, null, Principal.class), up);
assertNull(up.getInjectable(null, null, String.class));
}
}

View File

@ -17,14 +17,14 @@
*/
package org.apache.hadoop.test;
import junit.framework.Assert;
import static org.junit.Assert.fail;
import java.text.MessageFormat;
import org.apache.hadoop.util.Time;
import org.junit.Rule;
import org.junit.rules.MethodRule;
import java.text.MessageFormat;
public abstract class HTestCase {
public static final String TEST_WAITFOR_RATIO_PROP = "test.waitfor.ratio";
@ -161,7 +161,7 @@ protected long waitFor(int timeout, boolean failIfTimeout, Predicate predicate)
}
if (!eval) {
if (failIfTimeout) {
Assert.fail(MessageFormat.format("Waiting timed out after [{0}] msec", timeout));
fail(MessageFormat.format("Waiting timed out after [{0}] msec", timeout));
} else {
System.out.println(MessageFormat.format("Waiting timed out after [{0}] msec", timeout));
}

View File

@ -17,12 +17,12 @@
*/
package org.apache.hadoop.test;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
/**
* Helper to configure FileSystemAccess user/group and proxyuser
* configuration for testing using Java System properties.

View File

@ -17,16 +17,16 @@
*/
package org.apache.hadoop.test;
import org.junit.Test;
import org.junit.rules.MethodRule;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.Statement;
import java.io.File;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.Test;
import org.junit.rules.MethodRule;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.Statement;
public class TestDirHelper implements MethodRule {
@Test

View File

@ -17,14 +17,15 @@
*/
package org.apache.hadoop.test;
import junit.framework.Assert;
import static org.junit.Assert.fail;
import java.util.regex.Pattern;
import org.junit.Test;
import org.junit.rules.MethodRule;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.Statement;
import java.util.regex.Pattern;
public class TestExceptionHelper implements MethodRule {
@Test
@ -41,7 +42,7 @@ public void evaluate() throws Throwable {
statement.evaluate();
if (testExceptionAnnotation != null) {
Class<? extends Throwable> klass = testExceptionAnnotation.exception();
Assert.fail("Expected Exception: " + klass.getSimpleName());
fail("Expected Exception: " + klass.getSimpleName());
}
} catch (Throwable ex) {
if (testExceptionAnnotation != null) {
@ -50,10 +51,10 @@ public void evaluate() throws Throwable {
String regExp = testExceptionAnnotation.msgRegExp();
Pattern pattern = Pattern.compile(regExp);
if (!pattern.matcher(ex.getMessage()).find()) {
Assert.fail("Expected Exception Message pattern: " + regExp + " got message: " + ex.getMessage());
fail("Expected Exception Message pattern: " + regExp + " got message: " + ex.getMessage());
}
} else {
Assert.fail("Expected Exception: " + klass.getSimpleName() + " got: " + ex.getClass().getSimpleName());
fail("Expected Exception: " + klass.getSimpleName() + " got: " + ex.getClass().getSimpleName());
}
} else {
throw ex;

View File

@ -18,19 +18,9 @@
package org.apache.hadoop.test;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Time;
import org.junit.Test;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
@ -39,6 +29,19 @@
import java.net.HttpURLConnection;
import java.net.URL;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Time;
import org.junit.Test;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
public class TestHFSTestCase extends HFSTestCase {
@Test(expected = IllegalStateException.class)
@ -69,7 +72,7 @@ public void testHdfsNoAnnotation2() throws Exception {
@Test
@TestDir
public void testDirAnnotation() throws Exception {
Assert.assertNotNull(TestDirHelper.getTestDir());
assertNotNull(TestDirHelper.getTestDir());
}
@Test
@ -81,8 +84,8 @@ public boolean evaluate() throws Exception {
}
});
long end = Time.now();
Assert.assertEquals(waited, 0, 50);
Assert.assertEquals(end - start - waited, 0, 50);
assertEquals(waited, 0, 50);
assertEquals(end - start - waited, 0, 50);
}
@Test
@ -95,8 +98,8 @@ public boolean evaluate() throws Exception {
}
});
long end = Time.now();
Assert.assertEquals(waited, -1);
Assert.assertEquals(end - start, 200, 50);
assertEquals(waited, -1);
assertEquals(end - start, 200, 50);
}
@Test
@ -109,8 +112,8 @@ public boolean evaluate() throws Exception {
}
});
long end = Time.now();
Assert.assertEquals(waited, -1);
Assert.assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio());
assertEquals(waited, -1);
assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio());
}
@Test
@ -119,7 +122,7 @@ public void sleepRatio1() {
long start = Time.now();
sleep(100);
long end = Time.now();
Assert.assertEquals(end - start, 100, 50);
assertEquals(end - start, 100, 50);
}
@Test
@ -128,7 +131,7 @@ public void sleepRatio2() {
long start = Time.now();
sleep(100);
long end = Time.now();
Assert.assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio());
assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio());
}
@Test
@ -141,8 +144,8 @@ public void testHadoopFileSystem() throws Exception {
os.write(new byte[]{1});
os.close();
InputStream is = fs.open(new Path(TestHdfsHelper.getHdfsTestDir(), "foo"));
Assert.assertEquals(is.read(), 1);
Assert.assertEquals(is.read(), -1);
assertEquals(is.read(), 1);
assertEquals(is.read(), -1);
is.close();
} finally {
fs.close();
@ -167,9 +170,9 @@ public void testJetty() throws Exception {
server.start();
URL url = new URL(TestJettyHelper.getJettyURL(), "/bar");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
Assert.assertEquals(reader.readLine(), "foo");
assertEquals(reader.readLine(), "foo");
reader.close();
}
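
The waitFor and sleep checks above use the three-argument assertEquals overload, where the last argument is a tolerance: assertEquals(expected, actual, delta) passes when the two values differ by at most delta. A small illustrative sketch with made-up numbers, not measurements from these tests:

package org.apache.hadoop.example;

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class TestTimingTolerance {

  @Test
  public void elapsedTimeWithinTolerance() {
    long expectedMillis = 100;
    long measuredMillis = 130;   // pretend measurement
    // Passes because |100 - 130| <= 50; the delta absorbs scheduling jitter.
    assertEquals(expectedMillis, measuredMillis, 50);
  }
}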

View File

@ -18,23 +18,25 @@
package org.apache.hadoop.test;
import junit.framework.Assert;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import org.apache.hadoop.util.Time;
import org.junit.Test;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.util.Time;
import org.junit.Test;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
public class TestHTestCase extends HTestCase {
@Test(expected = IllegalStateException.class)
@ -55,7 +57,7 @@ public void testJettyNoAnnotation2() throws Exception {
@Test
@TestDir
public void testDirAnnotation() throws Exception {
Assert.assertNotNull(TestDirHelper.getTestDir());
assertNotNull(TestDirHelper.getTestDir());
}
@Test
@ -67,8 +69,8 @@ public boolean evaluate() throws Exception {
}
});
long end = Time.now();
Assert.assertEquals(waited, 0, 50);
Assert.assertEquals(end - start - waited, 0, 50);
assertEquals(waited, 0, 50);
assertEquals(end - start - waited, 0, 50);
}
@Test
@ -81,8 +83,8 @@ public boolean evaluate() throws Exception {
}
});
long end = Time.now();
Assert.assertEquals(waited, -1);
Assert.assertEquals(end - start, 200, 50);
assertEquals(waited, -1);
assertEquals(end - start, 200, 50);
}
@Test
@ -95,8 +97,8 @@ public boolean evaluate() throws Exception {
}
});
long end = Time.now();
Assert.assertEquals(waited, -1);
Assert.assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio());
assertEquals(waited, -1);
assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio());
}
@Test
@ -105,7 +107,7 @@ public void sleepRatio1() {
long start = Time.now();
sleep(100);
long end = Time.now();
Assert.assertEquals(end - start, 100, 50);
assertEquals(end - start, 100, 50);
}
@Test
@ -114,7 +116,7 @@ public void sleepRatio2() {
long start = Time.now();
sleep(100);
long end = Time.now();
Assert.assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio());
assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio());
}
public static class MyServlet extends HttpServlet {
@ -135,9 +137,9 @@ public void testJetty() throws Exception {
server.start();
URL url = new URL(TestJettyHelper.getJettyURL(), "/bar");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
Assert.assertEquals(reader.readLine(), "foo");
assertEquals(reader.readLine(), "foo");
reader.close();
}

View File

@ -17,6 +17,9 @@
*/
package org.apache.hadoop.test;
import java.io.File;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -26,9 +29,6 @@
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.Statement;
import java.io.File;
import java.util.concurrent.atomic.AtomicInteger;
public class TestHdfsHelper extends TestDirHelper {
@Test

View File

@ -17,17 +17,17 @@
*/
package org.apache.hadoop.test;
import java.net.InetAddress;
import java.net.MalformedURLException;
import java.net.ServerSocket;
import java.net.URL;
import org.junit.Test;
import org.junit.rules.MethodRule;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.Statement;
import org.mortbay.jetty.Server;
import java.net.InetAddress;
import java.net.MalformedURLException;
import java.net.ServerSocket;
import java.net.URL;
public class TestJettyHelper implements MethodRule {
@Test

View File

@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
@ -28,8 +31,6 @@
import java.util.regex.Pattern;
import java.util.zip.CRC32;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -44,8 +45,9 @@
import org.apache.hadoop.raid.RaidUtils;
import org.apache.hadoop.raid.protocol.PolicyInfo.ErasureCodeType;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
public class TestRaidDfs extends TestCase {
public class TestRaidDfs {
final static String TEST_DIR = new File(System.getProperty("test.build.data",
"target/test-data")).getAbsolutePath();
final static String LOG_DIR = "target/raidlog";
@ -195,6 +197,7 @@ private void corruptBlockAndValidate(Path srcFile, Path destPath,
* Create a file, corrupt several blocks in it and ensure that the file can be
* read through DistributedRaidFileSystem by ReedSolomon coding.
*/
@Test
public void testRaidDfsRs() throws Exception {
LOG.info("Test testRaidDfs started.");
@ -224,6 +227,7 @@ public void testRaidDfsRs() throws Exception {
/**
* Test DistributedRaidFileSystem.readFully()
*/
@Test
public void testReadFully() throws Exception {
code = ErasureCodeType.XOR;
stripeLength = 3;
@ -268,6 +272,7 @@ public void testReadFully() throws Exception {
* Test that access time and mtime of a source file do not change after
* raiding.
*/
@Test
public void testAccessTime() throws Exception {
LOG.info("Test testAccessTime started.");
@ -300,6 +305,7 @@ public void testAccessTime() throws Exception {
* Create a file, corrupt a block in it and ensure that the file can be
* read through DistributedRaidFileSystem by XOR code.
*/
@Test
public void testRaidDfsXor() throws Exception {
LOG.info("Test testRaidDfs started.");
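
TestRaidDfs no longer extends junit.framework.TestCase, which is why the hunks above add @Test to each test method: the JUnit 4 runner only executes annotated methods and no longer picks them up by the test-name prefix. A minimal sketch of that behavior, using a hypothetical class rather than the real test:

package org.apache.hadoop.example;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import org.junit.Test;

public class TestDiscovery {   // note: no "extends TestCase"

  @Test
  public void testRunsBecauseAnnotated() {
    assertEquals(4, 2 + 2);
  }

  // Under JUnit 3 this would have run because its name starts with "test".
  // Under JUnit 4 it is silently skipped, so the fail() below is never reached;
  // forgetting @Test during a conversion quietly disables a test.
  public void testSkippedWithoutAnnotation() {
    fail("never executed by the JUnit 4 runner");
  }
}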

View File

@ -17,6 +17,11 @@
*/
package org.apache.hadoop.raid;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
@ -26,32 +31,27 @@
import java.util.Random;
import java.util.zip.CRC32;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.JarFinder;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.RaidDFSUtil;
import org.apache.hadoop.hdfs.TestRaidDfs;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobContext;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.RaidDFSUtil;
import org.apache.hadoop.hdfs.TestRaidDfs;
import org.apache.hadoop.raid.RaidNode;
import org.apache.hadoop.raid.RaidUtils;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.util.JarFinder;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.junit.Test;
public class TestBlockFixer {

View File

@ -17,7 +17,6 @@
*/
package org.apache.hadoop.raid;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestBlockFixerBlockFixDist extends TestBlockFixer {
@Test

View File

@ -17,19 +17,19 @@
*/
package org.apache.hadoop.raid;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.RaidDFSUtil;
import org.apache.hadoop.hdfs.TestRaidDfs;
import org.apache.hadoop.raid.RaidNode;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.junit.Test;
public class TestBlockFixerDistConcurrency extends TestBlockFixer {
/**

View File

@ -18,7 +18,6 @@
package org.apache.hadoop.raid;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestBlockFixerGeneratedBlockDist extends TestBlockFixer {
/**

View File

@ -18,7 +18,6 @@
package org.apache.hadoop.raid;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestBlockFixerParityBlockFixDist extends TestBlockFixer {
@Test

View File

@ -17,27 +17,29 @@
*/
package org.apache.hadoop.raid;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.raid.protocol.PolicyInfo;
import org.apache.hadoop.util.Time;
import org.junit.Test;
public class TestDirectoryTraversal extends TestCase {
public class TestDirectoryTraversal {
final static Log LOG = LogFactory.getLog(
"org.apache.hadoop.raid.TestDirectoryTraversal");
final static String TEST_DIR = new File(System.getProperty("test.build.data",
@ -50,6 +52,7 @@ public class TestDirectoryTraversal extends TestCase {
/**
* Test basic enumeration.
*/
@Test
public void testEnumeration() throws IOException {
mySetup();
@ -91,6 +94,7 @@ public void testEnumeration() throws IOException {
}
}
@Test
public void testSuspension() throws IOException {
LOG.info("Starting testSuspension");
mySetup();
@ -128,6 +132,7 @@ public void testSuspension() throws IOException {
}
}
@Test
public void testFileFilter() throws IOException {
mySetup();

View File

@ -17,19 +17,22 @@
*/
package org.apache.hadoop.raid;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;
import org.apache.hadoop.util.Time;
import org.junit.Test;
import junit.framework.TestCase;
public class TestErasureCodes extends TestCase {
public class TestErasureCodes {
final int TEST_CODES = 100;
final int TEST_TIMES = 1000;
final Random RAND = new Random();
@Test
public void testEncodeDecode() {
for (int n = 0; n < TEST_CODES; n++) {
int stripeSize = RAND.nextInt(99) + 1; // 1, 2, 3, ... 100
@ -67,6 +70,7 @@ public void testEncodeDecode() {
}
}
@Test
public void testRSPerformance() {
int stripeSize = 10;
int paritySize = 4;
@ -131,6 +135,7 @@ public void testRSPerformance() {
assertTrue("Decode failed", java.util.Arrays.equals(copy, message[0]));
}
@Test
public void testXorPerformance() {
java.util.Random RAND = new java.util.Random();
int stripeSize = 10;
@ -171,6 +176,7 @@ public void testXorPerformance() {
assertTrue("Decode failed", java.util.Arrays.equals(copy, message[0]));
}
@Test
public void testComputeErrorLocations() {
for (int i = 0; i < TEST_TIMES; ++i) {
verifyErrorLocations(10, 4, 1);

View File

@ -17,13 +17,15 @@
*/
package org.apache.hadoop.raid;
import static org.junit.Assert.assertTrue;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;
import java.util.HashSet;
import junit.framework.TestCase;
import org.junit.Test;
public class TestGaloisField extends TestCase {
public class TestGaloisField {
final int TEST_TIMES = 10000;
final Random RAND = new Random();
@ -40,6 +42,7 @@ private int[] randGFPoly(int len) {
return result;
}
@Test
public void testGetInstance() {
GaloisField gf1 = GaloisField.getInstance(256, 285);
GaloisField gf2 = GaloisField.getInstance();
@ -52,6 +55,7 @@ public void testGetInstance() {
assertTrue(gf5 == gf6);
}
@Test
public void testDistributivity() {
for (int i = 0; i < TEST_TIMES; i++) {
int a = RAND.nextInt(GF.getFieldSize());
@ -64,6 +68,7 @@ public void testDistributivity() {
}
}
@Test
public void testDevision() {
for (int i = 0; i < TEST_TIMES; i++) {
int a = RAND.nextInt(GF.getFieldSize());
@ -77,6 +82,7 @@ public void testDevision() {
}
}
@Test
public void testPower() {
for (int i = 0; i < TEST_TIMES; i++) {
int a = randGF();
@ -90,6 +96,7 @@ public void testPower() {
}
}
@Test
public void testPolynomialDistributivity() {
final int TEST_LEN = 15;
for (int i = 0; i < TEST_TIMES; i++) {
@ -103,6 +110,7 @@ public void testPolynomialDistributivity() {
}
}
@Test
public void testSubstitute() {
final int TEST_LEN = 15;
for (int i = 0; i < TEST_TIMES; i++) {
@ -121,6 +129,7 @@ public void testSubstitute() {
}
}
@Test
public void testSolveVandermondeSystem() {
final int TEST_LEN = 15;
for (int i = 0; i < TEST_TIMES; i++) {
@ -151,6 +160,7 @@ public void testSolveVandermondeSystem() {
}
}
@Test
public void testRemainder() {
final int TEST_LEN = 15;
for (int i = 0; i < TEST_TIMES; i++) {

View File

@ -17,25 +17,30 @@
*/
package org.apache.hadoop.raid;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.io.UnsupportedEncodingException;
import java.nio.charset.Charset;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestHarIndexParser extends TestCase {
public class TestHarIndexParser {
final static Log LOG = LogFactory.getLog(TestHarIndexParser.class);
File indexFile = null;
protected void setUp() throws FileNotFoundException, IOException {
@Before
public void setUp() throws FileNotFoundException, IOException {
LOG.info("TestHarIndexParser.setUp()");
indexFile = File.createTempFile("harindex", ".tmp");
indexFile.deleteOnExit();
@ -51,12 +56,14 @@ protected void setUp() throws FileNotFoundException, IOException {
out.close();
}
protected void tearDown() {
@After
public void tearDown() {
LOG.info("TestHarIndexParser.tearDown()");
if (indexFile != null)
indexFile.delete();
}
@Test
public void testHarIndexParser()
throws UnsupportedEncodingException, IOException {
LOG.info("testHarIndexParser started.");
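
TestHarIndexParser shows the lifecycle half of the conversion: the protected setUp and tearDown overrides inherited from TestCase become public methods annotated with @Before and @After, and the class drops its superclass. A minimal sketch of the same shape, assuming a hypothetical test that only manages a temporary file:

package org.apache.hadoop.example;

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class TestLifecycleConversion {

  private File scratch;

  // JUnit 3: protected void setUp() inherited from TestCase.
  // JUnit 4: any public method annotated with @Before, no base class required.
  @Before
  public void setUp() throws IOException {
    scratch = File.createTempFile("scratch", ".tmp");
    scratch.deleteOnExit();
  }

  // JUnit 3: protected void tearDown().
  // JUnit 4: @After, which still runs when the test method throws.
  @After
  public void tearDown() {
    if (scratch != null) {
      scratch.delete();
    }
  }

  @Test
  public void scratchFileExists() {
    assertTrue(scratch.exists());
  }
}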

View File

@ -17,25 +17,25 @@
*/
package org.apache.hadoop.raid;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.util.ArrayList;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.raid.protocol.PolicyInfo;
import org.apache.hadoop.util.Time;
import org.junit.Test;
public class TestRaidFilter extends TestCase {
public class TestRaidFilter {
final static String TEST_DIR = new File(System.getProperty("test.build.data",
"target/test-data")).getAbsolutePath();
final static Log LOG =
@ -59,6 +59,7 @@ private void myTearDown() throws Exception {
if (dfs != null) { dfs.shutdown(); }
}
@Test
public void testLayeredPolicies() throws Exception {
mySetup();
Path src1 = new Path("/user/foo");

View File

@ -17,31 +17,32 @@
*/
package org.apache.hadoop.raid;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.io.FileWriter;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.util.Random;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.log4j.Level;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Level;
import org.junit.Test;
/**
* If a file gets deleted, then verify that the parity file gets deleted too.
*/
public class TestRaidHar extends TestCase {
public class TestRaidHar {
final static String TEST_DIR = new File(System.getProperty("test.build.data",
"target/test-data")).getAbsolutePath();
final static String CONFIG_FILE = new File(TEST_DIR,
@ -182,6 +183,7 @@ private void stopClusters() throws Exception {
* Test that parity files that do not have an associated master file
* get deleted.
*/
@Test
public void testRaidHar() throws Exception {
LOG.info("Test testRaidHar started.");

View File

@ -17,26 +17,26 @@
*/
package org.apache.hadoop.raid;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.FileWriter;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import java.util.zip.CRC32;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobContext;
@ -45,14 +45,16 @@
import org.apache.hadoop.raid.protocol.PolicyInfo;
import org.apache.hadoop.raid.protocol.PolicyList;
import org.apache.hadoop.util.JarFinder;
import org.apache.hadoop.raid.protocol.PolicyInfo.ErasureCodeType;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.junit.Test;
/**
* Test the generation of parity blocks for files with different block
* sizes. Also test that a data block can be regenerated from a raid stripe
* using the parity block
*/
public class TestRaidNode extends TestCase {
public class TestRaidNode {
final static String TEST_DIR = new File(System.getProperty("test.build.data",
"target/test-data")).getAbsolutePath();
public static final String DistRaid_JAR = JarFinder.getJar(DistRaid.class);
@ -258,6 +260,7 @@ private void stopClusters() throws Exception {
/**
* Test to run a filter
*/
@Test
public void testPathFilter() throws Exception {
LOG.info("Test testPathFilter started.");
@ -513,6 +516,7 @@ static void createTestFiles(FileSystem fileSys, String path, String destpath, in
/**
* Test dist Raid
*/
@Test
public void testDistRaid() throws Exception {
LOG.info("Test testDistRaid started.");
long targetReplication = 2;
@ -664,6 +668,7 @@ private void validateFile(FileSystem fileSys, Path name1, Path name2, long crc)
}
}
@Test
public void testSuspendTraversal() throws Exception {
LOG.info("Test testSuspendTraversal started.");
long targetReplication = 2;

View File

@ -17,48 +17,37 @@
*/
package org.apache.hadoop.raid;
import java.io.File;
import java.io.FileWriter;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.GregorianCalendar;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.Random;
import java.util.zip.CRC32;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.util.Random;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.log4j.Level;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.raid.protocol.PolicyInfo;
import org.apache.hadoop.raid.protocol.PolicyList;
import org.apache.hadoop.hdfs.TestRaidDfs;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.raid.protocol.PolicyInfo;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Test;
/**
* If a file gets deleted, then verify that the parity file gets deleted too.
*/
public class TestRaidPurge extends TestCase {
public class TestRaidPurge {
final static String TEST_DIR = new File(System.getProperty("test.build.data",
"target/test-data")).getAbsolutePath();
final static String CONFIG_FILE = new File(TEST_DIR,
@ -206,6 +195,7 @@ private void stopClusters() throws Exception {
* Test that parity files that do not have an associated master file
* get deleted.
*/
@Test
public void testPurge() throws Exception {
LOG.info("Test testPurge started.");
@ -312,6 +302,7 @@ private void doTestPurge(int iter, long targetReplication,
* Create a file, wait for parity file to get HARed. Then modify the file,
* wait for the HAR to get purged.
*/
@Test
public void testPurgeHar() throws Exception {
LOG.info("testPurgeHar started");
int harDelay = 0;
@ -381,6 +372,7 @@ public void testPurgeHar() throws Exception {
* Create parity file, delete original file's directory and then validate that
* parity directory is automatically deleted.
*/
@Test
public void testPurgeDirectory() throws Exception {
long stripeLength = 5;
long blockSize = 8192;
@ -433,6 +425,7 @@ public void testPurgeDirectory() throws Exception {
/**
* Test that an XOR parity file is removed when a RS parity file is detected.
*/
@Test
public void testPurgePreference() throws Exception {
createClusters(true);
Path dir = new Path("/user/test/raidtest/");

View File

@ -17,34 +17,35 @@
*/
package org.apache.hadoop.raid;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Random;
import java.util.zip.CRC32;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.RaidDFSUtil;
import org.apache.hadoop.hdfs.TestRaidDfs;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.TestRaidDfs;
import org.apache.hadoop.hdfs.RaidDFSUtil;
import org.apache.hadoop.raid.RaidNode;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Test;
public class TestRaidShell extends TestCase {
public class TestRaidShell {
final static Log LOG = LogFactory.getLog(
"org.apache.hadoop.raid.TestRaidShell");
final static String TEST_DIR = new File(System.getProperty("test.build.data",
@ -65,6 +66,7 @@ public class TestRaidShell extends TestCase {
* Create a file with three stripes, corrupt a block each in two stripes,
* and wait for the file to be fixed.
*/
@Test
public void testBlockFix() throws Exception {
LOG.info("Test testBlockFix started.");
long blockSize = 8192L;

View File

@ -17,34 +17,31 @@
*/
package org.apache.hadoop.raid;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.FileWriter;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Random;
import org.junit.Test;
import org.junit.After;
import static org.junit.Assert.assertTrue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.RaidDFSUtil;
import org.apache.hadoop.hdfs.TestRaidDfs;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.TestRaidDfs;
import org.apache.hadoop.hdfs.RaidDFSUtil;
import org.apache.hadoop.raid.RaidNode;
import org.apache.hadoop.raid.HarIndex;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
import org.junit.Test;
public class TestRaidShellFsck {

View File

@ -18,27 +18,29 @@
package org.apache.hadoop.raid;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.RaidDFSUtil;
import org.apache.hadoop.hdfs.TestRaidDfs;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.mapred.Reporter;
import org.junit.Test;
public class TestReedSolomonDecoder extends TestCase {
public class TestReedSolomonDecoder {
final static Log LOG = LogFactory.getLog(
"org.apache.hadoop.raid.TestReedSolomonDecoder");
final static String TEST_DIR = new File(System.getProperty("test.build.data",
@ -49,6 +51,7 @@ public class TestReedSolomonDecoder extends TestCase {
MiniDFSCluster dfs = null;
FileSystem fileSys = null;
@Test
public void testDecoder() throws Exception {
mySetup();
int stripeSize = 10;

View File

@ -18,34 +18,23 @@
package org.apache.hadoop.raid;
import static org.junit.Assert.assertEquals;
import java.io.File;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.DistributedRaidFileSystem;
import org.apache.hadoop.hdfs.TestRaidDfs;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.raid.RaidNode;
import org.junit.Test;
public class TestReedSolomonEncoder extends TestCase {
public class TestReedSolomonEncoder {
final static Log LOG = LogFactory.getLog(
"org.apache.hadoop.raid.TestReedSolomonEncoder");
final static String TEST_DIR = new File(System.getProperty("test.build.data",
@ -57,6 +46,7 @@ public class TestReedSolomonEncoder extends TestCase {
MiniDFSCluster dfs = null;
FileSystem fileSys = null;
@Test
public void testEncoder() throws Exception {
mySetup();
int stripeSize = 10;

View File

@ -109,6 +109,8 @@ Trunk (unreleased changes)
HDFS-3630 Modify TestPersistBlocks to use both flush and hflush (sanjay)
HDFS-3583. Convert remaining tests to Junit4. (Andrew Wang via atm)
OPTIMIZATIONS
BUG FIXES

View File

@ -17,7 +17,11 @@
*/
package org.apache.hadoop.cli;
import org.apache.hadoop.cli.util.*;
import org.apache.hadoop.cli.util.CLICommandDFSAdmin;
import org.apache.hadoop.cli.util.CLICommandTypes;
import org.apache.hadoop.cli.util.CLITestCmd;
import org.apache.hadoop.cli.util.CommandExecutor;
import org.apache.hadoop.cli.util.FSCmdExecutor;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
public class CLITestCmdDFS extends CLITestCmd {

View File

@ -18,6 +18,8 @@
package org.apache.hadoop.cli;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.cli.util.CLICommand;
import org.apache.hadoop.cli.util.CommandExecutor.Result;
import org.apache.hadoop.fs.FileSystem;
@ -27,7 +29,6 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.junit.After;
import static org.junit.Assert.assertTrue;
import org.junit.Before;
import org.junit.Test;

View File

@ -17,19 +17,23 @@
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import junit.framework.TestCase;
public class TestGlobPaths {
public class TestGlobPaths extends TestCase {
static class RegexPathFilter implements PathFilter {
private final String regex;
public RegexPathFilter(String regex) {
this.regex = regex;
@ -41,15 +45,15 @@ public boolean accept(Path path) {
}
}
static private MiniDFSCluster dfsCluster;
static private FileSystem fs;
static final private int NUM_OF_PATHS = 4;
static final String USER_DIR = "/user/"+System.getProperty("user.name");
private Path[] path = new Path[NUM_OF_PATHS];
@Override
protected void setUp() throws Exception {
@Before
public void setUp() throws Exception {
try {
Configuration conf = new HdfsConfiguration();
dfsCluster = new MiniDFSCluster.Builder(conf).build();
@ -59,13 +63,14 @@ protected void setUp() throws Exception {
}
}
@Override
protected void tearDown() throws Exception {
@After
public void tearDown() throws Exception {
if(dfsCluster!=null) {
dfsCluster.shutdown();
}
}
@Test
public void testPathFilter() throws IOException {
try {
String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b" };
@ -78,6 +83,7 @@ public void testPathFilter() throws IOException {
}
}
@Test
public void testPathFilterWithFixedLastComponent() throws IOException {
try {
String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b",
@ -91,6 +97,7 @@ public void testPathFilterWithFixedLastComponent() throws IOException {
}
}
@Test
public void testGlob() throws Exception {
//pTestEscape(); // need to wait until HADOOP-1995 is fixed
pTestJavaRegexSpecialChars();
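As a rough illustration of the lifecycle conversion applied to TestGlobPaths above (the class name ExampleLifecycleTest and its fixture are invented for this sketch, not part of the patch): a JUnit 3 test extends junit.framework.TestCase and overrides protected setUp()/tearDown(), while the JUnit 4 form drops the superclass and marks public methods with @Before, @After, and @Test.

import static org.junit.Assert.assertTrue;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class ExampleLifecycleTest {          // no longer "extends TestCase"
  private StringBuilder fixture;

  @Before                                    // replaces @Override protected void setUp()
  public void setUp() {
    fixture = new StringBuilder("data");
  }

  @After                                     // replaces @Override protected void tearDown()
  public void tearDown() {
    fixture = null;
  }

  @Test                                      // JUnit 4 runs this by annotation, not by name
  public void testFixtureIsInitialized() {
    assertTrue(fixture.length() > 0);
  }
}

The import ordering (static imports first, then alphabetized regular imports) follows the convention used throughout the patch.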

View File

@ -18,6 +18,9 @@
package org.apache.hadoop.fs;
import static org.apache.hadoop.fs.FileContextTestHelper.exists;
import static org.apache.hadoop.fs.FileContextTestHelper.getTestRootPath;
import java.io.IOException;
import java.net.URISyntaxException;
@ -27,8 +30,8 @@
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.AfterClass;
@ -37,8 +40,6 @@
import org.junit.BeforeClass;
import org.junit.Test;
import static org.apache.hadoop.fs.FileContextTestHelper.*;
public class TestHDFSFileContextMainOperations extends
FileContextMainOperationsBaseTest {
private static MiniDFSCluster cluster;

View File

@ -28,7 +28,6 @@
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;

View File

@ -17,6 +17,9 @@
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
@ -25,19 +28,15 @@
import java.net.URISyntaxException;
import java.net.URL;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
/**
* Test of the URL stream handler factory.
*/
public class TestUrlStreamHandler extends TestCase {
public class TestUrlStreamHandler {
/**
* Test opening and reading from an InputStream through a hdfs:// URL.
@ -47,6 +46,7 @@ public class TestUrlStreamHandler extends TestCase {
*
* @throws IOException
*/
@Test
public void testDfsUrls() throws IOException {
Configuration conf = new HdfsConfiguration();
@ -105,6 +105,7 @@ public void testDfsUrls() throws IOException {
* @throws IOException
* @throws URISyntaxException
*/
@Test
public void testFileUrls() throws IOException, URISyntaxException {
// URLStreamHandler is already set in JVM by testDfsUrls()
Configuration conf = new HdfsConfiguration();

View File

@ -17,6 +17,8 @@
*/
package org.apache.hadoop.fs.loadGenerator;
import static org.junit.Assert.assertEquals;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
@ -27,9 +29,6 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import static org.junit.Assert.*;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

View File

@ -17,9 +17,12 @@
*/
package org.apache.hadoop.fs.permission;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import junit.framework.TestCase;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
@ -32,8 +35,9 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Test;
public class TestStickyBit extends TestCase {
public class TestStickyBit {
static UserGroupInformation user1 =
UserGroupInformation.createUserForTesting("theDoctor", new String[] {"tardis"});
@ -158,6 +162,7 @@ private void confirmSettingAndGetting(FileSystem hdfs, Path baseDir)
assertFalse(hdfs.getFileStatus(f).getPermission().getStickyBit());
}
@Test
public void testGeneralSBBehavior() throws IOException, InterruptedException {
MiniDFSCluster cluster = null;
try {
@ -195,6 +200,7 @@ public void testGeneralSBBehavior() throws IOException, InterruptedException {
* Test that one user can't rename/move another user's file when the sticky
* bit is set.
*/
@Test
public void testMovingFiles() throws IOException, InterruptedException {
MiniDFSCluster cluster = null;
@ -243,6 +249,7 @@ public void testMovingFiles() throws IOException, InterruptedException {
* the sticky bit back on re-start, and that no extra sticky bits appear after
* re-start.
*/
@Test
public void testStickyBitPersistence() throws IOException {
MiniDFSCluster cluster = null;
try {

View File

@ -18,26 +18,6 @@
package org.apache.hadoop.fs.viewfs;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import javax.security.auth.login.LoginException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
@ -46,17 +26,29 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import javax.security.auth.login.LoginException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* Tests for viewfs implementation of default fs level values.

View File

@ -23,6 +23,9 @@
* Since viewfs has overlaid ViewFsFileStatus, we ran into
* serialization problems. This test tests the fix.

*/
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
@ -40,11 +43,9 @@
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestViewFsFileStatusHdfs {

View File

@ -17,13 +17,12 @@
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Random;
import junit.framework.Assert;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -121,16 +120,16 @@ public static void check(FileSystem fs, Path p, long length) throws IOException
FSDataInputStream in = fs.open(p);
if (in.getWrappedStream() instanceof DFSInputStream) {
long len = ((DFSInputStream)in.getWrappedStream()).getFileLength();
TestCase.assertEquals(length, len);
assertEquals(length, len);
} else {
TestCase.assertEquals(length, status.getLen());
assertEquals(length, status.getLen());
}
for(i++; i < length; i++) {
TestCase.assertEquals((byte)i, (byte)in.read());
assertEquals((byte)i, (byte)in.read());
}
i = -(int)length;
TestCase.assertEquals(-1, in.read()); //EOF
assertEquals(-1, in.read()); //EOF
in.close();
} catch(IOException ioe) {
throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
@ -175,7 +174,7 @@ public static void checkFullFile(FileSystem fs, Path name, int len,
private static void checkData(final byte[] actual, int from,
final byte[] expected, String message) {
for (int idx = 0; idx < actual.length; idx++) {
Assert.assertEquals(message+" byte "+(from+idx)+" differs. expected "+
assertEquals(message+" byte "+(from+idx)+" differs. expected "+
expected[from+idx]+" actual "+actual[idx],
expected[from+idx], actual[idx]);
actual[idx] = 0;
@ -189,7 +188,7 @@ public static void testAppend(FileSystem fs, Path p) throws IOException {
final FSDataOutputStream out = fs.create(p, (short)1);
out.write(bytes);
out.close();
Assert.assertEquals(bytes.length, fs.getFileStatus(p).getLen());
assertEquals(bytes.length, fs.getFileStatus(p).getLen());
}
for(int i = 2; i < 500; i++) {
@ -197,7 +196,7 @@ public static void testAppend(FileSystem fs, Path p) throws IOException {
final FSDataOutputStream out = fs.append(p);
out.write(bytes);
out.close();
Assert.assertEquals(i*bytes.length, fs.getFileStatus(p).getLen());
assertEquals(i*bytes.length, fs.getFileStatus(p).getLen());
}
}
}
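A minimal sketch of the assertion change shown above (ExampleAssertTest and its byte arrays are hypothetical, not from the patch): calls written as TestCase.assertEquals(...) or junit.framework.Assert.assertEquals(...) become plain assertEquals(...) through a static import of org.junit.Assert, keeping the same (message, expected, actual) argument order.

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class ExampleAssertTest {
  @Test
  public void testBytesMatch() {
    byte[] expected = {1, 2, 3};
    byte[] actual   = {1, 2, 3};
    assertEquals("length differs", expected.length, actual.length);
    for (int i = 0; i < expected.length; i++) {
      // same semantics as the former TestCase.assertEquals calls
      assertEquals("byte " + i + " differs", expected[i], actual[i]);
    }
  }
}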

View File

@ -36,7 +36,6 @@
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Level;
/**

View File

@ -18,25 +18,26 @@
package org.apache.hadoop.hdfs;
import java.net.Socket;
import java.net.InetSocketAddress;
import java.io.DataOutputStream;
import java.util.Random;
import java.util.List;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.net.NetUtils;
import static org.junit.Assert.*;
/**
* A helper class to setup the cluster, and get to BlockReader and DataNode for a block.
*/

View File

@ -55,7 +55,6 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil.Builder;
import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeID;

View File

@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -27,8 +29,6 @@
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
/** This is a comprehensive append test that tries
* all combinations of file length and number of appended bytes
* In each iteration, it creates a file of len1. Then reopen

View File

@ -17,18 +17,20 @@
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import static org.junit.Assert.*;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

View File

@ -17,27 +17,27 @@
*/
package org.apache.hadoop.hdfs;
import java.util.ArrayList;
import static org.junit.Assert.assertEquals;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.junit.Test;
/**
* This test ensures that the balancer bandwidth is dynamically adjusted
* correctly.
*/
public class TestBalancerBandwidth extends TestCase {
public class TestBalancerBandwidth {
final static private Configuration conf = new Configuration();
final static private int NUM_OF_DATANODES = 2;
final static private int DEFAULT_BANDWIDTH = 1024*1024;
public static final Log LOG = LogFactory.getLog(TestBalancerBandwidth.class);
@Test
public void testBalancerBandwidth() throws Exception {
/* Set bandwidthPerSec to a low value of 1M bps. */
conf.setLong(

View File

@ -17,26 +17,24 @@
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.BlockMissingException;
import org.junit.Test;
public class TestBlockMissingException extends TestCase {
public class TestBlockMissingException {
final static Log LOG = LogFactory.getLog("org.apache.hadoop.hdfs.TestBlockMissing");
final static int NUM_DATANODES = 3;
@ -47,6 +45,7 @@ public class TestBlockMissingException extends TestCase {
/**
* Test DFS Raid
*/
@Test
public void testBlockMissingException() throws Exception {
LOG.info("Test testBlockMissingException started.");
long blockSize = 1024L;

View File

@ -17,24 +17,26 @@
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.util.ArrayList;
import junit.framework.TestCase;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.junit.Test;
/**
* This class tests DatanodeDescriptor.getBlocksScheduled() at the
* NameNode. This counter is supposed to keep track of blocks currently
* scheduled to a datanode.
*/
public class TestBlocksScheduledCounter extends TestCase {
public class TestBlocksScheduledCounter {
@Test
public void testBlocksScheduledCounter() throws IOException {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
.build();

View File

@ -18,21 +18,20 @@
package org.apache.hadoop.hdfs;
import java.util.List;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Level;
import org.junit.Test;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.never;
import java.util.List;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestClientBlockVerification {

View File

@ -26,11 +26,9 @@
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;
import org.junit.Assert;
import org.junit.Test;
/**
* This tests pipeline recovery related client protocol works correct or not.

View File

@ -17,37 +17,33 @@
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.spy;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.SocketCache;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.token.Token;
import org.junit.Test;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import static org.junit.Assert.*;
import org.junit.Test;
import org.mockito.Matchers;
import org.mockito.Mockito;
import org.mockito.stubbing.Answer;
import org.mockito.invocation.InvocationOnMock;
import static org.mockito.Mockito.spy;
import org.mockito.stubbing.Answer;
/**
* This class tests the client connection caching in a single node

View File

@ -18,21 +18,23 @@
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.RandomAccessFile;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Random;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;
/**
* A JUnit test for corrupted file handling.

View File

@ -24,20 +24,25 @@
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.util.ArrayList;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.junit.Test;
public class TestDFSAddressConfig extends TestCase {
public class TestDFSAddressConfig {
@Test
public void testDFSAddressConfig() throws IOException {
Configuration conf = new HdfsConfiguration();

View File

@ -17,15 +17,15 @@
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.io.OutputStream;
import org.junit.*;
import static org.junit.Assert.fail;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
/**

View File

@ -17,6 +17,10 @@
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyString;
@ -38,8 +42,6 @@
import java.util.List;
import java.util.concurrent.TimeUnit;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
@ -75,6 +77,7 @@
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.stubbing.answers.ThrowsException;
import org.mockito.invocation.InvocationOnMock;
@ -86,7 +89,7 @@
* These tests make sure that DFSClient retries fetching data from DFS
* properly in case of errors.
*/
public class TestDFSClientRetries extends TestCase {
public class TestDFSClientRetries {
private static final String ADDRESS = "0.0.0.0";
final static private int PING_INTERVAL = 1000;
final static private int MIN_SLEEP_TIME = 1000;
@ -146,6 +149,7 @@ private static void writeData(OutputStream out, int len) throws IOException {
* This makes sure that when the DN closes the client's socket after the client had
* successfully connected earlier, the data can still be fetched.
*/
@Test
public void testWriteTimeoutAtDataNode() throws IOException,
InterruptedException {
final int writeTimeout = 100; //milliseconds.
@ -198,6 +202,7 @@ public void testWriteTimeoutAtDataNode() throws IOException,
* of times trying to add a block
*/
@SuppressWarnings("serial")
@Test
public void testNotYetReplicatedErrors() throws IOException
{
final String exceptionMsg = "Nope, not replicated yet...";
@ -242,6 +247,7 @@ public Object answer(InvocationOnMock invocation)
* operation, and not over the lifetime of the stream. It is a regression
* test for HDFS-127.
*/
@Test
public void testFailuresArePerOperation() throws Exception
{
long fileSize = 4096;
@ -317,6 +323,7 @@ public void testFailuresArePerOperation() throws Exception
* a client to safely retry a call and still produce a correct
* file. See HDFS-3031.
*/
@Test
public void testIdempotentAllocateBlockAndClose() throws Exception {
final String src = "/testIdempotentAllocateBlock";
Path file = new Path(src);
@ -457,6 +464,7 @@ private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
/**
* Test that a DFSClient waits for random time before retry on busy blocks.
*/
@Test
public void testDFSClientRetriesOnBusyBlocks() throws IOException {
System.out.println("Testing DFSClient random waiting on busy blocks.");
@ -700,6 +708,7 @@ class Counter {
public int get() { return counter; }
}
@Test
public void testGetFileChecksum() throws Exception {
final String f = "/testGetFileChecksum";
final Path p = new Path(f);
@ -736,6 +745,7 @@ public void testGetFileChecksum() throws Exception {
* RPC to the server and set rpcTimeout to less than n and ensure
* that socketTimeoutException is obtained
*/
@Test
public void testClientDNProtocolTimeout() throws IOException {
final Server server = new TestServer(1, true);
server.start();
@ -770,6 +780,7 @@ public void testClientDNProtocolTimeout() throws IOException {
* read call, so the client should expect consecutive calls to behave the same
* way. See HDFS-3067.
*/
@Test
public void testRetryOnChecksumFailure()
throws UnresolvedLinkException, IOException {
HdfsConfiguration conf = new HdfsConfiguration();
@ -812,6 +823,7 @@ public void testRetryOnChecksumFailure()
}
/** Test client retry with namenode restarting. */
@Test
public void testNamenodeRestart() throws Exception {
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
@ -937,6 +949,7 @@ public void run() {
}
}
@Test
public void testMultipleLinearRandomRetry() {
parseMultipleLinearRandomRetry(null, "");
parseMultipleLinearRandomRetry(null, "11");

View File

@ -17,17 +17,21 @@
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import java.io.File;
import java.util.Collections;
import java.util.List;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.junit.After;
import org.junit.Test;
import com.google.common.collect.Lists;
@ -35,7 +39,7 @@
* This test ensures the appropriate response from the system when
* the system is finalized.
*/
public class TestDFSFinalize extends TestCase {
public class TestDFSFinalize {
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.TestDFSFinalize");
@ -86,6 +90,7 @@ DATA_NODE, new File(dataNodeDirs[i],"current")),
/**
* This test attempts to finalize the NameNode and DataNode.
*/
@Test
public void testFinalize() throws Exception {
UpgradeUtilities.initialize();
@ -125,8 +130,8 @@ public void testFinalize() throws Exception {
} // end numDir loop
}
@Override
protected void tearDown() throws Exception {
@After
public void tearDown() throws Exception {
LOG.info("Shutting down MiniDFSCluster");
if (cluster != null) cluster.shutdown();
}

View File

@ -17,21 +17,27 @@
*/
package org.apache.hadoop.hdfs;
import junit.framework.TestCase;
import java.io.*;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.DataOutputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Time;
import org.junit.Test;
/**
* This class tests that the DFS command mkdirs cannot create subdirectories
* from a file when passed an illegal path. HADOOP-281.
*/
public class TestDFSMkdirs extends TestCase {
public class TestDFSMkdirs {
private void writeFile(FileSystem fileSys, Path name) throws IOException {
DataOutputStream stm = fileSys.create(name);
@ -43,6 +49,7 @@ private void writeFile(FileSystem fileSys, Path name) throws IOException {
* Tests mkdirs can create a directory that does not exist and will
* not create a subdirectory off a file.
*/
@Test
public void testDFSMkdirs() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@ -80,6 +87,7 @@ public void testDFSMkdirs() throws IOException {
/**
* Tests mkdir will not create directory when parent is missing.
*/
@Test
public void testMkdir() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();

View File

@ -17,14 +17,15 @@
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import junit.framework.AssertionFailedError;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -35,13 +36,15 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/** Unit tests for permission */
public class TestDFSPermission extends TestCase {
public class TestDFSPermission {
public static final Log LOG = LogFactory.getLog(TestDFSPermission.class);
final private static Configuration conf = new HdfsConfiguration();
@ -106,13 +109,13 @@ public class TestDFSPermission extends TestCase {
}
}
@Override
@Before
public void setUp() throws IOException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
}
@Override
@After
public void tearDown() throws IOException {
if (cluster != null) {
cluster.shutdown();
@ -122,6 +125,7 @@ public void tearDown() throws IOException {
/** This tests if permission setting in create, mkdir, and
* setPermission works correctly
*/
@Test
public void testPermissionSetting() throws Exception {
testPermissionSetting(OpType.CREATE); // test file creation
testPermissionSetting(OpType.MKDIRS); // test directory creation
@ -257,6 +261,7 @@ private void checkPermission(Path name, short expectedPermission,
* check that ImmutableFsPermission can be used as the argument
* to setPermission
*/
@Test
public void testImmutableFsPermission() throws IOException {
fs = FileSystem.get(conf);
@ -266,6 +271,7 @@ public void testImmutableFsPermission() throws IOException {
}
/* check if the ownership of a file/directory is set correctly */
@Test
public void testOwnership() throws Exception {
testOwnership(OpType.CREATE); // test file creation
testOwnership(OpType.MKDIRS); // test directory creation
@ -354,6 +360,7 @@ private enum OpType {CREATE, MKDIRS, OPEN, SET_REPLICATION,
/* Check if namenode performs permission checking correctly for
* superuser, file owner, group owner, and other users */
@Test
public void testPermissionChecking() throws Exception {
try {
fs = FileSystem.get(conf);
@ -533,7 +540,7 @@ void verifyPermission(UserGroupInformation ugi) throws IOException {
} catch(AccessControlException e) {
assertTrue(expectPermissionDeny());
}
} catch (AssertionFailedError ae) {
} catch (AssertionError ae) {
logPermissions();
throw ae;
}

View File

@ -16,6 +16,8 @@
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.DataOutputStream;
import java.io.IOException;
@ -26,8 +28,9 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.junit.Test;
public class TestDFSRemove extends junit.framework.TestCase {
public class TestDFSRemove {
final Path dir = new Path("/test/remove/");
void list(FileSystem fs, String name) throws IOException {
@ -51,6 +54,7 @@ static long getTotalDfsUsed(MiniDFSCluster cluster) throws IOException {
return total;
}
@Test
public void testRemove() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();

View File

@ -16,6 +16,9 @@
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.DataOutputStream;
import java.io.IOException;
@ -25,8 +28,9 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.junit.Test;
public class TestDFSRename extends junit.framework.TestCase {
public class TestDFSRename {
static int countLease(MiniDFSCluster cluster) {
return NameNodeAdapter.getLeaseManager(cluster.getNamesystem()).countLease();
}
@ -46,6 +50,7 @@ static void createFile(FileSystem fs, Path f) throws IOException {
a_out.close();
}
@Test
public void testRename() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();

View File

@ -19,22 +19,25 @@
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.util.StringUtils;
import org.junit.After;
import org.junit.Test;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
@ -44,7 +47,7 @@
* the system when the system is rolled back under various storage state and
* version conditions.
*/
public class TestDFSRollback extends TestCase {
public class TestDFSRollback {
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.TestDFSRollback");
@ -131,6 +134,7 @@ void startBlockPoolShouldFail(StartupOption operation, String bpid)
* This test attempts to rollback the NameNode and DataNode under
* a number of valid and invalid conditions.
*/
@Test
public void testRollback() throws Exception {
File[] baseDirs;
UpgradeUtilities.initialize();
@ -299,8 +303,8 @@ private void deleteMatchingFiles(File[] baseDirs, String regex) {
}
}
@Override
protected void tearDown() throws Exception {
@After
public void tearDown() throws Exception {
LOG.info("Shutting down MiniDFSCluster");
if (cluster != null) cluster.shutdown();
}

View File

@ -17,6 +17,10 @@
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.File;
@ -33,8 +37,6 @@
import java.util.Scanner;
import java.util.zip.GZIPOutputStream;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -53,11 +55,12 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Test;
/**
* This class tests commands from DFSShell.
*/
public class TestDFSShell extends TestCase {
public class TestDFSShell {
private static final Log LOG = LogFactory.getLog(TestDFSShell.class);
static final String TEST_ROOT_DIR =
@ -94,6 +97,7 @@ static void show(String s) {
System.out.println(Thread.currentThread().getStackTrace()[2] + " " + s);
}
@Test
public void testZeroSizeFile() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@ -136,6 +140,7 @@ public void testZeroSizeFile() throws IOException {
}
}
@Test
public void testRecrusiveRm() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@ -161,6 +166,7 @@ public void testRecrusiveRm() throws IOException {
}
}
@Test
public void testDu() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@ -210,6 +216,7 @@ public void testDu() throws IOException {
}
}
@Test
public void testPut() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@ -308,6 +315,7 @@ public void checkPermission(Permission perm) {
/** check command error outputs and exit statuses. */
@Test
public void testErrOutPut() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
@ -448,6 +456,7 @@ public void testErrOutPut() throws Exception {
}
}
@Test
public void testURIPaths() throws Exception {
Configuration srcConf = new HdfsConfiguration();
Configuration dstConf = new HdfsConfiguration();
@ -540,6 +549,7 @@ public void testURIPaths() throws Exception {
}
}
@Test
public void testText() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
@ -614,6 +624,7 @@ private void textTest(Path root, Configuration conf) throws Exception {
}
}
@Test
public void testCopyToLocal() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@ -711,6 +722,7 @@ static String createTree(FileSystem fs, String name) throws IOException {
return path;
}
@Test
public void testCount() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@ -877,6 +889,7 @@ private void confirmOwner(String owner, String group,
}
}
@Test
public void testFilePermissions() throws IOException {
Configuration conf = new HdfsConfiguration();
@ -942,6 +955,7 @@ public void testFilePermissions() throws IOException {
/**
* Tests various options of DFSShell.
*/
@Test
public void testDFSShell() throws IOException {
Configuration conf = new HdfsConfiguration();
/* This tests some properties of ChecksumFileSystem as well.
@ -1209,6 +1223,7 @@ static interface TestGetRunner {
String run(int exitcode, String... options) throws IOException;
}
@Test
public void testRemoteException() throws Exception {
UserGroupInformation tmpUGI =
UserGroupInformation.createUserForTesting("tmpname", new String[] {"mygroup"});
@ -1252,6 +1267,7 @@ public Object run() throws Exception {
}
}
@Test
public void testGet() throws IOException {
DFSTestUtil.setLogLevel2All(FSInputChecker.LOG);
final Configuration conf = new HdfsConfiguration();
@ -1312,6 +1328,7 @@ public String run(int exitcode, String... options) throws IOException {
}
}
@Test
public void testLsr() throws Exception {
final Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@ -1369,6 +1386,7 @@ private static String runLsr(final FsShell shell, String root, int returnvalue
* and return -1 exit code.
* @throws Exception
*/
@Test
public void testInvalidShell() throws Exception {
Configuration conf = new Configuration(); // default FS (non-DFS)
DFSAdmin admin = new DFSAdmin();
@ -1378,6 +1396,7 @@ public void testInvalidShell() throws Exception {
}
// force Copy Option is -f
@Test
public void testCopyCommandsWithForceOption() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)

View File

@ -17,22 +17,24 @@
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Test;
public class TestDFSShellGenericOptions extends TestCase {
public class TestDFSShellGenericOptions {
@Test
public void testDFSCommand() throws IOException {
String namenode = null;
MiniDFSCluster cluster = null;

View File

@ -19,25 +19,27 @@
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.junit.After;
import org.junit.Test;
/**
* This test ensures the appropriate response (successful or failure) from
* a Datanode when the system is started with differing version combinations.
*/
public class TestDFSStartupVersions extends TestCase {
public class TestDFSStartupVersions {
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.TestDFSStartupVersions");
@ -235,6 +237,7 @@ boolean isVersionCompatible(StorageData namenodeSd, StorageData datanodeSd) {
* this iterations version 3-tuple
* </pre>
*/
@Test
public void testVersions() throws Exception {
UpgradeUtilities.initialize();
Configuration conf = UpgradeUtilities.initializeStorageStateConf(1,
@ -276,8 +279,8 @@ public void testVersions() throws Exception {
}
}
@Override
protected void tearDown() throws Exception {
@After
public void tearDown() throws Exception {
LOG.info("Shutting down MiniDFSCluster");
if (cluster != null) cluster.shutdown();
}

View File

@ -17,25 +17,32 @@
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* This test ensures the appropriate response (successful or failure) from
* the system when the system is started under various storage state and
* version conditions.
*/
public class TestDFSStorageStateRecovery extends TestCase {
public class TestDFSStorageStateRecovery {
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.TestDFSStorageStateRecovery");
@ -311,6 +318,7 @@ private MiniDFSCluster createCluster(Configuration c) throws IOException {
* This test iterates over the testCases table and attempts
* to startup the NameNode normally.
*/
@Test
public void testNNStorageStates() throws Exception {
String[] baseDirs;
@ -354,6 +362,7 @@ public void testNNStorageStates() throws Exception {
* This test iterates over the testCases table for Datanode storage and
* attempts to startup the DataNode normally.
*/
@Test
public void testDNStorageStates() throws Exception {
String[] baseDirs;
@ -394,6 +403,7 @@ public void testDNStorageStates() throws Exception {
* This test iterates over the testCases table for block pool storage and
* attempts to startup the DataNode normally.
*/
@Test
public void testBlockPoolStorageStates() throws Exception {
String[] baseDirs;
@ -431,15 +441,15 @@ public void testBlockPoolStorageStates() throws Exception {
} // end numDirs loop
}
@Override
protected void setUp() throws Exception {
@Before
public void setUp() throws Exception {
LOG.info("Setting up the directory structures.");
UpgradeUtilities.initialize();
}
@Override
protected void tearDown() throws Exception {
@After
public void tearDown() throws Exception {
LOG.info("Shutting down MiniDFSCluster");
if (cluster != null) cluster.shutdown();
}
}
}

View File

@ -19,6 +19,13 @@
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
import static org.apache.hadoop.test.GenericTestUtils.assertExists;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
@ -27,14 +34,10 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
import static org.apache.hadoop.test.GenericTestUtils.assertExists;
import org.apache.hadoop.util.StringUtils;
import org.junit.BeforeClass;
import org.junit.Ignore;
@ -43,8 +46,6 @@
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import static org.junit.Assert.*;
/**
* This test ensures the appropriate response (successful or failure) from
* the system when the system is upgraded under various storage state and

View File

@ -18,13 +18,22 @@
package org.apache.hadoop.hdfs;
import junit.framework.TestCase;
import java.io.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.TreeMap;
import java.util.zip.CRC32;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileStatus;
@ -34,8 +43,7 @@
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Test;
/**
* This tests data transfer protocol handling in the Datanode. It sends
@ -46,7 +54,7 @@
* 2) hadoop-dfs-dir.txt : checksums that are compared in this test.
* Please read hadoop-dfs-dir.txt for more information.
*/
public class TestDFSUpgradeFromImage extends TestCase {
public class TestDFSUpgradeFromImage {
private static final Log LOG = LogFactory
.getLog(TestDFSUpgradeFromImage.class);
@ -182,6 +190,7 @@ private void verifyFileSystem(DistributedFileSystem dfs) throws IOException {
* Test that sets up a fake image from Hadoop 0.3.0 and tries to start a
* NN, verifying that the correct error message is thrown.
*/
@Test
public void testFailOnPreUpgradeImage() throws IOException {
Configuration conf = new HdfsConfiguration();
@ -225,6 +234,7 @@ public void testFailOnPreUpgradeImage() throws IOException {
/**
* Test upgrade from 0.22 image
*/
@Test
public void testUpgradeFromRel22Image() throws IOException {
unpackStorage(HADOOP22_IMAGE);
upgradeAndVerify();
@ -234,6 +244,7 @@ public void testUpgradeFromRel22Image() throws IOException {
* Test upgrade from 0.22 image with corrupt md5, make sure it
* fails to upgrade
*/
@Test
public void testUpgradeFromCorruptRel22Image() throws IOException {
unpackStorage(HADOOP22_IMAGE);

View File

@ -18,10 +18,22 @@
package org.apache.hadoop.hdfs;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.net.InetSocketAddress;
@ -34,18 +46,18 @@
import java.util.Map;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Before;
import org.junit.Test;
public class TestDFSUtil {

View File

@ -20,7 +20,10 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.InputStream;
import java.io.PrintWriter;

View File

@ -18,6 +18,10 @@
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
@ -28,8 +32,6 @@
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -40,11 +42,12 @@
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Time;
import org.junit.Test;
/**
* This test verifies that block verification occurs on the datanode
*/
public class TestDatanodeBlockScanner extends TestCase {
public class TestDatanodeBlockScanner {
private static final Log LOG =
LogFactory.getLog(TestDatanodeBlockScanner.class);
@ -118,6 +121,7 @@ private static long waitForVerification(int infoPort, FileSystem fs,
return verificationTime;
}
@Test
public void testDatanodeBlockScanner() throws IOException, TimeoutException {
long startTime = Time.now();
@ -168,6 +172,7 @@ public static boolean corruptReplica(ExtendedBlock blk, int replica) throws IOEx
return MiniDFSCluster.corruptReplica(replica, blk);
}
@Test
public void testBlockCorruptionPolicy() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
@ -232,12 +237,14 @@ public void testBlockCorruptionPolicy() throws IOException {
* 4. Test again waits until the block is reported with expected number
* of good replicas.
*/
@Test
public void testBlockCorruptionRecoveryPolicy1() throws Exception {
// Test recovery of 1 corrupt replica
LOG.info("Testing corrupt replica recovery for one corrupt replica");
blockCorruptionRecoveryPolicy(4, (short)3, 1);
}
@Test
public void testBlockCorruptionRecoveryPolicy2() throws Exception {
// Test recovery of 2 corrupt replicas
LOG.info("Testing corrupt replica recovery for two corrupt replicas");
@ -302,6 +309,7 @@ private void blockCorruptionRecoveryPolicy(int numDataNodes,
}
/** Test if NameNode handles truncated blocks in block report */
@Test
public void testTruncatedBlockReport() throws Exception {
final Configuration conf = new HdfsConfiguration();
final short REPLICATION_FACTOR = (short)2;

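The TestDatanodeBlockScanner changes above show the pattern this commit applies throughout: the class stops extending junit.framework.TestCase, each test method gains an org.junit.Test annotation, and assertions come from static imports of org.junit.Assert. A minimal before/after sketch with hypothetical names:

// JUnit 3 style (before): methods are discovered by the "test" name prefix
// and assertions are inherited from TestCase.
//   public class FooTest extends TestCase {
//     public void testBar() { assertTrue(condition()); }
//   }
//
// JUnit 4 style (after), as applied in this commit. FooTest and condition()
// are placeholders, not classes from this diff.
import static org.junit.Assert.assertTrue;

import org.junit.Test;

public class FooTest {
  @Test
  public void testBar() {
    assertTrue(condition());
  }

  private boolean condition() {
    return true;
  }
}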
View File

@ -17,9 +17,10 @@
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import junit.framework.TestCase;
import java.io.IOException;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
@ -31,17 +32,18 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.log4j.Level;
import org.junit.Test;
/**
* This class tests that pipelines survive data node death and recovery.
*/
public class TestDatanodeDeath extends TestCase {
public class TestDatanodeDeath {
{
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
@ -410,11 +412,15 @@ private void simpleTest(int datanodeToKill) throws IOException {
}
}
@Test
public void testSimple0() throws IOException {simpleTest(0);}
@Test
public void testSimple1() throws IOException {simpleTest(1);}
@Test
public void testSimple2() throws IOException {simpleTest(2);}
@Test
public void testComplex() throws IOException {complexTest();}
}

View File

@ -19,7 +19,8 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.*;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import java.net.InetSocketAddress;

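The hunk above narrows import static org.mockito.Mockito.* to just doReturn and mock. A hedged sketch of how those two are typically used together; the collaborator type and stubbed value below are hypothetical, not the ones mocked in this test.

import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;

import org.junit.Test;

public class MockitoImportSketch {
  // Hypothetical collaborator; any interface or non-final class can be mocked.
  interface PortSource {
    int getPort();
  }

  @Test
  public void testStubbedPort() {
    PortSource source = mock(PortSource.class);   // create the mock
    doReturn(8020).when(source).getPort();        // stub the call
    assertEquals(8020, source.getPort());
  }
}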
View File

@ -17,27 +17,30 @@
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import static org.apache.hadoop.test.MetricsAsserts.*;
import org.junit.Test;
/**
* This test ensures that all types of data node reports work correctly.
*/
public class TestDatanodeReport extends TestCase {
public class TestDatanodeReport {
final static private Configuration conf = new HdfsConfiguration();
final static private int NUM_OF_DATANODES = 4;
/**
* This test attempts different types of datanode reports.
*/
@Test
public void testDatanodeReport() throws Exception {
conf.setInt(
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); // 0.5s

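The assertGauge and getMetrics static imports made explicit above come from Hadoop's MetricsAsserts test helper, which fetches a registered metrics source and checks a named gauge against an expected value. A hedged sketch of that usage; the source name, gauge name, and expected value here are illustrative assumptions, not values from this test.

import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.junit.Test;

public class MetricsAssertsSketch {
  @Test
  public void testLiveDataNodeGauge() {
    // Fetch the metrics record for an assumed source name and assert on
    // an assumed gauge; a real test would use the running cluster's values.
    MetricsRecordBuilder rb = getMetrics("FSNamesystem");
    assertGauge("NumLiveDataNodes", 4, rb);
  }
}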
View File

@ -17,18 +17,20 @@
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import java.net.InetSocketAddress;
import java.net.URI;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.junit.Test;
/** Test NameNode port defaulting code. */
public class TestDefaultNameNodePort extends TestCase {
public class TestDefaultNameNodePort {
@Test
public void testGetAddressFromString() throws Exception {
assertEquals(NameNode.getAddress("foo").getPort(),
NameNode.DEFAULT_PORT);
@ -40,6 +42,7 @@ public void testGetAddressFromString() throws Exception {
555);
}
@Test
public void testGetAddressFromConf() throws Exception {
Configuration conf = new HdfsConfiguration();
FileSystem.setDefaultUri(conf, "hdfs://foo/");
@ -50,6 +53,7 @@ public void testGetAddressFromConf() throws Exception {
assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
}
@Test
public void testGetUri() {
assertEquals(NameNode.getUri(new InetSocketAddress("foo", 555)),
URI.create("hdfs://foo:555"));

View File

@ -18,13 +18,15 @@
package org.apache.hadoop.hdfs;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.conf.Configuration;
import junit.framework.TestCase;
import static org.junit.Assert.assertTrue;
public class TestDeprecatedKeys extends TestCase {
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
public class TestDeprecatedKeys {
//Tests a deprecated key
@Test
public void testDeprecatedKeys() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.set("topology.script.file.name", "xyz");

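TestDeprecatedKeys sets the old topology.script.file.name key, relying on Hadoop's Configuration deprecation layer to surface the value under the current key name as well. A hedged sketch of that behaviour, assuming the replacement key is net.topology.script.file.name (the assertion in the real test is not shown in this hunk):

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.junit.Test;

public class DeprecatedKeySketch {
  @Test
  public void testDeprecatedKeyIsForwarded() {
    Configuration conf = new HdfsConfiguration();
    // Set the value through the deprecated key name ...
    conf.set("topology.script.file.name", "xyz");
    // ... and read it back through the key assumed to replace it.
    assertEquals("xyz", conf.get("net.topology.script.file.name"));
  }
}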
View File

@ -17,14 +17,16 @@
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
import java.util.Random;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@ -35,11 +37,12 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;
/**
* This class tests if FSInputChecker works correctly.
*/
public class TestFSInputChecker extends TestCase {
public class TestFSInputChecker {
static final long seed = 0xDEADBEEFL;
static final int BYTES_PER_SUM = 10;
static final int BLOCK_SIZE = 2*BYTES_PER_SUM;
@ -291,6 +294,7 @@ private void checkFileCorruption(LocalFileSystem fileSys, Path file,
in.close();
}
@Test
public void testFSInputChecker() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);

View File

@ -17,21 +17,24 @@
*/
package org.apache.hadoop.hdfs;
import junit.framework.TestCase;
import java.io.*;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
/**
* This class tests if FSOutputSummer works correctly.
*/
public class TestFSOutputSummer extends TestCase {
public class TestFSOutputSummer {
private static final long seed = 0xDEADBEEFL;
private static final int BYTES_PER_CHECKSUM = 10;
private static final int BLOCK_SIZE = 2*BYTES_PER_CHECKSUM;
@ -111,6 +114,7 @@ private void cleanupFile(Path name) throws IOException {
/**
* Test write operation for output stream in DFS.
*/
@Test
public void testFSOutputSummer() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);

Some files were not shown because too many files have changed in this diff.