HDFS-9168. Move client side unit test to hadoop-hdfs-client. Contributed by Haohui Mai.

Haohui Mai 2015-10-28 15:54:37 -07:00
parent 73bc65e02b
commit 65f53f246a
25 changed files with 121 additions and 106 deletions

View File

@@ -51,6 +51,27 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-all</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mock-server</groupId>
<artifactId>mockserver-netty</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
</dependencies>
<build>

View File

@@ -30,16 +30,49 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class ReplaceDatanodeOnFailure {
+/**
+* DEFAULT condition:
+* Let r be the replication number.
+* Let n be the number of existing datanodes.
+* Add a new datanode only if r >= 3 and either
+* (1) floor(r/2) >= n or (2) the block is hflushed/appended.
+*/
+private static final Condition CONDITION_DEFAULT = new Condition() {
+@Override
+public boolean satisfy(final short replication,
+final DatanodeInfo[] existings, final int n, final boolean isAppend,
+final boolean isHflushed) {
+return replication >= 3 &&
+(n <= (replication / 2) || isAppend || isHflushed);
+}
+};
+/** Return false unconditionally. */
+private static final Condition CONDITION_FALSE = new Condition() {
+@Override
+public boolean satisfy(short replication, DatanodeInfo[] existings,
+int nExistings, boolean isAppend, boolean isHflushed) {
+return false;
+}
+};
+/** Return true unconditionally. */
+private static final Condition CONDITION_TRUE = new Condition() {
+@Override
+public boolean satisfy(short replication, DatanodeInfo[] existings,
+int nExistings, boolean isAppend, boolean isHflushed) {
+return true;
+}
+};
/** The replacement policies */
public enum Policy {
/** The feature is disabled in the entire site. */
-DISABLE(Condition.FALSE),
+DISABLE(CONDITION_FALSE),
/** Never add a new datanode. */
-NEVER(Condition.FALSE),
-/** @see ReplaceDatanodeOnFailure.Condition#DEFAULT */
-DEFAULT(Condition.DEFAULT),
+NEVER(CONDITION_FALSE),
+/** @see ReplaceDatanodeOnFailure#CONDITION_DEFAULT */
+DEFAULT(CONDITION_DEFAULT),
/** Always add a new datanode when an existing datanode is removed. */
-ALWAYS(Condition.TRUE);
+ALWAYS(CONDITION_TRUE);
private final Condition condition;
@@ -54,41 +87,6 @@ public class ReplaceDatanodeOnFailure {
/** Datanode replacement condition */
private interface Condition {
-/** Return true unconditionally. */
-Condition TRUE = new Condition() {
-@Override
-public boolean satisfy(short replication, DatanodeInfo[] existings,
-int nExistings, boolean isAppend, boolean isHflushed) {
-return true;
-}
-};
-/** Return false unconditionally. */
-Condition FALSE = new Condition() {
-@Override
-public boolean satisfy(short replication, DatanodeInfo[] existings,
-int nExistings, boolean isAppend, boolean isHflushed) {
-return false;
-}
-};
-/**
-* DEFAULT condition:
-* Let r be the replication number.
-* Let n be the number of existing datanodes.
-* Add a new datanode only if r >= 3 and either
-* (1) floor(r/2) >= n; or
-* (2) r > n and the block is hflushed/appended.
-*/
-Condition DEFAULT = new Condition() {
-@Override
-public boolean satisfy(final short replication,
-final DatanodeInfo[] existings, final int n, final boolean isAppend,
-final boolean isHflushed) {
-return replication >= 3 &&
-(n <= (replication / 2) || isAppend || isHflushed);
-}
-};
/** Is the condition satisfied? */
boolean satisfy(short replication, DatanodeInfo[] existings, int nExistings,

View File

@@ -17,17 +17,15 @@
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.junit.Test;
import java.net.InetSocketAddress;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/** Test NameNode port defaulting code. */
public class TestDefaultNameNodePort {

View File

@@ -17,17 +17,7 @@
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.channels.ReadableByteChannel;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.google.common.collect.HashMultiset;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.net.unix.DomainSocket;
@@ -35,11 +25,20 @@ import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.HashMultiset;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.channels.ReadableByteChannel;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
public class TestPeerCache {
-static final Log LOG = LogFactory.getLog(TestPeerCache.class);
+static final Logger LOG = LoggerFactory.getLogger(TestPeerCache.class);
private static class FakePeer implements Peer {
private boolean closed = false;

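Several of the moved tests also migrate their logging from commons-logging to SLF4J in passing (TestPeerCache above; TestShortCircuitShm, TestByteArrayManager, and TestWebHDFSOAuth2 below). A minimal sketch of the pattern, with a hypothetical test class name:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class SomeHdfsTest {
  // Before (commons-logging):
  //   static final Log LOG = LogFactory.getLog(SomeHdfsTest.class);
  // After (SLF4J):
  static final Logger LOG = LoggerFactory.getLogger(SomeHdfsTest.class);

  void demo() {
    // SLF4J's {} placeholders defer string construction until the
    // log level is actually enabled.
    LOG.info("cached {} peers", 42);
  }
}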
View File

@@ -17,14 +17,9 @@
*/
package org.apache.hadoop.hdfs.client.impl;
import static org.junit.Assert.assertSame;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import com.google.common.base.Supplier;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
@@ -35,7 +30,10 @@ import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.common.base.Supplier;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import static org.junit.Assert.assertSame;
public class TestLeaseRenewer {
private final String FAKE_AUTHORITY="hdfs://nn1/";

View File

@@ -17,16 +17,8 @@
*/
package org.apache.hadoop.hdfs.shortcircuit;
import java.io.File;
import java.io.FileInputStream;
import java.util.ArrayList;
import java.util.Iterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
import org.apache.hadoop.io.nativeio.SharedFileDescriptorFactory;
@@ -34,9 +26,17 @@ import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileInputStream;
import java.util.ArrayList;
import java.util.Iterator;
public class TestShortCircuitShm {
-public static final Log LOG = LogFactory.getLog(TestShortCircuitShm.class);
+public static final Logger LOG = LoggerFactory.getLogger(
+TestShortCircuitShm.class);
private static final File TEST_BASE =
new File(System.getProperty("test.build.data", "/tmp"));

View File

@@ -17,6 +17,19 @@
*/
package org.apache.hadoop.hdfs.util;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.util.ByteArrayManager.Counter;
import org.apache.hadoop.hdfs.util.ByteArrayManager.CounterMap;
import org.apache.hadoop.hdfs.util.ByteArrayManager.FixedLengthManager;
import org.apache.hadoop.hdfs.util.ByteArrayManager.ManagerMap;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
@@ -31,29 +44,16 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.util.ByteArrayManager.Counter;
import org.apache.hadoop.hdfs.util.ByteArrayManager.CounterMap;
import org.apache.hadoop.hdfs.util.ByteArrayManager.FixedLengthManager;
import org.apache.hadoop.hdfs.util.ByteArrayManager.ManagerMap;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
/**
* Test {@link ByteArrayManager}.
*/
public class TestByteArrayManager {
static {
-GenericTestUtils.setLogLevel(LogFactory.getLog(ByteArrayManager.class),
-Level.ALL);
+GenericTestUtils.setLogLevel(
+LoggerFactory.getLogger(ByteArrayManager.class), Level.ALL);
}
-static final Log LOG = LogFactory.getLog(TestByteArrayManager.class);
+static final Logger LOG = LoggerFactory.getLogger(TestByteArrayManager.class);
private static final Comparator<Future<Integer>> CMP = new Comparator<Future<Integer>>() {
@Override
@@ -559,9 +559,8 @@ public class TestByteArrayManager {
}
public static void main(String[] args) throws Exception {
-GenericTestUtils.setLogLevel(LogFactory.getLog(ByteArrayManager.class),
-Level.OFF);
+GenericTestUtils.setLogLevel(LoggerFactory.getLogger(ByteArrayManager.class),
+Level.OFF);
final int arrayLength = 64 * 1024; //64k
final int nThreads = 512;
final int nAllocations = 1 << 15;

View File

@@ -18,8 +18,6 @@
*/
package org.apache.hadoop.hdfs.web;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -38,6 +36,8 @@ import org.mockserver.integration.ClientAndServer;
import org.mockserver.model.Header;
import org.mockserver.model.HttpRequest;
import org.mockserver.model.HttpResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URI;
@@ -58,7 +58,8 @@ import static org.mockserver.model.HttpRequest.request;
import static org.mockserver.model.HttpResponse.response;
public class TestWebHDFSOAuth2 {
-public static final Log LOG = LogFactory.getLog(TestWebHDFSOAuth2.class);
+public static final Logger LOG = LoggerFactory.getLogger(
+TestWebHDFSOAuth2.class);
private ClientAndServer mockWebHDFS;
private ClientAndServer mockOAuthServer;

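TestWebHDFSOAuth2 is the test that needs mockserver-netty, which is presumably what motivates the new test dependency in the first POM hunk above. Roughly, the library stands up an in-process HTTP server and stubs canned responses; a minimal sketch against the 3.9.2 API (port, path, and body are illustrative, not taken from the test):

import org.mockserver.integration.ClientAndServer;

import static org.mockserver.integration.ClientAndServer.startClientAndServer;
import static org.mockserver.model.HttpRequest.request;
import static org.mockserver.model.HttpResponse.response;

public class MockWebHdfsSketch {
  public static void main(String[] args) {
    // Start an in-process HTTP server (port is illustrative).
    ClientAndServer mockWebHDFS = startClientAndServer(8775);

    // Stub a WebHDFS GETFILESTATUS call with a canned JSON body.
    mockWebHDFS
        .when(request()
            .withMethod("GET")
            .withPath("/webhdfs/v1/test1/file1"))
        .respond(response()
            .withStatusCode(200)
            .withBody("{\"FileStatus\":{\"type\":\"FILE\",\"length\":0}}"));

    // ... exercise a WebHdfsFileSystem client against localhost:8775 ...

    mockWebHDFS.stop();
  }
}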
View File

@@ -1641,6 +1641,8 @@ Release 2.8.0 - UNRELEASED
HDFS-9297. Update TestBlockMissingException to use corruptBlockOnDataNodesByDeletingBlockFile().
(Tony Wu via lei)
HDFS-9168. Move client side unit test to hadoop-hdfs-client. (wheat9)
BUG FIXES
HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

View File

@@ -211,12 +211,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>leveldbjni-all</artifactId>
<version>1.8</version>
</dependency>
<dependency>
<groupId>org.mock-server</groupId>
<artifactId>mockserver-netty</artifactId>
<version>3.9.2</version>
<scope>test</scope>
</dependency>
<!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
<dependency>
<groupId>org.bouncycastle</groupId>

View File

@@ -19,7 +19,7 @@
package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.collect.Lists;
-import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -259,7 +259,7 @@ public class TestAuditLogger {
auditlog.clearOutput();
// long context is truncated
-final String longContext = RandomStringUtils.randomAscii(200);
+final String longContext = StringUtils.repeat("foo", 100);
context = new CallerContext.Builder(longContext)
.setSignature("L".getBytes(CallerContext.SIGNATURE_ENCODING))
.build();

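The TestAuditLogger change swaps a random 200-character string for a deterministic 300-character one, which drops the commons-lang3 import while still exceeding the caller-context truncation length the test exercises (the test's own comment notes the truncation). A quick check of the replacement call:

import org.apache.commons.lang.StringUtils;

public class RepeatDemo {
  public static void main(String[] args) {
    // "foofoofoo..." -- deterministic, 300 characters long.
    String longContext = StringUtils.repeat("foo", 100);
    System.out.println(longContext.length());        // 300
    System.out.println(longContext.substring(0, 9)); // foofoofoo
  }
}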
View File

@@ -779,6 +779,11 @@
<artifactId>mockito-all</artifactId>
<version>1.8.5</version>
</dependency>
<dependency>
<groupId>org.mock-server</groupId>
<artifactId>mockserver-netty</artifactId>
<version>3.9.2</version>
</dependency>
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro</artifactId>
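
The POM churn across the modules follows Maven's dependencyManagement pattern: this last hunk (next to the mockito-all entry) appears to sit in the hadoop-project parent POM, pinning mockserver-netty to 3.9.2 once, so hadoop-hdfs-client can declare it version-free in the first hunk and hadoop-hdfs can drop its versioned copy. A sketch of the two halves, assuming the parent hunk lands inside <dependencyManagement>, as the versionless child declaration implies:

<!-- Parent POM: pin the version once. -->
<dependencyManagement>
  <dependencies>
    <dependency>
      <groupId>org.mock-server</groupId>
      <artifactId>mockserver-netty</artifactId>
      <version>3.9.2</version>
    </dependency>
  </dependencies>
</dependencyManagement>

<!-- hadoop-hdfs-client POM: inherit 3.9.2 from the parent. -->
<dependency>
  <groupId>org.mock-server</groupId>
  <artifactId>mockserver-netty</artifactId>
  <scope>test</scope>
</dependency>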