HDFS-3711. Manually convert remaining tests to JUnit4. Contributed by Andrew Wang.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1365119 13f79535-47bb-0310-9956-ffa450edef68
Parent: 1fd21078d8
Commit: d8f3913856

@@ -352,6 +352,8 @@ Branch-2 ( Unreleased changes )
 
     HDFS-3583. Convert remaining tests to Junit4. (Andrew Wang via atm)
 
+    HDFS-3711. Manually convert remaining tests to JUnit4. (Andrew Wang via atm)
+
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log
@@ -332,13 +332,12 @@ package org.apache.hadoop.fs;
 
 import org.junit.Test;
+import org.junit.Before;
-import junit.framework.TestCase;
 
-public class DemoFiTest extends TestCase {
+public class DemoFiTest {
   public static final String BLOCK_RECEIVER_FAULT="hdfs.datanode.BlockReceiver";
-  @Override
+  @Before
-  public void setUp(){
+  public void setUp() {
     //Setting up the test's environment as required
   }
 
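For context, the hunk above updates the fault-injection documentation's example test. Assembled, the converted JUnit4 version of that example would look roughly like the sketch below; the testFi method and its body are illustrative placeholders and are not part of this patch:

import org.junit.Before;
import org.junit.Test;

// JUnit4 style: no TestCase superclass; setUp is found via @Before and
// test methods via @Test instead of the JUnit3 "testXxx" naming convention.
public class DemoFiTest {
  public static final String BLOCK_RECEIVER_FAULT = "hdfs.datanode.BlockReceiver";

  @Before
  public void setUp() {
    // Setting up the test's environment as required
  }

  @Test
  public void testFi() {
    // test body elided in the documentation; placeholder only
  }
}
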
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
@@ -29,8 +33,6 @@ import java.net.Socket;
 import java.nio.ByteBuffer;
 import java.util.Random;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -67,7 +69,7 @@ import org.mockito.Mockito;
  * This tests data transfer protocol handling in the Datanode. It sends
  * various forms of wrong data and verifies that Datanode handles it well.
  */
-public class TestDataTransferProtocol extends TestCase {
+public class TestDataTransferProtocol {
 
   private static final Log LOG = LogFactory.getLog(
                     "org.apache.hadoop.hdfs.TestDataTransferProtocol");
@@ -205,7 +207,8 @@ public class TestDataTransferProtocol extends TestCase {
     }
   }
 
-  @Test public void testOpWrite() throws IOException {
+  @Test
+  public void testOpWrite() throws IOException {
     int numDataNodes = 1;
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
@@ -333,7 +336,8 @@ public class TestDataTransferProtocol extends TestCase {
     }
   }
 
-  @Test public void testDataTransferProtocol() throws IOException {
+  @Test
+  public void testDataTransferProtocol() throws IOException {
     Random random = new Random();
     int oneMil = 1024*1024;
     Path file = new Path("dataprotocol.dat");
 
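Since the class no longer extends TestCase, the JUnit3 runner cannot pick it up by reflecting on method names; the JUnit4 runner discovers the @Test annotations instead. In Hadoop these tests are driven by Maven Surefire, but a minimal stand-alone sketch of running a converted test programmatically (everything here other than the test class name is illustrative) would be:

package org.apache.hadoop.hdfs;

import org.junit.runner.JUnitCore;
import org.junit.runner.Result;
import org.junit.runner.notification.Failure;

public class RunConvertedTest {
  public static void main(String[] args) {
    // JUnitCore locates @Test methods via annotations, so no TestCase
    // superclass is required for discovery.
    Result result = JUnitCore.runClasses(TestDataTransferProtocol.class);
    for (Failure failure : result.getFailures()) {
      System.out.println(failure);
    }
    System.out.println("run=" + result.getRunCount()
        + " failed=" + result.getFailureCount());
  }
}
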
@@ -16,6 +16,10 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -37,13 +41,17 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
 
 
 /**
  * This class tests the cases of a concurrent reads/writes to a file;
  * ie, one writer and one or more readers can see unfinsihed blocks
  */
-public class TestFileConcurrentReader extends junit.framework.TestCase {
+public class TestFileConcurrentReader {
 
   private enum SyncType {
     SYNC,
@@ -69,18 +77,16 @@ public class TestFileConcurrentReader extends junit.framework.TestCase {
   private FileSystem fileSystem;
 
 
-  @Override
-  protected void setUp() throws IOException {
+  @Before
+  public void setUp() throws IOException {
     conf = new Configuration();
     init(conf);
   }
 
-  @Override
-  protected void tearDown() throws Exception {
+  @After
+  public void tearDown() throws Exception {
     cluster.shutdown();
     cluster = null;
-
-    super.tearDown();
   }
 
   private void init(Configuration conf) throws IOException {
@@ -145,6 +151,7 @@ public class TestFileConcurrentReader extends junit.framework.TestCase {
   /**
    * Test that that writes to an incomplete block are available to a reader
    */
+  @Test
   public void testUnfinishedBlockRead()
     throws IOException {
     // create a new file in the root, write data, do no close
@@ -167,6 +174,7 @@ public class TestFileConcurrentReader extends junit.framework.TestCase {
    * would result in too small a buffer to do the buffer-copy needed
    * for partial chunks.
    */
+  @Test
   public void testUnfinishedBlockPacketBufferOverrun() throws IOException {
     // check that / exists
     Path path = new Path("/");
@@ -192,6 +200,7 @@ public class TestFileConcurrentReader extends junit.framework.TestCase {
   // use a small block size and a large write so that DN is busy creating
   // new blocks. This makes it almost 100% sure we can reproduce
   // case of client getting a DN that hasn't yet created the blocks
+  @Test
   public void testImmediateReadOfNewFile()
     throws IOException {
     final int blockSize = 64 * 1024;
@@ -268,31 +277,39 @@ public class TestFileConcurrentReader extends junit.framework.TestCase {
 
   // for some reason, using tranferTo evokes the race condition more often
   // so test separately
+  @Test
   public void testUnfinishedBlockCRCErrorTransferTo() throws IOException {
     runTestUnfinishedBlockCRCError(true, SyncType.SYNC, DEFAULT_WRITE_SIZE);
   }
 
+  @Test
   public void testUnfinishedBlockCRCErrorTransferToVerySmallWrite()
     throws IOException {
     runTestUnfinishedBlockCRCError(true, SyncType.SYNC, SMALL_WRITE_SIZE);
   }
 
   // fails due to issue w/append, disable
+  @Ignore
+  @Test
   public void _testUnfinishedBlockCRCErrorTransferToAppend()
     throws IOException {
     runTestUnfinishedBlockCRCError(true, SyncType.APPEND, DEFAULT_WRITE_SIZE);
   }
 
+  @Test
   public void testUnfinishedBlockCRCErrorNormalTransfer() throws IOException {
     runTestUnfinishedBlockCRCError(false, SyncType.SYNC, DEFAULT_WRITE_SIZE);
   }
 
+  @Test
   public void testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite()
     throws IOException {
     runTestUnfinishedBlockCRCError(false, SyncType.SYNC, SMALL_WRITE_SIZE);
   }
 
   // fails due to issue w/append, disable
+  @Ignore
+  @Test
   public void _testUnfinishedBlockCRCErrorNormalTransferAppend()
     throws IOException {
     runTestUnfinishedBlockCRCError(false, SyncType.APPEND, DEFAULT_WRITE_SIZE);
@@ -441,4 +458,4 @@ public class TestFileConcurrentReader extends junit.framework.TestCase {
     inputStream.close();
     return numRead + startPos - 1;
   }
-}
+}
 
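Worth noting: under JUnit3 the only way to disable a test was to rename it so it no longer started with "test" (hence the _test... methods above); this conversion keeps those names but also marks them @Ignore, so JUnit4 reports them as skipped instead of silently dropping them. A minimal sketch of the pattern follows; the class name is illustrative, and the reason string is optional (the patch itself uses a bare @Ignore):

import org.junit.Ignore;
import org.junit.Test;

public class IgnoreExample {
  @Ignore("fails due to issue w/append, disable")
  @Test
  public void testUnfinishedBlockCRCErrorTransferToAppend() {
    // body omitted; the runner counts this method as skipped, not passed
  }
}
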
@@ -72,6 +72,7 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
+import org.junit.Ignore;
 import org.junit.Test;
 
 /**
@@ -498,6 +499,8 @@ public class TestFileCreation {
    * This test is currently not triggered because more HDFS work is
    * is needed to handle persistent leases.
    */
+  @Ignore
+  @Test
   public void xxxtestFileCreationNamenodeRestart() throws IOException {
     Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
 
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.protocolPB;
 
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -80,6 +80,12 @@ import com.google.common.collect.Lists;
  * Tests for {@link PBHelper}
  */
 public class TestPBHelper {
+
+  /**
+   * Used for asserting equality on doubles.
+   */
+  private static final double DELTA = 0.000001;
+
   @Test
   public void testConvertNamenodeRole() {
     assertEquals(NamenodeRoleProto.BACKUP,
@@ -284,11 +290,12 @@ public class TestPBHelper {
   private void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
     assertEquals(dn1.getAdminState(), dn2.getAdminState());
     assertEquals(dn1.getBlockPoolUsed(), dn2.getBlockPoolUsed());
-    assertEquals(dn1.getBlockPoolUsedPercent(), dn2.getBlockPoolUsedPercent());
+    assertEquals(dn1.getBlockPoolUsedPercent(),
+        dn2.getBlockPoolUsedPercent(), DELTA);
     assertEquals(dn1.getCapacity(), dn2.getCapacity());
     assertEquals(dn1.getDatanodeReport(), dn2.getDatanodeReport());
     assertEquals(dn1.getDfsUsed(), dn1.getDfsUsed());
-    assertEquals(dn1.getDfsUsedPercent(), dn1.getDfsUsedPercent());
+    assertEquals(dn1.getDfsUsedPercent(), dn1.getDfsUsedPercent(), DELTA);
     assertEquals(dn1.getIpAddr(), dn2.getIpAddr());
     assertEquals(dn1.getHostName(), dn2.getHostName());
     assertEquals(dn1.getInfoPort(), dn2.getInfoPort());
 
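The new DELTA constant exists because org.junit.Assert deprecates assertEquals(double, double): exact floating-point equality is rarely what a test means, so JUnit4 asks for an explicit tolerance. A self-contained sketch of the overload in use (class name and values are made up for illustration):

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class DeltaExample {
  private static final double DELTA = 0.000001;

  @Test
  public void testBlockPoolUsedPercent() {
    double expected = 33.333333333;
    double actual = 100.0 / 3.0;
    // Passes as long as |expected - actual| <= DELTA.
    assertEquals(expected, actual, DELTA);
  }
}
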
@@ -17,13 +17,16 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
 import java.io.DataInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -33,27 +36,23 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
 import org.apache.hadoop.util.DataChecksum;
+import org.junit.Before;
+import org.junit.Test;
 
 /**
  * this class tests the methods of the  SimulatedFSDataset.
  */
-public class TestSimulatedFSDataset extends TestCase {
+public class TestSimulatedFSDataset {
   Configuration conf = null;
   static final String bpid = "BP-TEST";
   static final int NUMBLOCKS = 20;
   static final int BLOCK_LENGTH_MULTIPLIER = 79;
 
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
+  @Before
+  public void setUp() throws Exception {
     conf = new HdfsConfiguration();
     SimulatedFSDataset.setFactory(conf);
   }
 
-  @Override
-  protected void tearDown() throws Exception {
-    super.tearDown();
-  }
-
   long blockIdToLen(long blkid) {
     return blkid*BLOCK_LENGTH_MULTIPLIER;
@@ -90,6 +89,7 @@ public class TestSimulatedFSDataset extends TestCase {
     return addSomeBlocks(fsdataset, 1);
   }
 
+  @Test
   public void testFSDatasetFactory() {
     final Configuration conf = new Configuration();
     FsDatasetSpi.Factory<?> f = FsDatasetSpi.Factory.getFactory(conf);
@@ -102,6 +102,7 @@ public class TestSimulatedFSDataset extends TestCase {
     assertTrue(s.isSimulated());
   }
 
+  @Test
   public void testGetMetaData() throws IOException {
     final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
     ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
@@ -123,6 +124,7 @@ public class TestSimulatedFSDataset extends TestCase {
   }
 
 
+  @Test
   public void testStorageUsage() throws IOException {
     final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
     assertEquals(fsdataset.getDfsUsed(), 0);
@@ -146,6 +148,7 @@ public class TestSimulatedFSDataset extends TestCase {
     assertEquals(expectedLen, lengthRead);
   }
 
+  @Test
   public void testWriteRead() throws IOException {
     final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
     addSomeBlocks(fsdataset);
@@ -157,6 +160,7 @@ public class TestSimulatedFSDataset extends TestCase {
     }
   }
 
+  @Test
   public void testGetBlockReport() throws IOException {
     SimulatedFSDataset fsdataset = getSimulatedFSDataset();
     BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
@@ -170,6 +174,7 @@ public class TestSimulatedFSDataset extends TestCase {
     }
   }
 
+  @Test
   public void testInjectionEmpty() throws IOException {
     SimulatedFSDataset fsdataset = getSimulatedFSDataset();
     BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
@@ -198,6 +203,7 @@ public class TestSimulatedFSDataset extends TestCase {
     assertEquals(sfsdataset.getCapacity()-bytesAdded, sfsdataset.getRemaining());
   }
 
+  @Test
   public void testInjectionNonEmpty() throws IOException {
     SimulatedFSDataset fsdataset = getSimulatedFSDataset();
     BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
@@ -271,6 +277,7 @@ public class TestSimulatedFSDataset extends TestCase {
     }
   }
 
+  @Test
   public void testInValidBlocks() throws IOException {
     final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
     ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
@@ -282,6 +289,7 @@ public class TestSimulatedFSDataset extends TestCase {
     checkInvalidBlock(b);
   }
 
+  @Test
   public void testInvalidate() throws IOException {
     final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
     int bytesAdded = addSomeBlocks(fsdataset);
 
@@ -20,6 +20,12 @@ package org.apache.hadoop.hdfs.server.namenode;
 import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
 import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.assertNNHasCheckpoints;
 import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.getNameNodeCurrentDirs;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
@@ -31,8 +37,6 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Random;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -69,6 +73,8 @@ import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
+import org.junit.Before;
+import org.junit.Test;
 import org.mockito.ArgumentMatcher;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
@@ -84,7 +90,7 @@ import com.google.common.primitives.Ints;
 /**
  * This class tests the creation and validation of a checkpoint.
  */
-public class TestCheckpoint extends TestCase {
+public class TestCheckpoint {
 
   static {
     ((Log4JLogger)FSImage.LOG).getLogger().setLevel(Level.ALL);
@@ -100,7 +106,7 @@ public class TestCheckpoint extends TestCase {
 
   private CheckpointFaultInjector faultInjector;
 
-  @Override
+  @Before
   public void setUp() throws IOException {
     FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
 
@@ -139,6 +145,7 @@ public class TestCheckpoint extends TestCase {
   /*
    * Verify that namenode does not startup if one namedir is bad.
    */
+  @Test
   public void testNameDirError() throws IOException {
     LOG.info("Starting testNameDirError");
     Configuration conf = new HdfsConfiguration();
@@ -180,6 +187,7 @@ public class TestCheckpoint extends TestCase {
    * correctly (by removing the storage directory)
    * See https://issues.apache.org/jira/browse/HDFS-2011
    */
+  @Test
   public void testWriteTransactionIdHandlesIOE() throws Exception {
     LOG.info("Check IOException handled correctly by writeTransactionIdFile");
     ArrayList<URI> fsImageDirs = new ArrayList<URI>();
@@ -214,6 +222,7 @@ public class TestCheckpoint extends TestCase {
   /*
    * Simulate namenode crashing after rolling edit log.
    */
+  @Test
   public void testSecondaryNamenodeError1()
     throws IOException {
     LOG.info("Starting testSecondaryNamenodeError1");
@@ -279,6 +288,7 @@ public class TestCheckpoint extends TestCase {
   /*
    * Simulate a namenode crash after uploading new image
   */
+  @Test
   public void testSecondaryNamenodeError2() throws IOException {
     LOG.info("Starting testSecondaryNamenodeError2");
     Configuration conf = new HdfsConfiguration();
@@ -340,6 +350,7 @@ public class TestCheckpoint extends TestCase {
   /*
    * Simulate a secondary namenode crash after rolling the edit log.
   */
+  @Test
   public void testSecondaryNamenodeError3() throws IOException {
     LOG.info("Starting testSecondaryNamenodeError3");
     Configuration conf = new HdfsConfiguration();
@@ -412,6 +423,7 @@ public class TestCheckpoint extends TestCase {
    * back to the name-node.
    * Used to truncate primary fsimage file.
   */
+  @Test
   public void testSecondaryFailsToReturnImage() throws IOException {
     Mockito.doThrow(new IOException("If this exception is not caught by the " +
         "name-node, fs image will be truncated."))
@@ -425,6 +437,7 @@ public class TestCheckpoint extends TestCase {
    * before even setting the length header. This used to cause image
    * truncation. Regression test for HDFS-3330.
   */
+  @Test
   public void testSecondaryFailsWithErrorBeforeSettingHeaders()
       throws IOException {
     Mockito.doThrow(new Error("If this exception is not caught by the " +
@@ -497,6 +510,7 @@ public class TestCheckpoint extends TestCase {
    * The length header in the HTTP transfer should prevent
    * this from corrupting the NN.
   */
+  @Test
   public void testNameNodeImageSendFailWrongSize()
       throws IOException {
     LOG.info("Starting testNameNodeImageSendFailWrongSize");
@@ -511,6 +525,7 @@ public class TestCheckpoint extends TestCase {
    * The digest header in the HTTP transfer should prevent
    * this from corrupting the NN.
   */
+  @Test
   public void testNameNodeImageSendFailWrongDigest()
       throws IOException {
     LOG.info("Starting testNameNodeImageSendFailWrongDigest");
@@ -528,7 +543,7 @@ public class TestCheckpoint extends TestCase {
   private void doSendFailTest(String exceptionSubstring)
       throws IOException {
     Configuration conf = new HdfsConfiguration();
-    Path file1 = new Path("checkpoint-doSendFailTest-" + getName() + ".dat");
+    Path file1 = new Path("checkpoint-doSendFailTest-doSendFailTest.dat");
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(numDatanodes)
         .build();
@@ -574,6 +589,7 @@ public class TestCheckpoint extends TestCase {
    * Test that the NN locks its storage and edits directories, and won't start up
    * if the directories are already locked
   **/
+  @Test
   public void testNameDirLocking() throws IOException {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
@@ -603,6 +619,7 @@ public class TestCheckpoint extends TestCase {
    * Test that, if the edits dir is separate from the name dir, it is
    * properly locked.
   **/
+  @Test
   public void testSeparateEditsDirLocking() throws IOException {
     Configuration conf = new HdfsConfiguration();
     File editsDir = new File(MiniDFSCluster.getBaseDirectory() +
@@ -638,6 +655,7 @@ public class TestCheckpoint extends TestCase {
   /**
    * Test that the SecondaryNameNode properly locks its storage directories.
   */
+  @Test
   public void testSecondaryNameNodeLocking() throws Exception {
     // Start a primary NN so that the secondary will start successfully
     Configuration conf = new HdfsConfiguration();
@@ -687,6 +705,7 @@ public class TestCheckpoint extends TestCase {
    * Test that, an attempt to lock a storage that is already locked by a nodename,
    * logs error message that includes JVM name of the namenode that locked it.
   */
+  @Test
   public void testStorageAlreadyLockedErrorMessage() throws Exception {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
@@ -763,6 +782,7 @@ public class TestCheckpoint extends TestCase {
    * 2. if the NN does not contain an image, importing a checkpoint
    * succeeds and re-saves the image
   */
+  @Test
   public void testImportCheckpoint() throws Exception {
     Configuration conf = new HdfsConfiguration();
     Path testPath = new Path("/testfile");
@@ -861,6 +881,7 @@ public class TestCheckpoint extends TestCase {
   /**
    * Tests checkpoint in HDFS.
   */
+  @Test
   public void testCheckpoint() throws IOException {
     Path file1 = new Path("checkpoint.dat");
     Path file2 = new Path("checkpoint2.dat");
@@ -951,6 +972,7 @@ public class TestCheckpoint extends TestCase {
   /**
    * Tests save namespace.
   */
+  @Test
   public void testSaveNamespace() throws IOException {
     MiniDFSCluster cluster = null;
     DistributedFileSystem fs = null;
@@ -1057,6 +1079,7 @@ public class TestCheckpoint extends TestCase {
 
   /* Test case to test CheckpointSignature */
   @SuppressWarnings("deprecation")
+  @Test
   public void testCheckpointSignature() throws IOException {
 
     MiniDFSCluster cluster = null;
@@ -1091,6 +1114,7 @@ public class TestCheckpoint extends TestCase {
    * - it then fails again for the same reason
    * - it then tries to checkpoint a third time
   */
+  @Test
   public void testCheckpointAfterTwoFailedUploads() throws IOException {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1147,6 +1171,7 @@ public class TestCheckpoint extends TestCase {
    *
    * @throws IOException
   */
+  @Test
   public void testMultipleSecondaryNamenodes() throws IOException {
     Configuration conf = new HdfsConfiguration();
     String nameserviceId1 = "ns1";
@@ -1197,6 +1222,7 @@ public class TestCheckpoint extends TestCase {
    * Test that the secondary doesn't have to re-download image
    * if it hasn't changed.
   */
+  @Test
   public void testSecondaryImageDownload() throws IOException {
     LOG.info("Starting testSecondaryImageDownload");
     Configuration conf = new HdfsConfiguration();
@@ -1279,6 +1305,7 @@ public class TestCheckpoint extends TestCase {
    * It verifies that this works even though the earlier-txid checkpoint gets
    * uploaded after the later-txid checkpoint.
   */
+  @Test
   public void testMultipleSecondaryNNsAgainstSameNN() throws Exception {
     Configuration conf = new HdfsConfiguration();
 
@@ -1364,6 +1391,7 @@ public class TestCheckpoint extends TestCase {
    * It verifies that one of the two gets an error that it's uploading a
    * duplicate checkpoint, and the other one succeeds.
   */
+  @Test
   public void testMultipleSecondaryNNsAgainstSameNN2() throws Exception {
     Configuration conf = new HdfsConfiguration();
 
@@ -1457,6 +1485,7 @@ public class TestCheckpoint extends TestCase {
    * is running. The secondary should shut itself down if if talks to a NN
    * with the wrong namespace.
   */
+  @Test
   public void testReformatNNBetweenCheckpoints() throws IOException {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1514,6 +1543,7 @@ public class TestCheckpoint extends TestCase {
    * Test that the primary NN will not serve any files to a 2NN who doesn't
    * share its namespace ID, and also will not accept any files from one.
   */
+  @Test
   public void testNamespaceVerifiedOnFileTransfer() throws IOException {
     MiniDFSCluster cluster = null;
 
@@ -1575,6 +1605,7 @@ public class TestCheckpoint extends TestCase {
    * the non-failed storage directory receives the checkpoint.
   */
   @SuppressWarnings("deprecation")
+  @Test
   public void testCheckpointWithFailedStorageDir() throws Exception {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1639,6 +1670,7 @@ public class TestCheckpoint extends TestCase {
    * @throws Exception
   */
   @SuppressWarnings("deprecation")
+  @Test
   public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1711,6 +1743,7 @@ public class TestCheckpoint extends TestCase {
   /**
    * Test that the 2NN triggers a checkpoint after the configurable interval
   */
+  @Test
   public void testCheckpointTriggerOnTxnCount() throws Exception {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1764,6 +1797,7 @@ public class TestCheckpoint extends TestCase {
    * logs that connect the 2NN's old checkpoint to the current txid
    * get archived. Then, the 2NN tries to checkpoint again.
   */
+  @Test
   public void testSecondaryHasVeryOutOfDateImage() throws IOException {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1801,6 +1835,7 @@ public class TestCheckpoint extends TestCase {
     }
   }
 
+  @Test
   public void testCommandLineParsing() throws ParseException {
     SecondaryNameNode.CommandLineOpts opts =
         new SecondaryNameNode.CommandLineOpts();
 
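One conversion above is slightly lossy: JUnit3's TestCase.getName() is gone, so doSendFailTest now hard-codes the file name instead of deriving it from the running test. If a per-test name were still wanted, JUnit4's TestName rule (available since JUnit 4.7) could supply it; this is an alternative the patch does not use, sketched here only for comparison with a made-up class and method name:

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;

public class TestNameRuleExample {
  // Rule that exposes the name of the currently running test method.
  @Rule
  public TestName name = new TestName();

  @Test
  public void testSomething() {
    // e.g. "checkpoint-doSendFailTest-testSomething.dat"
    String file = "checkpoint-doSendFailTest-" + name.getMethodName() + ".dat";
    System.out.println(file);
  }
}
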
@@ -29,8 +29,6 @@ import java.util.Map;
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 
-import junit.framework.Assert;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -42,6 +40,12 @@ import org.mortbay.util.ajax.JSON;
 * Class for testing {@link NameNodeMXBean} implementation
 */
 public class TestNameNodeMXBean {
+
+  /**
+   * Used to assert equality between doubles
+   */
+  private static final double DELTA = 0.000001;
+
   @SuppressWarnings({ "unchecked", "deprecation" })
   @Test
   public void testNameNodeMXBeanInfo() throws Exception {
@@ -59,36 +63,36 @@ public class TestNameNodeMXBean {
         "Hadoop:service=NameNode,name=NameNodeInfo");
     // get attribute "ClusterId"
     String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
-    Assert.assertEquals(fsn.getClusterId(), clusterId);
+    assertEquals(fsn.getClusterId(), clusterId);
     // get attribute "BlockPoolId"
     String blockpoolId = (String) mbs.getAttribute(mxbeanName,
         "BlockPoolId");
-    Assert.assertEquals(fsn.getBlockPoolId(), blockpoolId);
+    assertEquals(fsn.getBlockPoolId(), blockpoolId);
     // get attribute "Version"
     String version = (String) mbs.getAttribute(mxbeanName, "Version");
-    Assert.assertEquals(fsn.getVersion(), version);
-    Assert.assertTrue(version.equals(VersionInfo.getVersion()
+    assertEquals(fsn.getVersion(), version);
+    assertTrue(version.equals(VersionInfo.getVersion()
         + ", r" + VersionInfo.getRevision()));
     // get attribute "Used"
     Long used = (Long) mbs.getAttribute(mxbeanName, "Used");
-    Assert.assertEquals(fsn.getUsed(), used.longValue());
+    assertEquals(fsn.getUsed(), used.longValue());
     // get attribute "Total"
     Long total = (Long) mbs.getAttribute(mxbeanName, "Total");
-    Assert.assertEquals(fsn.getTotal(), total.longValue());
+    assertEquals(fsn.getTotal(), total.longValue());
     // get attribute "safemode"
     String safemode = (String) mbs.getAttribute(mxbeanName, "Safemode");
-    Assert.assertEquals(fsn.getSafemode(), safemode);
+    assertEquals(fsn.getSafemode(), safemode);
     // get attribute nondfs
     Long nondfs = (Long) (mbs.getAttribute(mxbeanName, "NonDfsUsedSpace"));
-    Assert.assertEquals(fsn.getNonDfsUsedSpace(), nondfs.longValue());
+    assertEquals(fsn.getNonDfsUsedSpace(), nondfs.longValue());
     // get attribute percentremaining
     Float percentremaining = (Float) (mbs.getAttribute(mxbeanName,
         "PercentRemaining"));
-    Assert.assertEquals(fsn.getPercentRemaining(), percentremaining
-        .floatValue());
+    assertEquals(fsn.getPercentRemaining(), percentremaining
+        .floatValue(), DELTA);
     // get attribute Totalblocks
     Long totalblocks = (Long) (mbs.getAttribute(mxbeanName, "TotalBlocks"));
-    Assert.assertEquals(fsn.getTotalBlocks(), totalblocks.longValue());
+    assertEquals(fsn.getTotalBlocks(), totalblocks.longValue());
     // get attribute alivenodeinfo
     String alivenodeinfo = (String) (mbs.getAttribute(mxbeanName,
         "LiveNodes"));
@@ -103,15 +107,15 @@ public class TestNameNodeMXBean {
       assertTrue(liveNode.containsKey("numBlocks"));
       assertTrue(((Long)liveNode.get("numBlocks")) == 0);
     }
-    Assert.assertEquals(fsn.getLiveNodes(), alivenodeinfo);
+    assertEquals(fsn.getLiveNodes(), alivenodeinfo);
     // get attribute deadnodeinfo
     String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
         "DeadNodes"));
-    Assert.assertEquals(fsn.getDeadNodes(), deadnodeinfo);
+    assertEquals(fsn.getDeadNodes(), deadnodeinfo);
     // get attribute NameDirStatuses
     String nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
         "NameDirStatuses"));
-    Assert.assertEquals(fsn.getNameDirStatuses(), nameDirStatuses);
+    assertEquals(fsn.getNameDirStatuses(), nameDirStatuses);
     Map<String, Map<String, String>> statusMap =
         (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
     Collection<URI> nameDirUris = cluster.getNameDirs(0);
 
@@ -26,8 +26,6 @@ import java.io.File;
 import java.util.Collections;
 import java.util.List;
 
-import junit.framework.AssertionFailedError;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -132,7 +130,7 @@ public class TestParallelImageWrite {
    * @param fsn - the FSNamesystem being checked.
    * @param numImageDirs - the configured number of StorageDirectory of type IMAGE.
    * @return - the md5 hash of the most recent FSImage files, which must all be the same.
-   * @throws AssertionFailedError if image files are empty or different,
+   * @throws AssertionError if image files are empty or different,
    *     if less than two StorageDirectory are provided, or if the
    *     actual number of StorageDirectory is less than configured.
    */
 
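The javadoc tweak above follows from the assertion change: org.junit.Assert failures surface as java.lang.AssertionError, whereas the old junit.framework assertions threw junit.framework.AssertionFailedError. A small illustration of the new behavior (class name and message are made up):

import static org.junit.Assert.assertTrue;

public class AssertionTypeExample {
  public static void main(String[] args) {
    try {
      assertTrue("image files should match", false);
    } catch (AssertionError e) {
      // JUnit4 assertions throw java.lang.AssertionError (or a subclass),
      // which is why the @throws clause was updated.
      System.out.println("caught " + e.getClass().getName() + ": " + e.getMessage());
    }
  }
}
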
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static junit.framework.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.mock;