HDFS-8152. Refactoring of lazy persist storage cases. (Arpit Agarwal)

commit 8511d80804 (parent 5459b241c8)
CHANGES.txt

@@ -448,6 +448,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8165. Move GRANDFATHER_GENERATION_STAMP and GRANDFATER_INODE_ID to
     hdfs-client. (wheat9)

+    HDFS-8152. Refactoring of lazy persist storage cases. (Arpit Agarwal)
+
   OPTIMIZATIONS

     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
LazyPersistTestCase.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;

+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+
 import static org.apache.hadoop.fs.CreateFlag.CREATE;
 import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST;
 import static org.apache.hadoop.fs.StorageType.DEFAULT;
@@ -45,6 +47,7 @@ import java.util.List;
 import java.util.Set;
 import java.util.UUID;

+import com.google.common.base.Preconditions;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -228,11 +231,15 @@ public abstract class LazyPersistTestCase {
    * If ramDiskStorageLimit is >=0, then RAM_DISK capacity is artificially
    * capped. If ramDiskStorageLimit < 0 then it is ignored.
    */
-  protected final void startUpCluster(boolean hasTransientStorage,
-                                      final int ramDiskReplicaCapacity,
-                                      final boolean useSCR,
-                                      final boolean useLegacyBlockReaderLocal)
-      throws IOException {
+  protected final void startUpCluster(
+      int numDatanodes,
+      boolean hasTransientStorage,
+      StorageType[] storageTypes,
+      int ramDiskReplicaCapacity,
+      long ramDiskStorageLimit,
+      long evictionLowWatermarkReplicas,
+      boolean useSCR,
+      boolean useLegacyBlockReaderLocal) throws IOException {

     Configuration conf = new Configuration();
     conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
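Note: the widened overload above folds every knob from the old positional variants into one method. A sketch of a direct call, with illustrative values only (the tests below go through the ClusterWithRamDiskBuilder added later in this patch rather than calling it positionally):

    // Hypothetical direct invocation; argument values are examples, not from the patch.
    startUpCluster(
        1,       // numDatanodes
        true,    // hasTransientStorage: provision a RAM_DISK volume
        null,    // storageTypes: null means derive from hasTransientStorage
        -1,      // ramDiskReplicaCapacity: no replica-count cap
        -1,      // ramDiskStorageLimit: no byte cap (exclusive with the capacity cap)
        3,       // evictionLowWatermarkReplicas
        false,   // useSCR
        false);  // useLegacyBlockReaderLocal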
@@ -243,17 +250,17 @@ public abstract class LazyPersistTestCase {
                 HEARTBEAT_RECHECK_INTERVAL_MSEC);
     conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
                 LAZY_WRITER_INTERVAL_SEC);
-    conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
-                EVICTION_LOW_WATERMARK * BLOCK_SIZE);
+    conf.setLong(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
+                 evictionLowWatermarkReplicas * BLOCK_SIZE);

     if (useSCR) {
       conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
       // Do not share a client context across tests.
       conf.set(DFS_CLIENT_CONTEXT, UUID.randomUUID().toString());
-      conf.set(DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
-          UserGroupInformation.getCurrentUser().getShortUserName());
       if (useLegacyBlockReaderLocal) {
         conf.setBoolean(DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
+        conf.set(DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
+            UserGroupInformation.getCurrentUser().getShortUserName());
       } else {
         sockDir = new TemporarySocketDirectory();
         conf.set(DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(),
@@ -261,22 +268,29 @@
       }
     }

-    long[] capacities = null;
+    Preconditions.checkState(
+        ramDiskReplicaCapacity < 0 || ramDiskStorageLimit < 0,
+        "Cannot specify non-default values for both ramDiskReplicaCapacity "
+            + "and ramDiskStorageLimit");
+
+    long[] capacities;
     if (hasTransientStorage && ramDiskReplicaCapacity >= 0) {
       // Convert replica count to byte count, add some delta for .meta and
       // VERSION files.
-      long ramDiskStorageLimit = ((long) ramDiskReplicaCapacity * BLOCK_SIZE) +
+      ramDiskStorageLimit = ((long) ramDiskReplicaCapacity * BLOCK_SIZE) +
           (BLOCK_SIZE - 1);
-      capacities = new long[] { ramDiskStorageLimit, -1 };
     }
+    capacities = new long[] { ramDiskStorageLimit, -1 };

     cluster = new MiniDFSCluster
         .Builder(conf)
-        .numDataNodes(REPL_FACTOR)
+        .numDataNodes(numDatanodes)
         .storageCapacities(capacities)
-        .storageTypes(hasTransientStorage ?
-            new StorageType[]{ RAM_DISK, DEFAULT } : null)
+        .storageTypes(storageTypes != null ? storageTypes :
+            (hasTransientStorage ? new StorageType[]{RAM_DISK, DEFAULT} : null))
         .build();
+    cluster.waitActive();
+
     fs = cluster.getFileSystem();
     client = fs.getClient();
     try {
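Note: the new Preconditions.checkState guard makes the two RAM disk sizing knobs mutually exclusive; a caller may cap the volume by replica count or by byte count, but not both. A minimal equivalent without Guava (checkState throws IllegalStateException when its condition is false):

    // Hand-rolled version of the Guava guard above.
    if (!(ramDiskReplicaCapacity < 0 || ramDiskStorageLimit < 0)) {
      throw new IllegalStateException(
          "Cannot specify non-default values for both ramDiskReplicaCapacity "
              + "and ramDiskStorageLimit");
    }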
@@ -287,65 +301,77 @@
     LOG.info("Cluster startup complete");
   }

-  /**
-   * If ramDiskStorageLimit is >=0, then RAM_DISK capacity is artificially
-   * capped. If ramDiskStorageLimit < 0 then it is ignored.
-   */
-  protected final void startUpCluster(final int numDataNodes,
-                                      final StorageType[] storageTypes,
-                                      final long ramDiskStorageLimit,
-                                      final boolean useSCR)
-      throws IOException {
-
-    Configuration conf = new Configuration();
-    conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    conf.setInt(DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC,
-                LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC);
-    conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL_SEC);
-    conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
-                HEARTBEAT_RECHECK_INTERVAL_MSEC);
-    conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
-                LAZY_WRITER_INTERVAL_SEC);
-
-    if (useSCR)
-    {
-      conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY,useSCR);
-      conf.set(DFS_CLIENT_CONTEXT, UUID.randomUUID().toString());
-      sockDir = new TemporarySocketDirectory();
-      conf.set(DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(),
-          this.getClass().getSimpleName() + "._PORT.sock").getAbsolutePath());
-      conf.set(DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
-          UserGroupInformation.getCurrentUser().getShortUserName());
-    }
-
-    cluster = new MiniDFSCluster
-        .Builder(conf)
-        .numDataNodes(numDataNodes)
-        .storageTypes(storageTypes != null ?
-            storageTypes : new StorageType[] { DEFAULT, DEFAULT })
-        .build();
-    fs = cluster.getFileSystem();
-    client = fs.getClient();
-
-    // Artificially cap the storage capacity of the RAM_DISK volume.
-    if (ramDiskStorageLimit >= 0) {
-      List<? extends FsVolumeSpi> volumes =
-          cluster.getDataNodes().get(0).getFSDataset().getVolumes();
-
-      for (FsVolumeSpi volume : volumes) {
-        if (volume.getStorageType() == RAM_DISK) {
-          ((FsVolumeImpl) volume).setCapacityForTesting(ramDiskStorageLimit);
-        }
-      }
-    }
-
-    LOG.info("Cluster startup complete");
+  ClusterWithRamDiskBuilder getClusterBuilder() {
+    return new ClusterWithRamDiskBuilder();
   }

-  protected final void startUpCluster(boolean hasTransientStorage,
-                                      final int ramDiskReplicaCapacity)
-      throws IOException {
-    startUpCluster(hasTransientStorage, ramDiskReplicaCapacity, false, false);
+  /**
+   * Builder class that allows controlling RAM disk-specific properties for a
+   * MiniDFSCluster.
+   */
+  class ClusterWithRamDiskBuilder {
+    public ClusterWithRamDiskBuilder setNumDatanodes(
+        int numDatanodes) {
+      this.numDatanodes = numDatanodes;
+      return this;
+    }
+
+    public ClusterWithRamDiskBuilder setStorageTypes(
+        StorageType[] storageTypes) {
+      this.storageTypes = storageTypes;
+      return this;
+    }
+
+    public ClusterWithRamDiskBuilder setRamDiskReplicaCapacity(
+        int ramDiskReplicaCapacity) {
+      this.ramDiskReplicaCapacity = ramDiskReplicaCapacity;
+      return this;
+    }
+
+    public ClusterWithRamDiskBuilder setRamDiskStorageLimit(
+        long ramDiskStorageLimit) {
+      this.ramDiskStorageLimit = ramDiskStorageLimit;
+      return this;
+    }
+
+    public ClusterWithRamDiskBuilder setUseScr(boolean useScr) {
+      this.useScr = useScr;
+      return this;
+    }
+
+    public ClusterWithRamDiskBuilder setHasTransientStorage(
+        boolean hasTransientStorage) {
+      this.hasTransientStorage = hasTransientStorage;
+      return this;
+    }
+
+    public ClusterWithRamDiskBuilder setUseLegacyBlockReaderLocal(
+        boolean useLegacyBlockReaderLocal) {
+      this.useLegacyBlockReaderLocal = useLegacyBlockReaderLocal;
+      return this;
+    }
+
+    public ClusterWithRamDiskBuilder setEvictionLowWatermarkReplicas(
+        long evictionLowWatermarkReplicas) {
+      this.evictionLowWatermarkReplicas = evictionLowWatermarkReplicas;
+      return this;
+    }
+
+    public void build() throws IOException {
+      LazyPersistTestCase.this.startUpCluster(
+          numDatanodes, hasTransientStorage, storageTypes, ramDiskReplicaCapacity,
+          ramDiskStorageLimit, evictionLowWatermarkReplicas,
+          useScr, useLegacyBlockReaderLocal);
+    }
+
+    private int numDatanodes = REPL_FACTOR;
+    private StorageType[] storageTypes = null;
+    private int ramDiskReplicaCapacity = -1;
+    private long ramDiskStorageLimit = -1;
+    private boolean hasTransientStorage = true;
+    private boolean useScr = false;
+    private boolean useLegacyBlockReaderLocal = false;
+    private long evictionLowWatermarkReplicas = EVICTION_LOW_WATERMARK;
   }

   protected final void triggerBlockReport()
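Note: the effect on callers is what the remaining diffs below show: fixed positional overloads give way to a fluent builder whose fields default to the old implicit behavior (REPL_FACTOR datanodes, transient storage enabled, no short-circuit reads). A before/after sketch, using a call that appears verbatim in this patch:

    // Before: the meaning of each positional argument is opaque at the call site.
    startUpCluster(true, 1 + EVICTION_LOW_WATERMARK, true, false);

    // After: only the non-default knobs are spelled out.
    getClusterBuilder().setRamDiskReplicaCapacity(1 + EVICTION_LOW_WATERMARK)
        .setUseScr(true)
        .setUseLegacyBlockReaderLocal(false)
        .build();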
TestLazyPersistFiles.java

@@ -19,11 +19,6 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 import com.google.common.util.concurrent.Uninterruptibles;
-import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
@@ -34,17 +29,12 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.ArrayList;
-import java.util.Collections;
-
-import static org.apache.hadoop.fs.StorageType.DEFAULT;
 import static org.apache.hadoop.fs.StorageType.RAM_DISK;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;

 import static org.hamcrest.core.Is.is;
-import static org.hamcrest.core.IsNot.not;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;

 public class TestLazyPersistFiles extends LazyPersistTestCase {
@@ -56,7 +46,7 @@ public class TestLazyPersistFiles extends LazyPersistTestCase {
    */
   @Test
   public void testAppendIsDenied() throws IOException {
-    startUpCluster(true, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");

@@ -77,7 +67,7 @@ public class TestLazyPersistFiles extends LazyPersistTestCase {
    */
   @Test
   public void testTruncateIsDenied() throws IOException {
-    startUpCluster(true, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");

@@ -99,7 +89,7 @@ public class TestLazyPersistFiles extends LazyPersistTestCase {
   @Test
   public void testCorruptFilesAreDiscarded()
       throws IOException, InterruptedException {
-    startUpCluster(true, 2);
+    getClusterBuilder().setRamDiskReplicaCapacity(2).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path1 = new Path("/" + METHOD_NAME + ".01.dat");

@@ -136,7 +126,7 @@ public class TestLazyPersistFiles extends LazyPersistTestCase {
   @Test
   public void testConcurrentRead()
       throws Exception {
-    startUpCluster(true, 2);
+    getClusterBuilder().setRamDiskReplicaCapacity(2).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     final Path path1 = new Path("/" + METHOD_NAME + ".dat");

@@ -187,7 +177,7 @@ public class TestLazyPersistFiles extends LazyPersistTestCase {
   @Test
   public void testConcurrentWrites()
       throws IOException, InterruptedException {
-    startUpCluster(true, 9);
+    getClusterBuilder().setRamDiskReplicaCapacity(9).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     final int SEED = 0xFADED;
     final int NUM_WRITERS = 4;
TestLazyPersistPolicy.java

@@ -34,7 +34,7 @@ import static org.junit.Assert.assertThat;
 public class TestLazyPersistPolicy extends LazyPersistTestCase {
   @Test
   public void testPolicyNotSetByDefault() throws IOException {
-    startUpCluster(false, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");

@@ -47,7 +47,7 @@ public class TestLazyPersistPolicy extends LazyPersistTestCase {

   @Test
   public void testPolicyPropagation() throws IOException {
-    startUpCluster(false, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");

@@ -59,7 +59,7 @@ public class TestLazyPersistPolicy extends LazyPersistTestCase {

   @Test
   public void testPolicyPersistenceInEditLog() throws IOException {
-    startUpCluster(false, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");

@@ -73,7 +73,7 @@ public class TestLazyPersistPolicy extends LazyPersistTestCase {

   @Test
   public void testPolicyPersistenceInFsImage() throws IOException {
-    startUpCluster(false, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");

TestLazyPersistReplicaPlacement.java

@@ -33,7 +33,7 @@ import static org.junit.Assert.fail;
 public class TestLazyPersistReplicaPlacement extends LazyPersistTestCase {
   @Test
   public void testPlacementOnRamDisk() throws IOException {
-    startUpCluster(true, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");

@@ -43,7 +43,7 @@ public class TestLazyPersistReplicaPlacement extends LazyPersistTestCase {

   @Test
   public void testPlacementOnSizeLimitedRamDisk() throws IOException {
-    startUpCluster(true, 3);
+    getClusterBuilder().setRamDiskReplicaCapacity(3).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
     Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
@@ -62,7 +62,7 @@ public class TestLazyPersistReplicaPlacement extends LazyPersistTestCase {
    */
   @Test
   public void testFallbackToDisk() throws IOException {
-    startUpCluster(false, -1);
+    getClusterBuilder().setHasTransientStorage(false).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");

@@ -76,7 +76,7 @@ public class TestLazyPersistReplicaPlacement extends LazyPersistTestCase {
    */
   @Test
   public void testFallbackToDiskFull() throws Exception {
-    startUpCluster(false, 0);
+    getClusterBuilder().setRamDiskReplicaCapacity(0).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");

@@ -95,7 +95,7 @@ public class TestLazyPersistReplicaPlacement extends LazyPersistTestCase {
   @Test
   public void testFallbackToDiskPartial()
       throws IOException, InterruptedException {
-    startUpCluster(true, 2);
+    getClusterBuilder().setRamDiskReplicaCapacity(2).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");

@@ -134,7 +134,7 @@ public class TestLazyPersistReplicaPlacement extends LazyPersistTestCase {
    */
   @Test
   public void testRamDiskNotChosenByDefault() throws IOException {
-    startUpCluster(true, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");

TestLazyPersistReplicaRecovery.java

@@ -32,7 +32,7 @@ public class TestLazyPersistReplicaRecovery extends LazyPersistTestCase {
   public void testDnRestartWithSavedReplicas()
       throws IOException, InterruptedException {

-    startUpCluster(true, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path1 = new Path("/" + METHOD_NAME + ".01.dat");

@@ -57,7 +57,7 @@ public class TestLazyPersistReplicaRecovery extends LazyPersistTestCase {
   public void testDnRestartWithUnsavedReplicas()
       throws IOException, InterruptedException {

-    startUpCluster(true, 1);
+    getClusterBuilder().build();
     FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));

     final String METHOD_NAME = GenericTestUtils.getMethodName();
TestLazyWriter.java

@@ -39,7 +39,7 @@ public class TestLazyWriter extends LazyPersistTestCase {
   @Test
   public void testLazyPersistBlocksAreSaved()
       throws IOException, InterruptedException {
-    startUpCluster(true, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");

@@ -63,7 +63,7 @@ public class TestLazyWriter extends LazyPersistTestCase {
    */
   @Test
   public void testRamDiskEviction() throws Exception {
-    startUpCluster(true, 1 + EVICTION_LOW_WATERMARK);
+    getClusterBuilder().setRamDiskReplicaCapacity(1 + EVICTION_LOW_WATERMARK).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
     Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
@@ -99,7 +99,7 @@ public class TestLazyWriter extends LazyPersistTestCase {
   @Test
   public void testRamDiskEvictionBeforePersist()
       throws IOException, InterruptedException {
-    startUpCluster(true, 1);
+    getClusterBuilder().setRamDiskReplicaCapacity(1).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
     Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
@@ -133,7 +133,7 @@ public class TestLazyWriter extends LazyPersistTestCase {
   public void testRamDiskEvictionIsLru()
       throws Exception {
     final int NUM_PATHS = 5;
-    startUpCluster(true, NUM_PATHS + EVICTION_LOW_WATERMARK);
+    getClusterBuilder().setRamDiskReplicaCapacity(NUM_PATHS + EVICTION_LOW_WATERMARK).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path paths[] = new Path[NUM_PATHS * 2];

@@ -194,7 +194,7 @@ public class TestLazyWriter extends LazyPersistTestCase {
   @Test
   public void testDeleteBeforePersist()
       throws Exception {
-    startUpCluster(true, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));

@@ -221,7 +221,7 @@ public class TestLazyWriter extends LazyPersistTestCase {
   @Test
   public void testDeleteAfterPersist()
       throws Exception {
-    startUpCluster(true, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");

@@ -249,7 +249,7 @@ public class TestLazyWriter extends LazyPersistTestCase {
   @Test
   public void testDfsUsageCreateDelete()
       throws IOException, InterruptedException {
-    startUpCluster(true, 4);
+    getClusterBuilder().setRamDiskReplicaCapacity(4).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");

TestScrLazyPersistFiles.java

@@ -70,9 +70,11 @@ public class TestScrLazyPersistFiles extends LazyPersistTestCase {
   @Test
   public void testRamDiskShortCircuitRead()
       throws IOException, InterruptedException {
-    startUpCluster(REPL_FACTOR,
-        new StorageType[]{RAM_DISK, DEFAULT},
-        2 * BLOCK_SIZE - 1, true);  // 1 replica + delta, SCR read
+    getClusterBuilder().setNumDatanodes(REPL_FACTOR)
+        .setStorageTypes(new StorageType[]{RAM_DISK, DEFAULT})
+        .setRamDiskStorageLimit(2 * BLOCK_SIZE - 1)
+        .setUseScr(true)
+        .build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     final int SEED = 0xFADED;
     Path path = new Path("/" + METHOD_NAME + ".dat");
@@ -111,8 +113,14 @@ public class TestScrLazyPersistFiles extends LazyPersistTestCase {
   @Test
   public void testRamDiskEvictionWithShortCircuitReadHandle()
       throws IOException, InterruptedException {
-    startUpCluster(REPL_FACTOR, new StorageType[] { RAM_DISK, DEFAULT },
-        (6 * BLOCK_SIZE - 1), true);  // 5 replica + delta, SCR.
+    // 5 replica + delta, SCR.
+    getClusterBuilder().setNumDatanodes(REPL_FACTOR)
+        .setStorageTypes(new StorageType[]{RAM_DISK, DEFAULT})
+        .setRamDiskStorageLimit(6 * BLOCK_SIZE - 1)
+        .setEvictionLowWatermarkReplicas(3)
+        .setUseScr(true)
+        .build();
+
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
     Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
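Note: the byte limit here mirrors the replica-to-byte conversion in startUpCluster, where n replicas are sized as n * BLOCK_SIZE plus a sub-block delta for .meta and VERSION files. A worked check of the "5 replica + delta" comment (the concrete BLOCK_SIZE is whatever LazyPersistTestCase defines; the value below is illustrative):

    long BLOCK_SIZE = 5 * 1024 * 1024;  // illustrative only
    // 6 * BLOCK_SIZE - 1 == 5 * BLOCK_SIZE + (BLOCK_SIZE - 1): five full
    // replicas plus the metadata delta, so a sixth replica cannot fit.
    assert 6 * BLOCK_SIZE - 1 == 5 * BLOCK_SIZE + (BLOCK_SIZE - 1);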
@@ -156,14 +164,20 @@ public class TestScrLazyPersistFiles extends LazyPersistTestCase {
   public void testShortCircuitReadAfterEviction()
       throws IOException, InterruptedException {
     Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
-    startUpCluster(true, 1 + EVICTION_LOW_WATERMARK, true, false);
+    getClusterBuilder().setRamDiskReplicaCapacity(1 + EVICTION_LOW_WATERMARK)
+        .setUseScr(true)
+        .setUseLegacyBlockReaderLocal(false)
+        .build();
     doShortCircuitReadAfterEvictionTest();
   }

   @Test
   public void testLegacyShortCircuitReadAfterEviction()
       throws IOException, InterruptedException {
-    startUpCluster(true, 1 + EVICTION_LOW_WATERMARK, true, true);
+    getClusterBuilder().setRamDiskReplicaCapacity(1 + EVICTION_LOW_WATERMARK)
+        .setUseScr(true)
+        .setUseLegacyBlockReaderLocal(true)
+        .build();
     doShortCircuitReadAfterEvictionTest();
   }

@@ -220,14 +234,20 @@ public class TestScrLazyPersistFiles extends LazyPersistTestCase {
   public void testShortCircuitReadBlockFileCorruption() throws IOException,
       InterruptedException {
     Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
-    startUpCluster(true, 1 + EVICTION_LOW_WATERMARK, true, false);
+    getClusterBuilder().setRamDiskReplicaCapacity(1 + EVICTION_LOW_WATERMARK)
+        .setUseScr(true)
+        .setUseLegacyBlockReaderLocal(false)
+        .build();
     doShortCircuitReadBlockFileCorruptionTest();
   }

   @Test
   public void testLegacyShortCircuitReadBlockFileCorruption() throws IOException,
       InterruptedException {
-    startUpCluster(true, 1 + EVICTION_LOW_WATERMARK, true, true);
+    getClusterBuilder().setRamDiskReplicaCapacity(1 + EVICTION_LOW_WATERMARK)
+        .setUseScr(true)
+        .setUseLegacyBlockReaderLocal(true)
+        .build();
     doShortCircuitReadBlockFileCorruptionTest();
   }

@@ -260,14 +280,20 @@ public class TestScrLazyPersistFiles extends LazyPersistTestCase {
   public void testShortCircuitReadMetaFileCorruption() throws IOException,
       InterruptedException {
     Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
-    startUpCluster(true, 1 + EVICTION_LOW_WATERMARK, true, false);
+    getClusterBuilder().setRamDiskReplicaCapacity(1 + EVICTION_LOW_WATERMARK)
+        .setUseScr(true)
+        .setUseLegacyBlockReaderLocal(false)
+        .build();
     doShortCircuitReadMetaFileCorruptionTest();
   }

   @Test
   public void testLegacyShortCircuitReadMetaFileCorruption() throws IOException,
       InterruptedException {
-    startUpCluster(true, 1 + EVICTION_LOW_WATERMARK, true, true);
+    getClusterBuilder().setRamDiskReplicaCapacity(1 + EVICTION_LOW_WATERMARK)
+        .setUseScr(true)
+        .setUseLegacyBlockReaderLocal(true)
+        .build();
     doShortCircuitReadMetaFileCorruptionTest();
   }
