HDFS-15988. Stabilise HDFS Pre-Commit. (#2860). Contributed by Ayush Saxena.
Signed-off-by: He Xiaoqiao <hexiaoqiao@apache.org>
commit fcd4140e5f (parent 344cacce23)
@@ -355,6 +355,7 @@ function personality_modules
       fi
     ;;
     unit)
+      extra="-Dsurefire.rerunFailingTestsCount=2"
       if [[ "${BUILDMODE}" = full ]]; then
         ordering=mvnsrc
       elif [[ "${CHANGED_MODULES[*]}" =~ \. ]]; then
@@ -363,7 +364,7 @@ function personality_modules
 
       if [[ ${TEST_PARALLEL} = "true" ]] ; then
         if hadoop_test_parallel; then
-          extra="-Pparallel-tests"
+          extra="${extra} -Pparallel-tests"
           if [[ -n ${TEST_THREADS:-} ]]; then
             extra="${extra} -DtestsThreadCount=${TEST_THREADS}"
           fi
@@ -179,7 +179,7 @@ RUN mkdir -p /opt/isa-l-src \
 ###
 # Avoid out of memory errors in builds
 ###
-ENV MAVEN_OPTS -Xms256m -Xmx1536m
+ENV MAVEN_OPTS -Xms256m -Xmx3072m
 
 # Skip gpg verification when downloading Yetus via yetus-wrapper
 ENV HADOOP_SKIP_YETUS_VERIFICATION true
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -97,12 +98,14 @@ public class TestPersistBlocks {
     conf.setInt(
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
         0);
+    conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, true);
     MiniDFSCluster cluster = null;
 
     long len = 0;
     FSDataOutputStream stream;
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+      cluster.waitActive();
       FileSystem fs = cluster.getFileSystem();
       // Creating a file with 4096 blockSize to write multiple blocks
       stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
@@ -91,7 +91,6 @@ import org.slf4j.event.Level;
 public class TestDirectoryScanner {
   private static final Logger LOG =
       LoggerFactory.getLogger(TestDirectoryScanner.class);
-  private static final Configuration CONF = new HdfsConfiguration();
   private static final int DEFAULT_GEN_STAMP = 9999;
 
   private MiniDFSCluster cluster;
@@ -103,12 +102,14 @@ public class TestDirectoryScanner {
   private final Random r = new Random();
   private static final int BLOCK_LENGTH = 100;
 
-  static {
-    CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_LENGTH);
-    CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
-    CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
-    CONF.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
+  public Configuration getConfiguration() {
+    Configuration configuration = new HdfsConfiguration();
+    configuration.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_LENGTH);
+    configuration.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
+    configuration.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+    configuration.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
         getMemlockLimit(Long.MAX_VALUE));
+    return configuration;
   }
 
   @Before
@ -361,7 +362,8 @@ public class TestDirectoryScanner {
|
||||||
|
|
||||||
@Test(timeout = 300000)
|
@Test(timeout = 300000)
|
||||||
public void testRetainBlockOnPersistentStorage() throws Exception {
|
public void testRetainBlockOnPersistentStorage() throws Exception {
|
||||||
cluster = new MiniDFSCluster.Builder(CONF)
|
Configuration conf = getConfiguration();
|
||||||
|
cluster = new MiniDFSCluster.Builder(conf)
|
||||||
.storageTypes(
|
.storageTypes(
|
||||||
new StorageType[] { StorageType.RAM_DISK, StorageType.DEFAULT })
|
new StorageType[] { StorageType.RAM_DISK, StorageType.DEFAULT })
|
||||||
.numDataNodes(1).build();
|
.numDataNodes(1).build();
|
||||||
|
@@ -370,7 +372,7 @@ public class TestDirectoryScanner {
       bpid = cluster.getNamesystem().getBlockPoolId();
       fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
       client = cluster.getFileSystem().getClient();
-      scanner = new DirectoryScanner(fds, CONF);
+      scanner = new DirectoryScanner(fds, conf);
       scanner.setRetainDiffs(true);
       FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));
 
@@ -413,8 +415,9 @@ public class TestDirectoryScanner {
         new WriterAppender(new SimpleLayout(), loggerStream);
     rootLogger.addAppender(writerAppender);
 
+    Configuration conf = getConfiguration();
     cluster = new MiniDFSCluster
-        .Builder(CONF)
+        .Builder(conf)
         .storageTypes(new StorageType[] {
             StorageType.RAM_DISK, StorageType.DEFAULT })
         .numDataNodes(1)
@@ -424,7 +427,7 @@ public class TestDirectoryScanner {
       bpid = cluster.getNamesystem().getBlockPoolId();
       fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
       client = cluster.getFileSystem().getClient();
-      scanner = new DirectoryScanner(fds, CONF);
+      scanner = new DirectoryScanner(fds, conf);
       scanner.setRetainDiffs(true);
       FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));
 
@@ -464,7 +467,8 @@ public class TestDirectoryScanner {
 
   @Test(timeout = 300000)
   public void testDeleteBlockOnTransientStorage() throws Exception {
-    cluster = new MiniDFSCluster.Builder(CONF)
+    Configuration conf = getConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf)
         .storageTypes(
             new StorageType[] { StorageType.RAM_DISK, StorageType.DEFAULT })
         .numDataNodes(1).build();
@@ -473,7 +477,7 @@ public class TestDirectoryScanner {
       bpid = cluster.getNamesystem().getBlockPoolId();
       fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
       client = cluster.getFileSystem().getClient();
-      scanner = new DirectoryScanner(fds, CONF);
+      scanner = new DirectoryScanner(fds, conf);
       scanner.setRetainDiffs(true);
       FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));
 
@@ -512,16 +516,17 @@ public class TestDirectoryScanner {
   }
 
   public void runTest(int parallelism) throws Exception {
-    cluster = new MiniDFSCluster.Builder(CONF).build();
+    Configuration conf = getConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf).build();
     try {
       cluster.waitActive();
       bpid = cluster.getNamesystem().getBlockPoolId();
       fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
       client = cluster.getFileSystem().getClient();
-      CONF.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
+      conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
           parallelism);
 
-      scanner = new DirectoryScanner(fds, CONF);
+      scanner = new DirectoryScanner(fds, conf);
       scanner.setRetainDiffs(true);
 
       // Add files with 100 blocks
@@ -672,9 +677,9 @@ public class TestDirectoryScanner {
    *
    * @throws Exception thrown on unexpected failure
    */
-  @Test(timeout = 600000)
+  @Test
   public void testThrottling() throws Exception {
-    Configuration conf = new Configuration(CONF);
+    Configuration conf = new Configuration(getConfiguration());
 
     // We need lots of blocks so the report compiler threads have enough to
     // keep them busy while we watch them.
@@ -714,7 +719,7 @@ public class TestDirectoryScanner {
       // Waiting should be about 9x running.
       LOG.info("RATIO: " + ratio);
       assertTrue("Throttle is too restrictive", ratio <= 10f);
-      assertTrue("Throttle is too permissive", ratio >= 7f);
+      assertTrue("Throttle is too permissive" + ratio, ratio >= 7f);
 
       // Test with a different limit
       conf.setInt(
@@ -754,7 +759,7 @@ public class TestDirectoryScanner {
       assertTrue("Throttle is too permissive", ratio >= 7f);
 
       // Test with no limit
-      scanner = new DirectoryScanner(fds, CONF);
+      scanner = new DirectoryScanner(fds, getConfiguration());
       scanner.setRetainDiffs(true);
       scan(blocks, 0, 0, 0, 0, 0);
       scanner.shutdown();
@@ -1095,13 +1100,14 @@ public class TestDirectoryScanner {
    */
   @Test(timeout = 60000)
   public void testExceptionHandlingWhileDirectoryScan() throws Exception {
-    cluster = new MiniDFSCluster.Builder(CONF).build();
+    Configuration conf = getConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf).build();
     try {
       cluster.waitActive();
       bpid = cluster.getNamesystem().getBlockPoolId();
       fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
       client = cluster.getFileSystem().getClient();
-      CONF.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 1);
+      conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 1);
 
       // Add files with 2 blocks
       createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH * 2, false);
@@ -1121,7 +1127,7 @@ public class TestDirectoryScanner {
       FsDatasetSpi<? extends FsVolumeSpi> spyFds = Mockito.spy(fds);
       Mockito.doReturn(volReferences).when(spyFds).getFsVolumeReferences();
 
-      scanner = new DirectoryScanner(spyFds, CONF);
+      scanner = new DirectoryScanner(spyFds, conf);
       scanner.setRetainDiffs(true);
       scanner.reconcile();
     } finally {
@ -1135,7 +1141,7 @@ public class TestDirectoryScanner {
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testDirectoryScannerInFederatedCluster() throws Exception {
|
public void testDirectoryScannerInFederatedCluster() throws Exception {
|
||||||
HdfsConfiguration conf = new HdfsConfiguration(CONF);
|
HdfsConfiguration conf = new HdfsConfiguration(getConfiguration());
|
||||||
// Create Federated cluster with two nameservices and one DN
|
// Create Federated cluster with two nameservices and one DN
|
||||||
try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
|
try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
|
||||||
.nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(2))
|
.nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(2))
|
||||||
|
|
|
@@ -324,7 +324,7 @@ public class TestPipelinesFailover {
    * DN running the recovery should then fail to commit the synchronization
    * and a later retry will succeed.
    */
-  @Test(timeout=30000)
+  @Test(timeout=60000)
   public void testFailoverRightBeforeCommitSynchronization() throws Exception {
     final Configuration conf = new Configuration();
     // Disable permissions so that another user can recover the lease.
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import static org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature.SNAPSHOT_QUOTA_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_MAX_LIMIT;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -61,6 +61,7 @@ public class TestNestedSnapshots {
 
   private static final short REPLICATION = 3;
   private static final long BLOCKSIZE = 1024;
+  private static final int SNAPSHOTLIMIT = 100;
 
   private static final Configuration conf = new Configuration();
   private static MiniDFSCluster cluster;
@@ -68,6 +69,7 @@ public class TestNestedSnapshots {
 
   @Before
   public void setUp() throws Exception {
+    conf.setInt(DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, SNAPSHOTLIMIT);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
         .build();
     cluster.waitActive();
@@ -199,7 +201,7 @@ public class TestNestedSnapshots {
    * Test the snapshot limit of a single snapshottable directory.
    * @throws Exception
    */
-  @Test (timeout=300000)
+  @Test (timeout=600000)
   public void testSnapshotLimit() throws Exception {
     final int step = 1000;
     final String dirStr = "/testSnapshotLimit/dir";
@@ -208,7 +210,8 @@ public class TestNestedSnapshots {
     hdfs.allowSnapshot(dir);
 
     int s = 0;
-    for(; s < SNAPSHOT_QUOTA_DEFAULT; s++) {
+    for(; s < SNAPSHOTLIMIT; s++) {
+      SnapshotTestHelper.LOG.info("Creating snapshot number: {}", s);
       final String snapshotName = "s" + s;
       hdfs.createSnapshot(dir, snapshotName);
 
@@ -226,10 +229,10 @@ public class TestNestedSnapshots {
       SnapshotTestHelper.LOG.info("The exception is expected.", ioe);
     }
 
-    for(int f = 0; f < SNAPSHOT_QUOTA_DEFAULT; f += step) {
+    for(int f = 0; f < SNAPSHOTLIMIT; f += step) {
       final String file = "f" + f;
       s = RANDOM.nextInt(step);
-      for(; s < SNAPSHOT_QUOTA_DEFAULT; s += RANDOM.nextInt(step)) {
+      for(; s < SNAPSHOTLIMIT; s += RANDOM.nextInt(step)) {
         final Path p = SnapshotTestHelper.getSnapshotPath(dir, "s" + s, file);
         //the file #f exists in snapshot #s iff s > f.
         Assert.assertEquals(s > f, hdfs.exists(p));