HDFS-9701. DN may deadlock when hot-swapping under load. (Xiao Chen via lei)

parent b11008d54d
commit e50aa53eed

@@ -2667,6 +2667,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-9210. Fix some misuse of %n in VolumeScanner#printStats.
     (Xiaoyu Yao)
 
+    HDFS-9701. DN may deadlock when hot-swapping under load. (Xiao Chen via lei)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -493,6 +493,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           // Disable the volume from the service.
           asyncDiskService.removeVolume(sd.getCurrentDir());
           volumes.removeVolume(absRoot, clearFailure);
+          volumes.waitVolumeRemoved(5000, this);
 
           // Removed all replica information for the blocks on the volume.
           // Unlike updating the volumeMap in addVolume(), this operation does
@@ -1769,6 +1770,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    *
    * @throws IOException May be thrown from the methods called.
    */
+  @Override // FsDatasetSpi
   public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
       throws ReplicaNotFoundException, UnexpectedReplicaStateException,
       FileNotFoundException, EOFException, IOException {
@@ -235,29 +235,30 @@ public class FsVolumeImpl implements FsVolumeSpi {
   }
 
   /**
-   * Close this volume and wait all other threads to release the reference count
-   * on this volume.
-   * @throws IOException if the volume is closed or the waiting is interrupted.
+   * Close this volume.
+   * @throws IOException if the volume is closed.
    */
-  void closeAndWait() throws IOException {
+  void setClosed() throws IOException {
     try {
       this.reference.setClosed();
     } catch (ClosedChannelException e) {
       throw new IOException("The volume has already closed.", e);
     }
-    final int SLEEP_MILLIS = 500;
-    while (this.reference.getReferenceCount() > 0) {
+  }
+
+  /**
+   * Check whether this volume has successfully been closed.
+   */
+  boolean checkClosed() {
+    if (this.reference.getReferenceCount() > 0) {
       if (FsDatasetImpl.LOG.isDebugEnabled()) {
         FsDatasetImpl.LOG.debug(String.format(
             "The reference count for %s is %d, wait to be 0.",
             this, reference.getReferenceCount()));
       }
-      try {
-        Thread.sleep(SLEEP_MILLIS);
-      } catch (InterruptedException e) {
-        throw new IOException(e);
-      }
+      return false;
     }
+    return true;
   }
 
   File getCurrentDir() {
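
The hunk above replaces the old closeAndWait(), which polled the reference count with Thread.sleep() in a loop, with a non-blocking setClosed() plus a separate checkClosed() probe; callers now decide where, and under which lock, any waiting happens. The following is only an illustrative sketch of that split on a made-up Resource class; the class, field, and method names are hypothetical and not taken from the patch:

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicInteger;

    // Sketch: separate "mark closed" (non-blocking) from "has every reference been released?" (a probe).
    class Resource {
      private final AtomicInteger refCount = new AtomicInteger();
      private volatile boolean closed;

      // Flip the state only; never sleep or block here.
      void setClosed() throws IOException {
        if (closed) {
          throw new IOException("Already closed");
        }
        closed = true;
      }

      // Report whether the resource is closed and all references are released.
      boolean checkClosed() {
        return closed && refCount.get() == 0;
      }

      void acquire() { refCount.incrementAndGet(); }
      void release() { refCount.decrementAndGet(); }
    }
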
@@ -23,6 +23,7 @@ import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
@@ -49,6 +50,8 @@ class FsVolumeList {
   // Tracks volume failures, sorted by volume path.
   private final Map<String, VolumeFailureInfo> volumeFailureInfos =
       Collections.synchronizedMap(new TreeMap<String, VolumeFailureInfo>());
+  private final ConcurrentLinkedQueue<FsVolumeImpl> volumesBeingRemoved =
+      new ConcurrentLinkedQueue<>();
   private Object checkDirsMutex = new Object();
 
   private final VolumeChoosingPolicy<FsVolumeImpl> blockChooser;
@@ -257,10 +260,33 @@ class FsVolumeList {
             + " failure volumes.");
       }
 
+      waitVolumeRemoved(5000, checkDirsMutex);
       return failedVols;
     }
   }
 
+  /**
+   * Wait for the reference of the volume removed from a previous
+   * {@link #removeVolume(FsVolumeImpl)} call to be released.
+   *
+   * @param sleepMillis interval to recheck.
+   */
+  void waitVolumeRemoved(int sleepMillis, Object monitor) {
+    while (!checkVolumesRemoved()) {
+      if (FsDatasetImpl.LOG.isDebugEnabled()) {
+        FsDatasetImpl.LOG.debug("Waiting for volume reference to be released.");
+      }
+      try {
+        monitor.wait(sleepMillis);
+      } catch (InterruptedException e) {
+        FsDatasetImpl.LOG.info("Thread interrupted when waiting for "
+            + "volume reference to be released.");
+        Thread.currentThread().interrupt();
+      }
+    }
+    FsDatasetImpl.LOG.info("Volume reference is released.");
+  }
+
   @Override
   public String toString() {
     return volumes.toString();
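
The new waitVolumeRemoved(sleepMillis, monitor) relies on Object.wait(), which must be called while holding the monitor's lock; the call sites in this patch pass the object they are expected to hold (checkDirsMutex in checkDirs() above, and this in FsDatasetImpl.removeVolumes()). A minimal, self-contained illustration of that bounded wait/notify idiom follows; the WaitExample class and the done flag are hypothetical stand-ins, not code from the patch:

    // Object.wait(timeout) must be invoked while holding the monitor, otherwise it
    // throws IllegalMonitorStateException; the timeout bounds the wait if no
    // notify ever arrives, and the condition is re-checked in a loop.
    final class WaitExample {
      public static void main(String[] args) throws InterruptedException {
        final Object monitor = new Object();
        final boolean[] done = {false};   // stand-in for checkVolumesRemoved()

        Thread worker = new Thread(() -> {
          try {
            Thread.sleep(200);            // simulate the last reference being released
          } catch (InterruptedException ignored) {
            Thread.currentThread().interrupt();
          }
          synchronized (monitor) {
            done[0] = true;
            monitor.notifyAll();
          }
        });
        worker.start();

        synchronized (monitor) {          // hold the monitor before waiting
          while (!done[0]) {
            monitor.wait(5000);           // bounded wait, then re-check the condition
          }
        }
        System.out.println("condition satisfied");
      }
    }
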
@@ -298,12 +324,13 @@ class FsVolumeList {
         blockScanner.removeVolumeScanner(target);
       }
       try {
-        target.closeAndWait();
+        target.setClosed();
       } catch (IOException e) {
         FsDatasetImpl.LOG.warn(
             "Error occurs when waiting volume to close: " + target, e);
       }
       target.shutdown();
+      volumesBeingRemoved.add(target);
       FsDatasetImpl.LOG.info("Removed volume: " + target);
     } else {
       if (FsDatasetImpl.LOG.isDebugEnabled()) {
@@ -336,6 +363,24 @@ class FsVolumeList {
     return infos.toArray(new VolumeFailureInfo[infos.size()]);
   }
 
+  /**
+   * Check whether the reference of the volume from a previous
+   * {@link #removeVolume(FsVolumeImpl)} call is released.
+   *
+   * @return Whether the reference is released.
+   */
+  boolean checkVolumesRemoved() {
+    Iterator<FsVolumeImpl> it = volumesBeingRemoved.iterator();
+    while (it.hasNext()) {
+      FsVolumeImpl volume = it.next();
+      if (!volume.checkClosed()) {
+        return false;
+      }
+      it.remove();
+    }
+    return true;
+  }
+
   void addVolumeFailureInfo(VolumeFailureInfo volumeFailureInfo) {
     volumeFailureInfos.put(volumeFailureInfo.getFailedStorageLocation(),
         volumeFailureInfo);
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import com.google.common.collect.Lists;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
@@ -65,6 +66,7 @@ import java.io.Writer;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.concurrent.CountDownLatch;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -89,8 +91,11 @@ import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestFsDatasetImpl {
+  Logger LOG = LoggerFactory.getLogger(TestFsDatasetImpl.class);
   private static final String BASE_DIR =
       new FileSystemTestHelper().getTestRootDir();
   private static final int NUM_INIT_VOLUMES = 2;
@@ -119,6 +124,7 @@ public class TestFsDatasetImpl {
     List<Storage.StorageDirectory> dirs =
         new ArrayList<Storage.StorageDirectory>();
     List<String> dirStrings = new ArrayList<String>();
+    FileUtils.deleteDirectory(new File(BASE_DIR));
     for (int i = 0; i < numDirs; i++) {
       File loc = new File(BASE_DIR + "/data" + i);
       dirStrings.add(new Path(loc.toString()).toUri().toString());
@@ -296,6 +302,7 @@ public class TestFsDatasetImpl {
       FsVolumeImpl volume = mock(FsVolumeImpl.class);
       oldVolumes.add(volume);
       when(volume.getBasePath()).thenReturn("data" + i);
+      when(volume.checkClosed()).thenReturn(true);
       FsVolumeReference ref = mock(FsVolumeReference.class);
       when(ref.getVolume()).thenReturn(volume);
       volumeList.addVolume(ref);
@@ -541,4 +548,52 @@ public class TestFsDatasetImpl {
 
     return dfsUsed;
   }
+
+  @Test(timeout = 30000)
+  public void testRemoveVolumeBeingWritten() throws Exception {
+    // Will write and remove on dn0.
+    final ExtendedBlock eb = new ExtendedBlock(BLOCK_POOL_IDS[0], 0);
+    final CountDownLatch startFinalizeLatch = new CountDownLatch(1);
+    final CountDownLatch brReceivedLatch = new CountDownLatch(1);
+    class BlockReportThread extends Thread {
+      public void run() {
+        LOG.info("Getting block report");
+        dataset.getBlockReports(eb.getBlockPoolId());
+        LOG.info("Successfully received block report");
+        brReceivedLatch.countDown();
+      }
+    }
+
+    final BlockReportThread brt = new BlockReportThread();
+    class ResponderThread extends Thread {
+      public void run() {
+        try (ReplicaHandler replica = dataset
+            .createRbw(StorageType.DEFAULT, eb, false)) {
+          LOG.info("createRbw finished");
+          startFinalizeLatch.countDown();
+
+          // Slow down while we're holding the reference to the volume
+          Thread.sleep(1000);
+          dataset.finalizeBlock(eb);
+          LOG.info("finalizeBlock finished");
+        } catch (Exception e) {
+          LOG.warn("Exception caught. This should not affect the test", e);
+        }
+      }
+    }
+
+    ResponderThread res = new ResponderThread();
+    res.start();
+    startFinalizeLatch.await();
+
+    Set<File> volumesToRemove = new HashSet<>();
+    volumesToRemove.add(
+        StorageLocation.parse(dataset.getVolume(eb).getBasePath()).getFile());
+    LOG.info("Removing volume " + volumesToRemove);
+    // Verify block report can be received during this
+    brt.start();
+    dataset.removeVolumes(volumesToRemove, true);
+    LOG.info("Volumes removed");
+    brReceivedLatch.await();
+  }
 }
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
+import com.google.common.base.Supplier;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.StorageType;
@@ -25,6 +26,7 @@ import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -33,9 +35,11 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.concurrent.TimeoutException;
 
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
 
 public class TestFsVolumeList {
@@ -57,11 +61,11 @@ public class TestFsVolumeList {
     blockScanner = new BlockScanner(null, blockScannerConf);
   }
 
-  @Test
+  @Test(timeout=30000)
   public void testGetNextVolumeWithClosedVolume() throws IOException {
     FsVolumeList volumeList = new FsVolumeList(
         Collections.<VolumeFailureInfo>emptyList(), blockScanner, blockChooser);
-    List<FsVolumeImpl> volumes = new ArrayList<>();
+    final List<FsVolumeImpl> volumes = new ArrayList<>();
     for (int i = 0; i < 3; i++) {
       File curDir = new File(baseDir, "nextvolume-" + i);
       curDir.mkdirs();
@@ -73,7 +77,19 @@ public class TestFsVolumeList {
     }
 
     // Close the second volume.
-    volumes.get(1).closeAndWait();
+    volumes.get(1).setClosed();
+    try {
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          return volumes.get(1).checkClosed();
+        }
+      }, 100, 3000);
+    } catch (TimeoutException e) {
+      fail("timed out while waiting for volume to be removed.");
+    } catch (InterruptedException ie) {
+      Thread.currentThread().interrupt();
+    }
     for (int i = 0; i < 10; i++) {
       try (FsVolumeReference ref =
           volumeList.getNextVolume(StorageType.DEFAULT, 128)) {
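
The updated tests poll volume.checkClosed() through GenericTestUtils.waitFor(Supplier<Boolean>, checkEveryMillis, waitForMillis) instead of relying on the removed blocking call. For readers without the Hadoop test utilities on hand, a rough standalone equivalent of such a polling helper could look like the sketch below; PollUtil is a hypothetical name and is not part of the patch, which uses Guava's Supplier rather than java.util.function:

    import java.util.concurrent.TimeoutException;
    import java.util.function.Supplier;

    // Hypothetical stand-in for a waitFor-style helper: poll a condition until it
    // becomes true or the overall timeout elapses.
    final class PollUtil {
      static void waitFor(Supplier<Boolean> check, long checkEveryMillis,
          long waitForMillis) throws TimeoutException, InterruptedException {
        long deadline = System.currentTimeMillis() + waitForMillis;
        while (!check.get()) {
          if (System.currentTimeMillis() > deadline) {
            throw new TimeoutException(
                "condition not met within " + waitForMillis + " ms");
          }
          Thread.sleep(checkEveryMillis);
        }
      }
    }
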
@@ -83,11 +99,11 @@ public class TestFsVolumeList {
     }
   }
 
-  @Test
+  @Test(timeout=30000)
   public void testCheckDirsWithClosedVolume() throws IOException {
     FsVolumeList volumeList = new FsVolumeList(
         Collections.<VolumeFailureInfo>emptyList(), blockScanner, blockChooser);
-    List<FsVolumeImpl> volumes = new ArrayList<>();
+    final List<FsVolumeImpl> volumes = new ArrayList<>();
     for (int i = 0; i < 3; i++) {
       File curDir = new File(baseDir, "volume-" + i);
       curDir.mkdirs();
@@ -98,12 +114,24 @@ public class TestFsVolumeList {
     }
 
     // Close the 2nd volume.
-    volumes.get(1).closeAndWait();
+    volumes.get(1).setClosed();
+    try {
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          return volumes.get(1).checkClosed();
+        }
+      }, 100, 3000);
+    } catch (TimeoutException e) {
+      fail("timed out while waiting for volume to be removed.");
+    } catch (InterruptedException ie) {
+      Thread.currentThread().interrupt();
+    }
     // checkDirs() should ignore the 2nd volume since it is closed.
     volumeList.checkDirs();
   }
 
-  @Test
+  @Test(timeout=30000)
   public void testReleaseVolumeRefIfNoBlockScanner() throws IOException {
     FsVolumeList volumeList = new FsVolumeList(
         Collections.<VolumeFailureInfo>emptyList(), null, blockChooser);