HDDS-267. Handle consistency issues during container update/close.

Hanisha Koneru 2018-08-08 16:47:25 -07:00
parent 8478732bb2
commit d81cd3611a
6 changed files with 80 additions and 75 deletions

View File

@ -257,7 +257,6 @@ public synchronized boolean isClosed() {
* Marks this container as closed.
*/
public synchronized void closeContainer() {
// TODO: closed or closing here
setState(ContainerLifeCycleState.CLOSED);
}

View File

@ -138,7 +138,7 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy
// Create .container file
File containerFile = getContainerFile();
writeToContainerFile(containerFile, true);
createContainerFile(containerFile);
} catch (StorageContainerException ex) {
if (containerMetaDataPath != null && containerMetaDataPath.getParentFile()
@ -165,11 +165,11 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy
}
/**
* Creates .container file and checksum file.
* Writes to .container file.
*
* @param containerFile
* @param isCreate true if we are creating a new container file and false if
* we are updating an existing container file.
* @param containerFile container file name
* @param isCreate True if creating a new file, false if updating an
* existing container file.
* @throws StorageContainerException
*/
private void writeToContainerFile(File containerFile, boolean isCreate)
@ -181,19 +181,18 @@ private void writeToContainerFile(File containerFile, boolean isCreate)
ContainerDataYaml.createContainerFile(
ContainerType.KeyValueContainer, containerData, tempContainerFile);
// NativeIO.renameTo is an atomic function. But it might fail if the
// container file already exists. Hence, we handle the two cases
// separately.
if (isCreate) {
// When creating a new container, .container file should not exist
// already.
NativeIO.renameTo(tempContainerFile, containerFile);
} else {
// When updating a container, the .container file should exist. If
// not, the container is in an inconsistent state.
Files.move(tempContainerFile.toPath(), containerFile.toPath(),
StandardCopyOption.REPLACE_EXISTING);
}
} catch (IOException ex) {
throw new StorageContainerException("Error during creation of " +
throw new StorageContainerException("Error while creating/ updating " +
".container file. ContainerID: " + containerId, ex,
CONTAINER_FILES_CREATE_ERROR);
} finally {
@ -206,27 +205,14 @@ private void writeToContainerFile(File containerFile, boolean isCreate)
}
}
private void createContainerFile(File containerFile)
throws StorageContainerException {
writeToContainerFile(containerFile, true);
}
private void updateContainerFile(File containerFile)
throws StorageContainerException {
long containerId = containerData.getContainerID();
if (!containerFile.exists()) {
throw new StorageContainerException("Container is an Inconsistent " +
"state, missing .container file. ContainerID: " + containerId,
INVALID_CONTAINER_STATE);
}
try {
writeToContainerFile(containerFile, false);
} catch (IOException e) {
//TODO : Container update failure is not handled currently. Might
// lead to loss of .container file. When Update container feature
// support is added, this failure should also be handled.
throw new StorageContainerException("Container update failed. " +
"ContainerID: " + containerId, CONTAINER_FILES_CREATE_ERROR);
}
writeToContainerFile(containerFile, false);
}
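
The create/update split above exists because the two rename primitives behave differently: NativeIO.renameTo is atomic but can fail when the destination already exists, while Files.move with REPLACE_EXISTING overwrites it. A minimal, self-contained sketch of the same write-to-temp-then-rename pattern using only java.nio (class and method names here are illustrative, not the HDDS code):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

final class AtomicFileWriteSketch {

  /**
   * Writes content to a temp file in the target's directory, then renames it
   * into place. A create expects the target to be absent; an update replaces
   * the existing file.
   */
  static void writeAndRename(Path target, byte[] content, boolean isCreate)
      throws IOException {
    Path temp = Files.createTempFile(
        target.getParent(), target.getFileName().toString(), ".tmp");
    try {
      Files.write(temp, content);
      if (isCreate) {
        // Without REPLACE_EXISTING, move() throws FileAlreadyExistsException
        // if the target is already present -- the "new container" case.
        Files.move(temp, target);
      } else {
        // An update expects the target to exist and replaces it in place;
        // within one directory this is a simple rename on most filesystems.
        Files.move(temp, target, StandardCopyOption.REPLACE_EXISTING);
      }
    } finally {
      Files.deleteIfExists(temp);
    }
  }
}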
@ -256,19 +242,15 @@ public void close() throws StorageContainerException {
// complete this action
try {
writeLock();
long containerId = containerData.getContainerID();
if(!containerData.isValid()) {
LOG.debug("Invalid container data. Container Id: {}", containerId);
throw new StorageContainerException("Invalid container data. " +
"ContainerID: " + containerId, INVALID_CONTAINER_STATE);
}
containerData.closeContainer();
File containerFile = getContainerFile();
// update the new container data to .container File
updateContainerFile(containerFile);
} catch (StorageContainerException ex) {
// Failed to update .container file. Reset the state to CLOSING
containerData.setState(ContainerLifeCycleState.CLOSING);
throw ex;
} finally {
writeUnlock();
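
close() above flips the in-memory state to CLOSED before persisting, and drops back to CLOSING if rewriting the .container file fails, so the close can be retried later. That change-then-roll-back shape, reduced to a standalone sketch (the enum and interface here are stand-ins, not the HDDS types):

import java.io.IOException;

final class ContainerCloseSketch {

  // Stand-in for ContainerProtos.ContainerLifeCycleState.
  enum LifeCycle { OPEN, CLOSING, CLOSED, INVALID }

  // Stand-in for updateContainerFile(containerFile), which may fail.
  interface Persister {
    void persist(LifeCycle newState) throws IOException;
  }

  private LifeCycle state = LifeCycle.CLOSING;

  synchronized void close(Persister persister) throws IOException {
    state = LifeCycle.CLOSED;          // optimistic in-memory transition
    try {
      persister.persist(state);        // write the new state to disk
    } catch (IOException e) {
      state = LifeCycle.CLOSING;       // reset so a later close can retry
      throw e;
    }
  }
}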
@ -332,8 +314,6 @@ public void update(Map<String, String> metadata, boolean forceUpdate)
// update the new container data to .container File
updateContainerFile(containerFile);
} catch (StorageContainerException ex) {
// TODO:
// On error, reset the metadata.
containerData.setMetadata(oldMetadata);
throw ex;
} finally {

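update() uses the same compensation: the new metadata is applied in memory first, and the old map is restored if the .container file cannot be rewritten. A compact sketch of that snapshot-and-restore step (names assumed, not the actual KeyValueContainer code):

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

final class MetadataUpdateSketch {

  // Stand-in for updateContainerFile(containerFile), which may fail.
  interface Persister {
    void persist(Map<String, String> metadata) throws IOException;
  }

  private Map<String, String> metadata = new HashMap<>();

  void update(Map<String, String> newEntries, Persister persister)
      throws IOException {
    Map<String, String> oldMetadata = new HashMap<>(metadata); // snapshot
    metadata.putAll(newEntries);                               // apply in memory
    try {
      persister.persist(metadata);                             // write .container file
    } catch (IOException e) {
      metadata = oldMetadata;                                  // restore the snapshot
      throw e;
    }
  }
}
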
View File

@ -28,6 +28,8 @@
.ContainerCommandRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandResponseProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerLifeCycleState;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerType;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@ -76,6 +78,8 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.Result.CLOSED_CONTAINER_RETRY;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.Result.CONTAINER_INTERNAL_ERROR;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@ -378,8 +382,18 @@ ContainerCommandResponseProto handleCloseContainer(
return ContainerUtils.malformedRequest(request);
}
long containerID = kvContainer.getContainerData().getContainerID();
ContainerLifeCycleState containerState = kvContainer.getContainerState();
try {
checkContainerOpen(kvContainer);
if (containerState == ContainerLifeCycleState.CLOSED) {
throw new StorageContainerException("Container already closed. " +
"ContainerID: " + containerID, CLOSED_CONTAINER_RETRY);
} else if (containerState == ContainerLifeCycleState.INVALID) {
LOG.debug("Invalid container data. ContainerID: {}", containerID);
throw new StorageContainerException("Invalid container data. " +
"ContainerID: " + containerID, INVALID_CONTAINER_STATE);
}
KeyValueContainerData kvData = kvContainer.getContainerData();
@ -773,10 +787,9 @@ ContainerCommandResponseProto handleUnsupportedOp(
private void checkContainerOpen(KeyValueContainer kvContainer)
throws StorageContainerException {
ContainerProtos.ContainerLifeCycleState containerState =
kvContainer.getContainerState();
ContainerLifeCycleState containerState = kvContainer.getContainerState();
if (containerState == ContainerProtos.ContainerLifeCycleState.OPEN) {
if (containerState == ContainerLifeCycleState.OPEN) {
return;
} else {
String msg = "Requested operation not allowed as ContainerState is " +

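With this change a close request can come back with three distinct results: SUCCESS, CLOSED_CONTAINER_RETRY for a container that is already closed, or INVALID_CONTAINER_STATE for unusable container data. How a caller reacts to each is not defined by this patch; the branch below is only a hypothetical illustration of consuming those result codes:

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
    .ContainerCommandResponseProto;

final class CloseResultSketch {

  /** Hypothetical policy: is the container effectively closed after this response? */
  static boolean closedOrAlreadyClosed(ContainerCommandResponseProto response) {
    switch (response.getResult()) {
      case SUCCESS:
        return true;
      case CLOSED_CONTAINER_RETRY:
        // Assumed here to mean the work is already done and can be skipped.
        return true;
      case INVALID_CONTAINER_STATE:
        // Container data is unusable; assumed to need separate handling.
        return false;
      default:
        return false;
    }
  }
}
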
View File

@ -33,7 +33,6 @@
import org.apache.hadoop.ozone.container.common.volume
.RoundRobinVolumeChoosingPolicy;
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.DiskChecker;
@ -242,21 +241,6 @@ public void testCloseContainer() throws Exception {
keyValueContainerData.getState());
}
@Test
public void testCloseInvalidContainer() throws Exception {
try {
keyValueContainerData.setState(ContainerProtos.ContainerLifeCycleState
.INVALID);
keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
keyValueContainer.close();
fail("testCloseInvalidContainer failed");
} catch (StorageContainerException ex) {
assertEquals(ContainerProtos.Result.INVALID_CONTAINER_STATE,
ex.getResult());
GenericTestUtils.assertExceptionContains("Invalid container data", ex);
}
}
@Test
public void testUpdateContainer() throws IOException {
keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);

View File

@ -25,12 +25,16 @@
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandRequestProto;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestRule;
@ -59,8 +63,8 @@ public class TestKeyValueHandler {
@Rule
public TestRule timeout = new Timeout(300000);
private HddsDispatcher dispatcher;
private KeyValueHandler handler;
private static HddsDispatcher dispatcher;
private static KeyValueHandler handler;
private final static String DATANODE_UUID = UUID.randomUUID().toString();
@ -69,14 +73,11 @@ public class TestKeyValueHandler {
private static final long DUMMY_CONTAINER_ID = 9999;
@Test
/**
* Test that Handler handles different command types correctly.
*/
public void testHandlerCommandHandling() throws Exception{
@BeforeClass
public static void setup() throws StorageContainerException {
// Create mock HddsDispatcher and KeyValueHandler.
this.handler = Mockito.mock(KeyValueHandler.class);
this.dispatcher = Mockito.mock(HddsDispatcher.class);
handler = Mockito.mock(KeyValueHandler.class);
dispatcher = Mockito.mock(HddsDispatcher.class);
Mockito.when(dispatcher.getHandler(any())).thenReturn(handler);
Mockito.when(dispatcher.dispatch(any())).thenCallRealMethod();
Mockito.when(dispatcher.getContainer(anyLong())).thenReturn(
@ -84,6 +85,13 @@ public void testHandlerCommandHandling() throws Exception{
Mockito.when(handler.handle(any(), any())).thenCallRealMethod();
doCallRealMethod().when(dispatcher).setMetricsForTesting(any());
dispatcher.setMetricsForTesting(Mockito.mock(ContainerMetrics.class));
}
@Test
/**
* Test that Handler handles different command types correctly.
*/
public void testHandlerCommandHandling() throws Exception {
// Test Create Container Request handling
ContainerCommandRequestProto createContainerRequest =
@ -250,4 +258,33 @@ private ContainerCommandRequestProto getDummyCommandRequestProto(
}
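
testHandlerCommandHandling feeds one request per command type through the mocked dispatcher set up in @BeforeClass. A hedged sketch of what one such case plausibly looks like for CreateContainer, built the same way as the close request in testCloseInvalidContainer below (the verify step is an assumption about how the routing is checked, not a quote of the test):

@Test
public void testCreateContainerRouting() {
  // Sketch only: one plausible per-command case, assuming the mocked
  // dispatcher/handler from setup() and the class constants above.
  ContainerCommandRequestProto createContainerRequest =
      ContainerProtos.ContainerCommandRequestProto.newBuilder()
          .setCmdType(ContainerProtos.Type.CreateContainer)
          .setContainerID(DUMMY_CONTAINER_ID)
          .setDatanodeUuid(DATANODE_UUID)
          .setCreateContainer(
              ContainerProtos.CreateContainerRequestProto.getDefaultInstance())
          .build();

  dispatcher.dispatch(createContainerRequest);
  // dispatch() calls the real handle(), so routing to the handler method
  // can be verified on the mock.
  Mockito.verify(handler, Mockito.times(1))
      .handleCreateContainer(Mockito.any(ContainerCommandRequestProto.class),
          Mockito.any());
}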
@Test
public void testCloseInvalidContainer() {
long containerID = 1234L;
Configuration conf = new Configuration();
KeyValueContainerData kvData = new KeyValueContainerData(containerID, 1);
KeyValueContainer container = new KeyValueContainer(kvData, conf);
kvData.setState(ContainerProtos.ContainerLifeCycleState.INVALID);
// Create Close container request
ContainerCommandRequestProto closeContainerRequest =
ContainerProtos.ContainerCommandRequestProto.newBuilder()
.setCmdType(ContainerProtos.Type.CloseContainer)
.setContainerID(DUMMY_CONTAINER_ID)
.setDatanodeUuid(DATANODE_UUID)
.setCloseContainer(ContainerProtos.CloseContainerRequestProto
.getDefaultInstance())
.build();
dispatcher.dispatch(closeContainerRequest);
Mockito.when(handler.handleCloseContainer(any(), any()))
.thenCallRealMethod();
// Closing invalid container should return error response.
ContainerProtos.ContainerCommandResponseProto response =
handler.handleCloseContainer(closeContainerRequest, container);
Assert.assertTrue("Close container should return Invalid container error",
response.getResult().equals(
ContainerProtos.Result.INVALID_CONTAINER_STATE));
}
}
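
The new testCloseInvalidContainer above covers the INVALID path; the CLOSED_CONTAINER_RETRY branch added in handleCloseContainer could be pinned down the same way. A sketch of such a companion test, not part of this patch and assuming the same class setup:

@Test
public void testCloseClosedContainer() {
  // Hypothetical companion test (not part of this patch), mirroring
  // testCloseInvalidContainer above but exercising the already-closed branch.
  long containerID = 1235L;
  Configuration conf = new Configuration();
  KeyValueContainerData kvData = new KeyValueContainerData(containerID, 1);
  KeyValueContainer container = new KeyValueContainer(kvData, conf);
  kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSED);

  ContainerCommandRequestProto closeContainerRequest =
      ContainerProtos.ContainerCommandRequestProto.newBuilder()
          .setCmdType(ContainerProtos.Type.CloseContainer)
          .setContainerID(DUMMY_CONTAINER_ID)
          .setDatanodeUuid(DATANODE_UUID)
          .setCloseContainer(ContainerProtos.CloseContainerRequestProto
              .getDefaultInstance())
          .build();

  Mockito.when(handler.handleCloseContainer(any(), any()))
      .thenCallRealMethod();
  // Closing an already-closed container should map to CLOSED_CONTAINER_RETRY.
  ContainerProtos.ContainerCommandResponseProto response =
      handler.handleCloseContainer(closeContainerRequest, container);
  Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_RETRY,
      response.getResult());
}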

View File

@ -775,14 +775,6 @@ public void testUpdateContainer() throws IOException {
Assert.assertEquals("bilbo_new_1",
actualNewData.getMetadata().get("owner"));
// Update a non-existing container
exception.expect(StorageContainerException.class);
exception.expectMessage("Container is an Inconsistent " +
"state, missing .container file.");
Container nonExistentContainer = new KeyValueContainer(
new KeyValueContainerData(RandomUtils.nextLong(),
ContainerTestHelper.CONTAINER_MAX_SIZE_GB), conf);
nonExistentContainer.update(newMetadata, false);
}
private KeyData writeKeyHelper(BlockID blockID)