HDFS-12016. Ozone: SCM: Container metadata are not loaded properly after datanode restart. Contributed by Xiaoyu Yao.
parent 77a4b248e4
commit 723da57644
@@ -39,10 +39,12 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.nio.file.Paths;
 import java.util.LinkedList;
 import java.util.List;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX;
 
 /**
  * Ozone main class sets up the network server and initializes the container
@@ -73,7 +75,8 @@
         OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS);
     if (paths != null && paths.length > 0) {
       for (String p : paths) {
-        locations.add(StorageLocation.parse(p));
+        locations.add(StorageLocation.parse(
+            Paths.get(p).resolve(CONTAINER_ROOT_PREFIX).toString()));
       }
     } else {
       getDataDir(locations);
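The hunk above is the heart of the fix: each configured metadata directory is now parsed with the container root prefix resolved beneath it, so the storage locations scanned after a datanode restart match the layout under which containers were originally written. Below is a minimal standalone sketch of that path resolution; the prefix value "containers" and the directory name are placeholders for illustration, and only the Paths.resolve(CONTAINER_ROOT_PREFIX) pattern comes from the patch.

import java.nio.file.Path;
import java.nio.file.Paths;

public class ContainerPathSketch {
  // Placeholder value; the real constant lives in OzoneConsts.CONTAINER_ROOT_PREFIX.
  private static final String CONTAINER_ROOT_PREFIX = "containers";

  public static void main(String[] args) {
    // Hypothetical entry from ozone.container.metadata.dirs.
    String configuredDir = "/data/ozone/meta";
    // Before the fix, the raw directory was parsed as the storage location.
    Path before = Paths.get(configuredDir);
    // After the fix, the container root prefix is resolved beneath it.
    Path after = Paths.get(configuredDir).resolve(CONTAINER_ROOT_PREFIX);
    System.out.println("before: " + before); // /data/ozone/meta
    System.out.println("after:  " + after);  // /data/ozone/meta/containers
  }
}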
@@ -413,6 +413,8 @@ public final class MiniOzoneCluster extends MiniDFSCluster
     // and create SCM under that directory.
     Path scmPath = Paths.get(path, runID.toString(), "cont-meta");
     Files.createDirectories(scmPath);
+    Path containerPath = scmPath.resolve(OzoneConsts.CONTAINER_ROOT_PREFIX);
+    Files.createDirectories(containerPath);
     conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS, scmPath
         .toString());
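MiniOzoneCluster mirrors the same layout when it wires up a test cluster: it pre-creates the container root under the SCM metadata path so the datanode finds it when reloading container metadata. A small sketch of why Files.createDirectories fits here; the paths and prefix value are hypothetical stand-ins.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class PreCreateSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical stand-ins for the cluster's scmPath and prefix value.
    Path scmPath = Paths.get("/tmp/ozone-demo", "run-0001", "cont-meta");
    Path containerPath = scmPath.resolve("containers");
    // createDirectories creates any missing parents and succeeds even if
    // the directory already exists, so it is safe on every (re)start.
    Files.createDirectories(containerPath);
    System.out.println("container root ready at " + containerPath);
  }
}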
@@ -43,6 +43,8 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.FileInputStream;
@@ -60,6 +62,8 @@ import java.util.Map;
 import java.util.ArrayList;
 import java.util.UUID;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper
     .createSingleNodePipeline;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.getChunk;
@@ -81,6 +85,8 @@ public class TestContainerPersistence {
   @Rule
   public Timeout testTimeout = new Timeout(300000);
 
+  private static Logger LOG =
+      LoggerFactory.getLogger(TestContainerPersistence.class);
   private static String path;
   private static ContainerManagerImpl containerManager;
   private static ChunkManagerImpl chunkManager;
@@ -121,15 +127,31 @@
     if (!new File(path).exists() && !new File(path).mkdirs()) {
       throw new IOException("Unable to create paths. " + path);
     }
+    StorageLocation loc = StorageLocation.parse(
+        Paths.get(path).resolve(CONTAINER_ROOT_PREFIX).toString());
+
     pathLists.clear();
     containerManager.getContainerMap().clear();
-    pathLists.add(StorageLocation.parse(path.toString()));
+
+    if (!new File(loc.getNormalizedUri()).mkdirs()) {
+      throw new IOException("unable to create paths. " +
+          loc.getNormalizedUri());
+    }
+    pathLists.add(loc);
     containerManager.init(conf, pathLists);
   }
 
   @After
   public void cleanupDir() throws IOException {
+    // Clean up SCM metadata
+    LOG.info("Deleting {}", path);
     FileUtils.deleteDirectory(new File(path));
+
+    // Clean up SCM datanode container metadata/data
+    for (String dir : conf.getStrings(DFS_DATANODE_DATA_DIR_KEY)) {
+      StorageLocation location = StorageLocation.parse(dir);
+      FileUtils.deleteDirectory(new File(location.getNormalizedUri()));
+    }
   }
 
   @Test
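The new @After hook makes each test start from a clean slate: it removes the SCM metadata directory and then every datanode data directory listed under dfs.datanode.data.dir, so containers left behind by one test cannot leak into the next. A self-contained sketch of that cleanup pattern using commons-io follows; the directory names are hypothetical.

import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;

public class CleanupSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical directories standing in for the test's metadata path
    // and the comma-separated dfs.datanode.data.dir entries.
    String metadataPath = "/tmp/ozone-demo/cont-meta";
    String[] dataDirs = {"/tmp/ozone-demo/data1", "/tmp/ozone-demo/data2"};

    // Delete the SCM metadata directory first...
    FileUtils.deleteDirectory(new File(metadataPath));
    // ...then every datanode data directory, so no stale containers
    // survive into the next test. deleteDirectory is a no-op when the
    // directory does not exist.
    for (String dir : dataDirs) {
      FileUtils.deleteDirectory(new File(dir));
    }
  }
}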
@@ -41,6 +41,7 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.net.URISyntaxException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.List;
@@ -171,6 +172,44 @@ public class TestKeys {
     assertNotNull(helper.getFile());
   }
 
+  private void restartDatanode(int datanodeIdx)
+      throws IOException, OzoneException, URISyntaxException {
+    cluster.restartDataNode(datanodeIdx);
+    // refresh the datanode endpoint uri after datanode restart
+    DataNode dataNode = cluster.getDataNodes().get(datanodeIdx);
+    final int port = dataNode.getInfoPort();
+    client.setEndPoint(String.format("http://localhost:%d", port));
+  }
+
+  @Test
+  public void testPutAndGetKeyWithDnRestart()
+      throws OzoneException, IOException, URISyntaxException {
+
+    PutHelper helper = new PutHelper();
+    String keyName = helper.putKey();
+    assertNotNull(helper.getBucket());
+    assertNotNull(helper.getFile());
+
+    // restart the datanode
+    restartDatanode(0);
+
+    // verify getKey after the datanode restart
+    String newFileName = path + "/" + OzoneUtils.getRequestID().toLowerCase();
+    Path newPath = Paths.get(newFileName);
+
+    helper.getBucket().getKey(keyName, newPath);
+
+    FileInputStream original = new FileInputStream(helper.getFile());
+    FileInputStream downloaded = new FileInputStream(newPath.toFile());
+
+    String originalHash = DigestUtils.sha256Hex(original);
+    String downloadedHash = DigestUtils.sha256Hex(downloaded);
+
+    assertEquals(
+        "Sha256 does not match between original file and downloaded file.",
+        originalHash, downloadedHash);
+  }
+
   @Test
   public void testPutAndGetKey() throws OzoneException, IOException {
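The restart test distills the bug's symptom into a regression check: write a key, bounce the datanode, re-point the REST client at the new info port, read the key back, and compare SHA-256 digests. Below is a standalone sketch of the digest comparison with commons-codec; the file paths are hypothetical, and unlike the test code above, try-with-resources also closes the streams.

import java.io.FileInputStream;
import java.io.IOException;
import org.apache.commons.codec.digest.DigestUtils;

public class HashCompareSketch {
  // Compares two files by SHA-256, the same check the new test applies
  // to the uploaded and re-downloaded key. Paths are hypothetical.
  public static void main(String[] args) throws IOException {
    try (FileInputStream original = new FileInputStream("/tmp/original.bin");
         FileInputStream downloaded = new FileInputStream("/tmp/downloaded.bin")) {
      // sha256Hex consumes each stream fully and returns a hex digest.
      String originalHash = DigestUtils.sha256Hex(original);
      String downloadedHash = DigestUtils.sha256Hex(downloaded);
      if (!originalHash.equals(downloadedHash)) {
        throw new IllegalStateException("SHA-256 mismatch after restart");
      }
      System.out.println("match: " + originalHash);
    }
  }
}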