HDFS-5987. Fix findbugs warnings in Rolling Upgrade branch. (Contributed by szetszwo)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1570389 13f79535-47bb-0310-9956-ffa450edef68
commit 329c705181
parent 0b51bdd94f

@@ -65,3 +65,7 @@ HDFS-5535 subtasks:
 
     HDFS-5985. SimulatedFSDataset#disableAndPurgeTrashStorage should not throw
     UnsupportedOperationException. (jing9 via kihwal)
+
+    HDFS-5987. Fix findbugs warnings in Rolling Upgrade branch. (szetszwo via
+    Arpit Agarwal)
+
@@ -1037,7 +1037,7 @@ public class Journal implements Closeable {
     storage.getJournalManager().doRollback();
   }
 
-  public void discardSegments(long startTxId) throws IOException {
+  synchronized void discardSegments(long startTxId) throws IOException {
     storage.getJournalManager().discardSegments(startTxId);
     // we delete all the segments after the startTxId. let's reset committedTxnId
     committedTxnId.set(startTxId - 1);
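The hunk above makes discardSegments synchronized so that committedTxnId is always touched under the Journal lock, like the surrounding methods; this is the usual fix for findbugs' inconsistent-synchronization complaint. A minimal sketch of the pattern, using a hypothetical class rather than the Hadoop code:

    // Sketch only: a field guarded by "this" in some methods but not all of
    // them draws findbugs' inconsistent-synchronization warning. Making
    // every accessor synchronized, as discardSegments now is, clears it.
    class CommittedTxnTracker {
      private long committedTxnId = -1;

      synchronized long get() {
        return committedTxnId;
      }

      // An unsynchronized "void set(long v) { committedTxnId = v; }" here
      // would be a sometimes-locked access and would trigger the warning.
      synchronized void discardFrom(long startTxId) {
        committedTxnId = startTxId - 1;  // consistently locked write
      }
    }
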
@@ -27,21 +27,21 @@ import java.util.Properties;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.util.Daemon;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Manages storage for the set of BlockPoolSlices which share a particular
  * block pool id, on this DataNode.
@@ -382,8 +382,9 @@ public class BlockPoolSliceStorage extends Storage {
    * locations under current/
    *
    * @param trashRoot
+   * @throws IOException
    */
-  private int restoreBlockFilesFromTrash(File trashRoot) {
+  private int restoreBlockFilesFromTrash(File trashRoot) throws IOException {
     int filesRestored = 0;
     File restoreDirectory = null;
 
@@ -395,10 +396,15 @@ public class BlockPoolSliceStorage extends Storage {
 
       if (restoreDirectory == null) {
         restoreDirectory = new File(getRestoreDirectory(child));
-        restoreDirectory.mkdirs();
+        if (!restoreDirectory.mkdirs()) {
+          throw new IOException("Failed to create directory " + restoreDirectory);
+        }
       }
 
-      child.renameTo(new File(restoreDirectory, child.getName()));
+      final File newChild = new File(restoreDirectory, child.getName());
+      if (!child.renameTo(newChild)) {
+        throw new IOException("Failed to rename " + child + " to " + newChild);
+      }
       ++filesRestored;
     }
 
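The two hunks above, and the FsDatasetAsyncDiskService hunk below, address the same findbugs complaint: java.io.File#mkdirs and File#renameTo report failure through their boolean return value rather than by throwing, so ignoring that value silently swallows errors. A self-contained sketch of the checked idiom, with hypothetical names:

    import java.io.File;
    import java.io.IOException;

    class CheckedFileOps {
      // mkdirs() and renameTo() return false on failure instead of
      // throwing, so the caller must check the result explicitly.
      static void moveIntoDir(File src, File dir) throws IOException {
        if (!dir.exists() && !dir.mkdirs()) {
          throw new IOException("Failed to create directory " + dir);
        }
        final File dst = new File(dir, src.getName());
        if (!src.renameTo(dst)) {
          throw new IOException("Failed to rename " + src + " to " + dst);
        }
      }
    }

Whether the failure is fatal is the caller's choice: restoreBlockFilesFromTrash throws, while moveFiles below logs and returns false.
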
@@ -198,7 +198,10 @@ class FsDatasetAsyncDiskService {
     private boolean moveFiles() {
       File newBlockFile = new File(trashDirectory, blockFile.getName());
       File newMetaFile = new File(trashDirectory, metaFile.getName());
-      (new File(trashDirectory)).mkdirs();
+      if (!new File(trashDirectory).mkdirs()) {
+        LOG.error("Failed to create trash directory " + trashDirectory);
+        return false;
+      }
 
       if (LOG.isDebugEnabled()) {
         LOG.debug("Moving files " + blockFile.getName() + " and " +
@@ -65,16 +65,13 @@ public abstract class MD5FileUtils {
   }
 
   /**
-   * Read the md5 checksum stored alongside the given file, or null
-   * if no md5 is stored.
+   * Read the md5 file stored alongside the given data file
+   * and match the md5 file content.
    * @param dataFile the file containing data
-   * @return the checksum stored in dataFile.md5
+   * @return a matcher with two matched groups
+   *         where group(1) is the md5 string and group(2) is the data file path.
    */
-  public static MD5Hash readStoredMd5ForFile(File dataFile) throws IOException {
-    File md5File = getDigestFileForFile(dataFile);
-
-    String md5Line;
-
+  private static Matcher readStoredMd5(File md5File) throws IOException {
     if (!md5File.exists()) {
       return null;
     }
@@ -82,6 +79,7 @@ public abstract class MD5FileUtils {
     BufferedReader reader =
         new BufferedReader(new InputStreamReader(new FileInputStream(
             md5File), Charsets.UTF_8));
+    String md5Line;
     try {
       md5Line = reader.readLine();
       if (md5Line == null) { md5Line = ""; }
@@ -94,9 +92,20 @@ public abstract class MD5FileUtils {
 
     Matcher matcher = LINE_REGEX.matcher(md5Line);
     if (!matcher.matches()) {
-      throw new IOException("Invalid MD5 file at " + md5File
-          + " (does not match expected pattern)");
+      throw new IOException("Invalid MD5 file " + md5File + ": the content \""
+          + md5Line + "\" does not match the expected pattern.");
     }
+    return matcher;
+  }
+
+  /**
+   * Read the md5 checksum stored alongside the given data file.
+   * @param dataFile the file containing data
+   * @return the checksum stored in dataFile.md5
+   */
+  public static MD5Hash readStoredMd5ForFile(File dataFile) throws IOException {
+    final File md5File = getDigestFileForFile(dataFile);
+    final Matcher matcher = readStoredMd5(md5File);
     String storedHash = matcher.group(1);
     File referencedFile = new File(matcher.group(2));
 
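The hunk above funnels parsing through the single private readStoredMd5 helper, which returns the Matcher so callers can pick out group(1), the hex digest, and group(2), the recorded file name; renameMD5File below is then rewritten on top of it, dropping its hand-rolled split. Assuming LINE_REGEX encodes the usual md5sum-style line format "<32 hex chars> *<path>" (the pattern itself is not shown in this diff, so this is an assumption), a standalone illustration:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    class Md5LineDemo {
      // Assumed line format, mirroring md5sum output: "<digest> *<file>".
      private static final Pattern LINE_REGEX =
          Pattern.compile("([0-9a-f]{32}) [ \\*](.+)");

      public static void main(String[] args) {
        final String md5Line = "d41d8cd98f00b204e9800998ecf8427e *fsimage_0000001";
        final Matcher matcher = LINE_REGEX.matcher(md5Line);
        if (!matcher.matches()) {
          throw new IllegalStateException("Invalid MD5 line: " + md5Line);
        }
        System.out.println("digest = " + matcher.group(1));  // md5 string
        System.out.println("file   = " + matcher.group(2));  // data file path
      }
    }
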
@@ -155,19 +164,8 @@ public abstract class MD5FileUtils {
 
   public static void renameMD5File(File oldDataFile, File newDataFile)
       throws IOException {
-    File fromFile = getDigestFileForFile(oldDataFile);
-    BufferedReader in = null;
-    final String digestString;
-    try {
-      in = new BufferedReader(new InputStreamReader(new FileInputStream(
-          fromFile), Charsets.UTF_8));
-      String line = in.readLine();
-      String[] split = line.split(" \\*");
-      digestString = split[0];
-    } finally {
-      IOUtils.cleanup(LOG, in);
-    }
+    final File fromFile = getDigestFileForFile(oldDataFile);
+    final String digestString = readStoredMd5(fromFile).group(1);
 
     saveMD5File(newDataFile, digestString);
 
     if (!fromFile.delete()) {
@@ -253,33 +253,57 @@ public class TestRollingUpgrade {
 
       final Path foo = new Path("/foo");
       final Path bar = new Path("/bar");
+      cluster.getFileSystem().mkdirs(foo);
 
-      {
-        final DistributedFileSystem dfs = cluster.getFileSystem();
-        dfs.mkdirs(foo);
-
-        //start rolling upgrade
-        dfs.rollingUpgrade(RollingUpgradeAction.START);
-
-        dfs.mkdirs(bar);
-
-        Assert.assertTrue(dfs.exists(foo));
-        Assert.assertTrue(dfs.exists(bar));
-      }
-
-      // Restart should succeed!
+      startRollingUpgrade(foo, bar, cluster);
+      cluster.getFileSystem().rollEdits();
+      cluster.getFileSystem().rollEdits();
+      rollbackRollingUpgrade(foo, bar, cluster);
+
+      startRollingUpgrade(foo, bar, cluster);
+      cluster.getFileSystem().rollEdits();
+      cluster.getFileSystem().rollEdits();
+      rollbackRollingUpgrade(foo, bar, cluster);
+
+      startRollingUpgrade(foo, bar, cluster);
       cluster.restartNameNode();
+      rollbackRollingUpgrade(foo, bar, cluster);
 
-      cluster.restartNameNode("-rollingUpgrade", "rollback");
-      {
-        final DistributedFileSystem dfs = cluster.getFileSystem();
-        Assert.assertTrue(dfs.exists(foo));
-        Assert.assertFalse(dfs.exists(bar));
-      }
+      startRollingUpgrade(foo, bar, cluster);
+      cluster.restartNameNode();
+      rollbackRollingUpgrade(foo, bar, cluster);
+
+      startRollingUpgrade(foo, bar, cluster);
+      rollbackRollingUpgrade(foo, bar, cluster);
+
+      startRollingUpgrade(foo, bar, cluster);
+      rollbackRollingUpgrade(foo, bar, cluster);
     } finally {
       if(cluster != null) cluster.shutdown();
     }
   }
 
+  private static void startRollingUpgrade(Path foo, Path bar,
+      MiniDFSCluster cluster) throws IOException {
+    final DistributedFileSystem dfs = cluster.getFileSystem();
+
+    //start rolling upgrade
+    dfs.rollingUpgrade(RollingUpgradeAction.START);
+
+    dfs.mkdirs(bar);
+
+    Assert.assertTrue(dfs.exists(foo));
+    Assert.assertTrue(dfs.exists(bar));
+  }
+
+  private static void rollbackRollingUpgrade(Path foo, Path bar,
+      MiniDFSCluster cluster) throws IOException {
+    cluster.restartNameNode("-rollingUpgrade", "rollback");
+
+    final DistributedFileSystem dfs = cluster.getFileSystem();
+    Assert.assertTrue(dfs.exists(foo));
+    Assert.assertFalse(dfs.exists(bar));
+  }
+
   @Test
   public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception {