HDFS-5987. Fix findbugs warnings in Rolling Upgrade branch. (Contributed by szetszwo)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1570389 13f79535-47bb-0310-9956-ffa450edef68
Author: Arpit Agarwal
Date:   2014-02-20 23:21:11 +00:00
Parent: 0b51bdd94f
Commit: 329c705181

6 changed files with 86 additions and 51 deletions


@@ -65,3 +65,7 @@ HDFS-5535 subtasks:
 
     HDFS-5985. SimulatedFSDataset#disableAndPurgeTrashStorage should not throw
     UnsupportedOperationException. (jing9 via kihwal)
+
+    HDFS-5987. Fix findbugs warnings in Rolling Upgrade branch. (szetszwo via
+    Arpit Agarwal)
+


@@ -1037,7 +1037,7 @@ public class Journal implements Closeable {
     storage.getJournalManager().doRollback();
   }
 
-  public void discardSegments(long startTxId) throws IOException {
+  synchronized void discardSegments(long startTxId) throws IOException {
     storage.getJournalManager().discardSegments(startTxId);
     // we delete all the segments after the startTxId. let's reset committedTxnId
     committedTxnId.set(startTxId - 1);
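Note: adding the synchronized keyword is the usual fix for findbugs' inconsistent-synchronization warning (IS2_INCONSISTENT_SYNC), reported when a field is guarded by a lock on most paths but not all of them. A minimal sketch of the pattern, with illustrative names rather than the actual Journal fields:

    class JournalLikeSketch {
      private long committedTxnId = -1; // guarded by "this" elsewhere

      synchronized void commit(long txId) {
        committedTxnId = txId;
      }

      // Before the fix: writes the field without the lock that the
      // other methods hold, which findbugs flags.
      void discardSegmentsBroken(long startTxId) {
        committedTxnId = startTxId - 1;
      }

      // After the fix: the same work done under the instance lock.
      synchronized void discardSegments(long startTxId) {
        committedTxnId = startTxId - 1;
      }
    }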


@@ -27,21 +27,21 @@ import java.util.Properties;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.util.Daemon;
-
-import com.google.common.annotations.VisibleForTesting;
 
 /**
  * Manages storage for the set of BlockPoolSlices which share a particular
  * block pool id, on this DataNode.
@@ -382,8 +382,9 @@ public class BlockPoolSliceStorage extends Storage {
    * locations under current/
    *
    * @param trashRoot
+   * @throws IOException
    */
-  private int restoreBlockFilesFromTrash(File trashRoot) {
+  private int restoreBlockFilesFromTrash(File trashRoot) throws IOException {
     int filesRestored = 0;
     File restoreDirectory = null;
@@ -395,10 +396,15 @@ public class BlockPoolSliceStorage extends Storage {
 
       if (restoreDirectory == null) {
         restoreDirectory = new File(getRestoreDirectory(child));
-        restoreDirectory.mkdirs();
+        if (!restoreDirectory.mkdirs()) {
+          throw new IOException("Failed to create directory " + restoreDirectory);
+        }
       }
 
-      child.renameTo(new File(restoreDirectory, child.getName()));
+      final File newChild = new File(restoreDirectory, child.getName());
+      if (!child.renameTo(newChild)) {
+        throw new IOException("Failed to rename " + child + " to " + newChild);
+      }
       ++filesRestored;
     }
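Note: the two changes above address findbugs' ignored-return-value pattern (RV_RETURN_VALUE_IGNORED_BAD_PRACTICE): java.io.File#mkdirs() and File#renameTo() signal failure through their boolean result rather than an exception, so an unchecked call swallows errors silently. A self-contained sketch of the checked idiom the patch adopts, with illustrative names:

    import java.io.File;
    import java.io.IOException;

    class CheckedFileOps {
      // Move "child" into "dir", surfacing failures that unchecked
      // mkdirs()/renameTo() calls would hide.
      static void moveInto(File child, File dir) throws IOException {
        if (!dir.mkdirs()) {
          throw new IOException("Failed to create directory " + dir);
        }
        final File target = new File(dir, child.getName());
        if (!child.renameTo(target)) {
          throw new IOException("Failed to rename " + child + " to " + target);
        }
      }
    }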


@@ -198,7 +198,10 @@ class FsDatasetAsyncDiskService {
     private boolean moveFiles() {
      File newBlockFile = new File(trashDirectory, blockFile.getName());
      File newMetaFile = new File(trashDirectory, metaFile.getName());
-      (new File(trashDirectory)).mkdirs();
+      if (!new File(trashDirectory).mkdirs()) {
+        LOG.error("Failed to create trash directory " + trashDirectory);
+        return false;
+      }
 
      if (LOG.isDebugEnabled()) {
        LOG.debug("Moving files " + blockFile.getName() + " and " +


@@ -65,16 +65,13 @@ public abstract class MD5FileUtils {
   }
 
   /**
-   * Read the md5 checksum stored alongside the given file, or null
-   * if no md5 is stored.
+   * Read the md5 file stored alongside the given data file
+   * and match the md5 file content.
    * @param dataFile the file containing data
-   * @return the checksum stored in dataFile.md5
+   * @return a matcher with two matched groups
+   *         where group(1) is the md5 string and group(2) is the data file path.
    */
-  public static MD5Hash readStoredMd5ForFile(File dataFile) throws IOException {
-    File md5File = getDigestFileForFile(dataFile);
-    String md5Line;
+  private static Matcher readStoredMd5(File md5File) throws IOException {
     if (!md5File.exists()) {
       return null;
     }
@@ -82,6 +79,7 @@ public abstract class MD5FileUtils {
     BufferedReader reader =
         new BufferedReader(new InputStreamReader(new FileInputStream(
             md5File), Charsets.UTF_8));
+    String md5Line;
     try {
       md5Line = reader.readLine();
       if (md5Line == null) { md5Line = ""; }
@@ -94,9 +92,20 @@ public abstract class MD5FileUtils {
     Matcher matcher = LINE_REGEX.matcher(md5Line);
     if (!matcher.matches()) {
-      throw new IOException("Invalid MD5 file at " + md5File
-          + " (does not match expected pattern)");
+      throw new IOException("Invalid MD5 file " + md5File + ": the content \""
+          + md5Line + "\" does not match the expected pattern.");
     }
+    return matcher;
+  }
+
+  /**
+   * Read the md5 checksum stored alongside the given data file.
+   * @param dataFile the file containing data
+   * @return the checksum stored in dataFile.md5
+   */
+  public static MD5Hash readStoredMd5ForFile(File dataFile) throws IOException {
+    final File md5File = getDigestFileForFile(dataFile);
+    final Matcher matcher = readStoredMd5(md5File);
 
     String storedHash = matcher.group(1);
     File referencedFile = new File(matcher.group(2));
@@ -155,19 +164,8 @@ public abstract class MD5FileUtils {
   public static void renameMD5File(File oldDataFile, File newDataFile)
       throws IOException {
-    File fromFile = getDigestFileForFile(oldDataFile);
-    BufferedReader in = null;
-    final String digestString;
-    try {
-      in = new BufferedReader(new InputStreamReader(new FileInputStream(
-          fromFile), Charsets.UTF_8));
-      String line = in.readLine();
-      String[] split = line.split(" \\*");
-      digestString = split[0];
-    } finally {
-      IOUtils.cleanup(LOG, in);
-    }
+    final File fromFile = getDigestFileForFile(oldDataFile);
+    final String digestString = readStoredMd5(fromFile).group(1);
     saveMD5File(newDataFile, digestString);
 
     if (!fromFile.delete()) {
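Note: after this refactoring both readers go through readStoredMd5(), so the ".md5" sidecar format is parsed in exactly one place. The sidecar holds a single md5sum-style line, "<32 hex digits> *<data file name>", which LINE_REGEX splits into two groups; the old renameMD5File code re-parsed it by hand with split(" \\*"). A standalone sketch of that parse, with a pattern assumed equivalent to LINE_REGEX:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    class Md5LineSketch {
      // Assumed equivalent of MD5FileUtils.LINE_REGEX.
      private static final Pattern LINE =
          Pattern.compile("([0-9a-f]{32}) \\*(.+)");

      public static void main(String[] args) {
        final Matcher m = LINE.matcher(
            "d41d8cd98f00b204e9800998ecf8427e *fsimage_0000000000000000000");
        if (m.matches()) {
          System.out.println("md5  = " + m.group(1)); // the checksum
          System.out.println("file = " + m.group(2)); // the data file name
        }
      }
    }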


@@ -253,34 +253,58 @@ public class TestRollingUpgrade {
       final Path foo = new Path("/foo");
       final Path bar = new Path("/bar");
+      cluster.getFileSystem().mkdirs(foo);
 
-      {
-        final DistributedFileSystem dfs = cluster.getFileSystem();
-        dfs.mkdirs(foo);
+      startRollingUpgrade(foo, bar, cluster);
+      cluster.getFileSystem().rollEdits();
+      cluster.getFileSystem().rollEdits();
+      rollbackRollingUpgrade(foo, bar, cluster);
 
-        //start rolling upgrade
-        dfs.rollingUpgrade(RollingUpgradeAction.START);
+      startRollingUpgrade(foo, bar, cluster);
+      cluster.getFileSystem().rollEdits();
+      cluster.getFileSystem().rollEdits();
+      rollbackRollingUpgrade(foo, bar, cluster);
 
-        dfs.mkdirs(bar);
-        Assert.assertTrue(dfs.exists(foo));
-        Assert.assertTrue(dfs.exists(bar));
-      }
 
-      // Restart should succeed!
+      startRollingUpgrade(foo, bar, cluster);
       cluster.restartNameNode();
+      rollbackRollingUpgrade(foo, bar, cluster);
 
-      cluster.restartNameNode("-rollingUpgrade", "rollback");
-      {
-        final DistributedFileSystem dfs = cluster.getFileSystem();
-        Assert.assertTrue(dfs.exists(foo));
-        Assert.assertFalse(dfs.exists(bar));
-      }
+      startRollingUpgrade(foo, bar, cluster);
+      cluster.restartNameNode();
+      rollbackRollingUpgrade(foo, bar, cluster);
+
+      startRollingUpgrade(foo, bar, cluster);
+      rollbackRollingUpgrade(foo, bar, cluster);
+
+      startRollingUpgrade(foo, bar, cluster);
+      rollbackRollingUpgrade(foo, bar, cluster);
     } finally {
       if(cluster != null) cluster.shutdown();
     }
   }
 
+  private static void startRollingUpgrade(Path foo, Path bar,
+      MiniDFSCluster cluster) throws IOException {
+    final DistributedFileSystem dfs = cluster.getFileSystem();
+
+    //start rolling upgrade
+    dfs.rollingUpgrade(RollingUpgradeAction.START);
+
+    dfs.mkdirs(bar);
+    Assert.assertTrue(dfs.exists(foo));
+    Assert.assertTrue(dfs.exists(bar));
+  }
+
+  private static void rollbackRollingUpgrade(Path foo, Path bar,
+      MiniDFSCluster cluster) throws IOException {
+    cluster.restartNameNode("-rollingUpgrade", "rollback");
+
+    final DistributedFileSystem dfs = cluster.getFileSystem();
+    Assert.assertTrue(dfs.exists(foo));
+    Assert.assertFalse(dfs.exists(bar));
+  }
 
   @Test
   public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception {
     // start a cluster
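Note on the testRollback changes above: the refactoring extracts each scenario into a symmetric pair of helpers; startRollingUpgrade() begins an upgrade and creates /bar, while rollbackRollingUpgrade() restarts the NameNode with the "-rollingUpgrade rollback" startup option and asserts the mid-upgrade change is gone. The rollEdits() calls between start and rollback finalize fresh edit-log segments, plausibly to exercise the segment-discarding path touched earlier in this commit. A condensed sketch of one cycle (a hypothetical helper, assuming the same MiniDFSCluster setup and imports as TestRollingUpgrade):

    // Start a rolling upgrade, mutate the namespace, then roll back.
    private static void oneRollbackCycle(MiniDFSCluster cluster,
        Path foo, Path bar) throws IOException {
      final DistributedFileSystem dfs = cluster.getFileSystem();
      dfs.rollingUpgrade(RollingUpgradeAction.START); // mark upgrade start
      dfs.mkdirs(bar);                                // change made mid-upgrade
      dfs.rollEdits();                                // finalize a segment
      dfs.rollEdits();                                // ...and another

      // Discard everything written after the upgrade marker.
      cluster.restartNameNode("-rollingUpgrade", "rollback");
      Assert.assertTrue(cluster.getFileSystem().exists(foo));  // kept
      Assert.assertFalse(cluster.getFileSystem().exists(bar)); // rolled back
    }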