HDFS-7843. A truncated file is corrupted after rollback from a rolling upgrade.

Author: Tsz-Wo Nicholas Sze  2015-02-26 10:14:40 +08:00
parent 4e400030f6
commit 12fe3afcd3
3 changed files with 42 additions and 16 deletions

CHANGES.txt

@@ -729,6 +729,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7763. fix zkfc hung issue due to not catching exception in a corner
     case. (Liang Xie via wang)
 
+    HDFS-7843. A truncated file is corrupted after rollback from a rolling
+    upgrade. (szetszwo)
+
   BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
     HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

FSNamesystem.java

@@ -2131,6 +2131,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     if(!isUpgradeFinalized()) {
       return true;
     }
+    if (isRollingUpgrade()) {
+      return true;
+    }
     return file.isBlockInLatestSnapshot(blk);
   }
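
For context, this check sits in the NameNode's decision of whether a truncate must copy the last block rather than shrink it in place. A minimal sketch of the surrounding method follows; the method name and parameter types are assumptions, since the hunk shows only the body, and the comments describe the effect of each branch:

    // Sketch only: returns true when truncate must keep the old block
    // (copy-on-truncate) so the original data can still be restored.
    boolean shouldCopyOnTruncate(INodeFile file, BlockInfo blk) {
      if(!isUpgradeFinalized()) {
        // A non-finalized upgrade is pending: preserve the old block so an
        // upgrade rollback can bring the original file back.
        return true;
      }
      if (isRollingUpgrade()) {
        // New with HDFS-7843: a rolling upgrade may also be rolled back, so
        // the pre-truncate block must survive until the upgrade is finalized.
        return true;
      }
      // Otherwise, copy only if a snapshot still references the block.
      return file.isBlockInLatestSnapshot(blk);
    }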

TestRollingUpgrade.java

@@ -23,9 +23,11 @@ import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
@@ -36,6 +38,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;
+import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Assert;
@@ -260,42 +263,50 @@ public class TestRollingUpgrade {
     final Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
       cluster.waitActive();
 
       final Path foo = new Path("/foo");
       final Path bar = new Path("/bar");
       cluster.getFileSystem().mkdirs(foo);
 
-      startRollingUpgrade(foo, bar, cluster);
-      cluster.getFileSystem().rollEdits();
-      cluster.getFileSystem().rollEdits();
-      rollbackRollingUpgrade(foo, bar, cluster);
+      final Path file = new Path(foo, "file");
+      final byte[] data = new byte[1024];
+      DFSUtil.getRandom().nextBytes(data);
+      final FSDataOutputStream out = cluster.getFileSystem().create(file);
+      out.write(data, 0, data.length);
+      out.close();
 
-      startRollingUpgrade(foo, bar, cluster);
+      startRollingUpgrade(foo, bar, file, data, cluster);
       cluster.getFileSystem().rollEdits();
       cluster.getFileSystem().rollEdits();
-      rollbackRollingUpgrade(foo, bar, cluster);
+      rollbackRollingUpgrade(foo, bar, file, data, cluster);
 
-      startRollingUpgrade(foo, bar, cluster);
+      startRollingUpgrade(foo, bar, file, data, cluster);
+      cluster.getFileSystem().rollEdits();
+      cluster.getFileSystem().rollEdits();
+      rollbackRollingUpgrade(foo, bar, file, data, cluster);
+
+      startRollingUpgrade(foo, bar, file, data, cluster);
       cluster.restartNameNode();
-      rollbackRollingUpgrade(foo, bar, cluster);
+      rollbackRollingUpgrade(foo, bar, file, data, cluster);
 
-      startRollingUpgrade(foo, bar, cluster);
+      startRollingUpgrade(foo, bar, file, data, cluster);
       cluster.restartNameNode();
-      rollbackRollingUpgrade(foo, bar, cluster);
+      rollbackRollingUpgrade(foo, bar, file, data, cluster);
 
-      startRollingUpgrade(foo, bar, cluster);
-      rollbackRollingUpgrade(foo, bar, cluster);
+      startRollingUpgrade(foo, bar, file, data, cluster);
+      rollbackRollingUpgrade(foo, bar, file, data, cluster);
 
-      startRollingUpgrade(foo, bar, cluster);
-      rollbackRollingUpgrade(foo, bar, cluster);
+      startRollingUpgrade(foo, bar, file, data, cluster);
+      rollbackRollingUpgrade(foo, bar, file, data, cluster);
     } finally {
       if(cluster != null) cluster.shutdown();
     }
   }
 
   private static void startRollingUpgrade(Path foo, Path bar,
+      Path file, byte[] data,
       MiniDFSCluster cluster) throws IOException {
     final DistributedFileSystem dfs = cluster.getFileSystem();
 
@@ -305,18 +316,27 @@ public class TestRollingUpgrade {
     dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
 
     dfs.mkdirs(bar);
 
     Assert.assertTrue(dfs.exists(foo));
     Assert.assertTrue(dfs.exists(bar));
+    //truncate a file
+    final int newLength = DFSUtil.getRandom().nextInt(data.length - 1) + 1;
+    dfs.truncate(file, newLength);
+    TestFileTruncate.checkBlockRecovery(file, dfs);
+    AppendTestUtil.checkFullFile(dfs, file, newLength, data);
   }
 
   private static void rollbackRollingUpgrade(Path foo, Path bar,
+      Path file, byte[] data,
       MiniDFSCluster cluster) throws IOException {
+    final DataNodeProperties dnprop = cluster.stopDataNode(0);
     cluster.restartNameNode("-rollingUpgrade", "rollback");
+    cluster.restartDataNode(dnprop, true);
 
     final DistributedFileSystem dfs = cluster.getFileSystem();
     Assert.assertTrue(dfs.exists(foo));
     Assert.assertFalse(dfs.exists(bar));
+    AppendTestUtil.checkFullFile(dfs, file, data.length, data);
   }
 
   @Test
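
Read together, the test changes exercise the failure mode named in the JIRA title: write a file, truncate it while a rolling upgrade is in progress, roll back, and expect the original contents. The condensed sketch below is illustrative only, not the committed test; it assumes the imports added above, a MiniDFSCluster already built with one DataNode, and a hypothetical helper name, and it uses only calls that appear in the diff plus the safe-mode/prepare sequence implied by the startRollingUpgrade context lines.

  // Illustrative sketch: one truncate-then-rollback cycle.
  static void truncateThenRollback(MiniDFSCluster cluster) throws IOException {
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final Path file = new Path("/foo/file");
    final byte[] data = new byte[1024];
    DFSUtil.getRandom().nextBytes(data);
    final FSDataOutputStream out = dfs.create(file);
    out.write(data, 0, data.length);          // write the original 1024 bytes
    out.close();

    // Prepare a rolling upgrade; the safe-mode enter/leave mirrors the
    // startRollingUpgrade helper shown in the hunk above.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

    // Truncate to a random shorter length, wait for block recovery, and
    // verify the truncated contents.
    final int newLength = DFSUtil.getRandom().nextInt(data.length - 1) + 1;
    dfs.truncate(file, newLength);
    TestFileTruncate.checkBlockRecovery(file, dfs);
    AppendTestUtil.checkFullFile(dfs, file, newLength, data);

    // Roll back: stop the DataNode, restart the NameNode with the rollback
    // option, then restart the DataNode (the boolean keeps its ports).
    final DataNodeProperties dnprop = cluster.stopDataNode(0);
    cluster.restartNameNode("-rollingUpgrade", "rollback");
    cluster.restartDataNode(dnprop, true);

    // With the FSNamesystem change above, the pre-truncate contents are back.
    AppendTestUtil.checkFullFile(cluster.getFileSystem(), file, data.length, data);
  }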