HDFS-7788. Post-2.6 namenode may not start up with an image containing inodes created with an old release. Contributed by Rushabh Shah.

Kihwal Lee 2015-02-20 09:06:07 -06:00
parent b9a17909ba
commit 7ae5255a16
5 changed files with 58 additions and 0 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -992,6 +992,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7808. Remove obsolete -ns options in in DFSHAAdmin.java.
     (Arshad Mohammad via wheat9)
 
+    HDFS-7788. Post-2.6 namenode may not start up with an image containing
+    inodes created with an old release. (Rushabh Shah via kihwal)
+
   BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
     HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java

@@ -107,6 +107,9 @@ public class INodeFile extends INodeWithAdditionalFields
     static long toLong(long preferredBlockSize, short replication,
         byte storagePolicyID) {
       long h = 0;
+      if (preferredBlockSize == 0) {
+        preferredBlockSize = PREFERRED_BLOCK_SIZE.BITS.getMin();
+      }
       h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h);
       h = REPLICATION.BITS.combine(replication, h);
       h = STORAGE_POLICY_ID.BITS.combine(storagePolicyID, h);

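Context for the guard added above: LongBitFormat.combine() rejects any value below the field's configured minimum, which is what aborted namenode startup when a legacy image contained preferredBlockSize = 0. The following is a minimal, self-contained sketch of that failure mode and the clamp; the 48-bit width and MIN = 1 for the preferred-block-size field are assumptions based on the 2.7-era INodeFile header layout, and the class is an illustration, not the actual Hadoop implementation.

// Sketch only: mirrors LongBitFormat's MIN check, not the Hadoop class itself.
public class BitFormatSketch {

  /** A stripped-down bit format: packs one field into a long record. */
  static final class BitFormat {
    final int offset;
    final long min;
    final long max;
    final long mask;

    BitFormat(int offset, int length, long min) {
      this.offset = offset;
      this.min = min;
      this.max = (-1L) >>> (64 - length);
      this.mask = max << offset;
    }

    long combine(long value, long record) {
      if (value < min) {
        // Pre-HDFS-7788, a legacy preferredBlockSize of 0 landed here
        // during image loading and the namenode failed to start.
        throw new IllegalArgumentException(
            "Illegal value: " + value + " < MIN = " + min);
      }
      return (record & ~mask) | (value << offset);
    }

    long getMin() {
      return min;
    }
  }

  public static void main(String[] args) {
    // Assumed layout: preferredBlockSize in the low 48 bits with MIN = 1.
    BitFormat preferredBlockSize = new BitFormat(0, 48, 1);

    long legacyValue = 0;  // written by a pre-2.1.0-beta namenode
    try {
      preferredBlockSize.combine(legacyValue, 0L);  // throws: 0 < MIN
    } catch (IllegalArgumentException e) {
      System.out.println("Old behavior: " + e.getMessage());
    }

    // The fix: clamp a legacy zero to the format's minimum before packing.
    if (legacyValue == 0) {
      legacyValue = preferredBlockSize.getMin();
    }
    long header = preferredBlockSize.combine(legacyValue, 0L);
    System.out.println("Packed header: 0x" + Long.toHexString(header));
  }
}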
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java

@@ -64,4 +64,8 @@ public class LongBitFormat implements Serializable {
     }
     return (record & ~MASK) | (value << OFFSET);
   }
+
+  public long getMin() {
+    return MIN;
+  }
 }

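A note on the design: the new getMin() accessor leaves combine()'s strict range check unchanged, so an out-of-range value produced by a current namenode still fails fast; only the image-loading path in INodeFile.toLong() substitutes the minimum for a legacy zero.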
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java

@@ -28,10 +28,13 @@ import org.junit.Assert;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -40,10 +43,14 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Test;
 
 public class TestFSImage {
+  private static final String HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ =
+      "image-with-zero-block-size.tar.gz";
 
   @Test
   public void testPersist() throws IOException {
     Configuration conf = new Configuration();
@@ -183,4 +190,45 @@ public class TestFSImage {
       }
     }
   }
+
+  /**
+   * Load a pre-built fsimage containing a file whose preferredBlockSize
+   * is 0. Such files could be created before 2.1.0-beta, but a namenode
+   * after 2.6 failed to read an image containing one.
+   * See HDFS-7788 for more information.
+   * @throws Exception
+   */
+  @Test
+  public void testZeroBlockSize() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+        + "/" + HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ;
+    String testDir = PathUtils.getTestDirName(getClass());
+    File dfsDir = new File(testDir, "image-with-zero-block-size");
+    if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
+      throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
+    }
+    // Untar the canned image and start the namenode on it without formatting.
+    FileUtil.unTar(new File(tarFile), new File(testDir));
+    File nameDir = new File(dfsDir, "name");
+    GenericTestUtils.assertExists(nameDir);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        nameDir.getAbsolutePath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .format(false)
+        .manageDataDfsDirs(false)
+        .manageNameDfsDirs(false)
+        .waitSafeMode(false)
+        .build();
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      Path testPath = new Path("/tmp/zeroBlockFile");
+      assertTrue("File /tmp/zeroBlockFile doesn't exist", fs.exists(testPath));
+      assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
+    } finally {
+      cluster.shutdown();
+      // Clean up
+      FileUtil.fullyDelete(dfsDir);
+    }
+  }
 }
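Why the test must load a canned image at all: since 2.1.0-beta the namenode rejects undersized block sizes at create time (dfs.namenode.fs-limits.min-block-size, 1 MB by default), so a zero-block-size file can no longer be produced directly. A hypothetical companion check sketching that behavior follows; the method name and messages are illustrative and not part of this commit.

  // Hypothetical companion check, not part of this commit: a current namenode
  // refuses blockSize = 0 up front, which is why testZeroBlockSize() loads a
  // pre-built image instead of creating the file itself.
  @Test
  public void testZeroBlockSizeRejectedAtCreate() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      try {
        // blockSize = 0 is below dfs.namenode.fs-limits.min-block-size.
        fs.create(new Path("/tmp/rejected"), true, 4096, (short) 1, 0L).close();
        Assert.fail("create() with blockSize = 0 should have been rejected");
      } catch (IOException expected) {
        // Expected: "Specified block size is less than configured minimum ..."
      }
    } finally {
      cluster.shutdown();
    }
  }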