diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index baeab819806..b6cb70b6337 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -73,6 +73,9 @@ Release 2.6.1 - UNRELEASED HDFS-7714. Simultaneous restart of HA NameNodes and DataNode can cause DataNode to register successfully with only one NameNode.(vinayakumarb) + HDFS-7788. Post-2.6 namenode may not start up with an image containing + inodes created with an old release. (Rushabh Shah via kihwal) + Release 2.6.0 - 2014-11-18 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 5136f8b62d1..1dd6da392a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -103,6 +103,9 @@ public class INodeFile extends INodeWithAdditionalFields static long toLong(long preferredBlockSize, short replication, byte storagePolicyID) { long h = 0; + if (preferredBlockSize == 0) { + preferredBlockSize = PREFERRED_BLOCK_SIZE.BITS.getMin(); + } h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h); h = REPLICATION.BITS.combine(replication, h); h = STORAGE_POLICY_ID.BITS.combine(storagePolicyID, h); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java index 863d9f744fa..9399d84f1e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java @@ -64,4 +64,8 @@ public class LongBitFormat 
implements Serializable { } return (record & ~MASK) | (value << OFFSET); } + + public long getMin() { + return MIN; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java index f21834ea295..d19980cda4e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java @@ -28,10 +28,13 @@ import org.junit.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSOutputStream; import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag; import org.apache.hadoop.hdfs.protocol.HdfsConstants; @@ -40,10 +43,14 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; import org.apache.hadoop.hdfs.util.MD5FileUtils; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.PathUtils; import org.junit.Test; public class TestFSImage { + private static final String HADOOP_2_6_ZER0_BLOCK_SIZE_TGZ = + "image-with-zero-block-size.tar.gz"; @Test public void testPersist() throws IOException { Configuration conf = new Configuration(); @@ -183,4 +190,45 @@ public class TestFSImage { } } } + + /** + * In this test case, I have created an image with a file having + * preferredblockSize = 0. 
We are trying to read this image (since a file with + * preferredblockSize = 0 was allowed pre 2.1.0-beta versions). The namenode + * after 2.6 version will not be able to read this particular file. + * See HDFS-7788 for more information. + * @throws Exception + */ + @Test + public void testZeroBlockSize() throws Exception { + final Configuration conf = new HdfsConfiguration(); + String tarFile = System.getProperty("test.cache.data", "build/test/cache") + + "/" + HADOOP_2_6_ZER0_BLOCK_SIZE_TGZ; + String testDir = PathUtils.getTestDirName(getClass()); + File dfsDir = new File(testDir, "image-with-zero-block-size"); + if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) { + throw new IOException("Could not delete dfs directory '" + dfsDir + "'"); + } + FileUtil.unTar(new File(tarFile), new File(testDir)); + File nameDir = new File(dfsDir, "name"); + GenericTestUtils.assertExists(nameDir); + conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, + nameDir.getAbsolutePath()); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1) + .format(false) + .manageDataDfsDirs(false) + .manageNameDfsDirs(false) + .waitSafeMode(false) + .build(); + try { + FileSystem fs = cluster.getFileSystem(); + Path testPath = new Path("/tmp/zeroBlockFile"); + assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath)); + assertTrue("Name node didn't come up", cluster.isNameNodeUp(0)); + } finally { + cluster.shutdown(); + //Clean up + FileUtil.fullyDelete(dfsDir); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/image-with-zero-block-size.tar.gz b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/image-with-zero-block-size.tar.gz new file mode 100644 index 00000000000..41f3105c352 Binary files /dev/null and b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/image-with-zero-block-size.tar.gz differ