HDFS-7788. Post-2.6 namenode may not start up with an image containing inodes created with an old release. Contributed by Rushabh Shah.
(cherry picked from commit 7ae5255a16)
(cherry picked from commit b9157f92fc)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/site/resources/image-with-zero-block-size.tar.gz
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java

(cherry picked from commit 1faa44d8f4d7b944e99dd0470ea2638c7653a131)
parent 4ec7b6174d
commit 02e0b6e306
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -73,6 +73,9 @@ Release 2.6.1 - UNRELEASED
     HDFS-7714. Simultaneous restart of HA NameNodes and DataNode can cause
     DataNode to register successfully with only one NameNode.(vinayakumarb)
 
+    HDFS-7788. Post-2.6 namenode may not start up with an image containing
+    inodes created with an old release. (Rushabh Shah via kihwal)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java

@@ -103,6 +103,9 @@ public class INodeFile extends INodeWithAdditionalFields
     static long toLong(long preferredBlockSize, short replication,
         byte storagePolicyID) {
       long h = 0;
+      if (preferredBlockSize == 0) {
+        preferredBlockSize = PREFERRED_BLOCK_SIZE.BITS.getMin();
+      }
       h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h);
       h = REPLICATION.BITS.combine(replication, h);
       h = STORAGE_POLICY_ID.BITS.combine(storagePolicyID, h);
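For context on the guard added above: INodeFile packs preferredBlockSize, replication, and the storage policy ID into a single 64-bit header, and each bit field's combine() rejects values outside its [MIN, MAX] range. The sketch below reproduces that failure mode in isolation; the field geometry (offset, width, minimum) is illustrative rather than HDFS's exact header layout:

// Minimal sketch (illustrative field geometry, not HDFS's exact layout):
// a bit-packed long header whose field rejects values below MIN.
public class HeaderPackSketch {
  static final int OFFSET = 16;   // where the field starts in the long
  static final int LENGTH = 48;   // field width in bits
  static final long MIN = 1;      // post-2.6 minimum for the block-size field
  static final long MASK = ((-1L) >>> (64 - LENGTH)) << OFFSET;

  static long combine(long value, long record) {
    if (value < MIN) {            // a legacy preferredBlockSize of 0 lands here
      throw new IllegalArgumentException("value = " + value + " < MIN = " + MIN);
    }
    return (record & ~MASK) | (value << OFFSET);
  }

  public static void main(String[] args) {
    System.out.println(Long.toHexString(combine(4096, 0L))); // packs fine
    combine(0, 0L); // throws -- the pre-fix namenode aborted here during image load
  }
}

With the clamp in toLong(), a zero block size read from a pre-2.1.0-beta image is raised to the field minimum before packing, so image loading no longer aborts.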
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java

@@ -64,4 +64,8 @@ public class LongBitFormat implements Serializable {
     }
     return (record & ~MASK) | (value << OFFSET);
   }
+
+  public long getMin() {
+    return MIN;
+  }
 }
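LongBitFormat keeps MIN in a private field, so before this change callers had no way to ask a field for its lower bound; the new getMin() accessor is what lets INodeFile.toLong() clamp. A small usage sketch, assuming the constructor signature (name, previous field, length, min) of org.apache.hadoop.hdfs.util.LongBitFormat:

import org.apache.hadoop.hdfs.util.LongBitFormat;

public class GetMinDemo {
  public static void main(String[] args) {
    // A 48-bit field with minimum 1, shaped like the post-2.6 block-size field.
    LongBitFormat blockSize = new LongBitFormat("blockSize", null, 48, 1);

    long legacy = 0;                   // block size as stored by an old release
    if (legacy < blockSize.getMin()) {
      legacy = blockSize.getMin();     // clamp rather than let combine() throw
    }
    System.out.println(blockSize.combine(legacy, 0L)); // prints 1
  }
}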
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java

@@ -28,10 +28,13 @@ import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -40,10 +43,14 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Test;
 
 public class TestFSImage {
 
+  private static final String HADOOP_2_6_ZER0_BLOCK_SIZE_TGZ =
+      "image-with-zero-block-size.tar.gz";
   @Test
   public void testPersist() throws IOException {
     Configuration conf = new Configuration();
@@ -183,4 +190,45 @@ public class TestFSImage {
       }
     }
   }
+
+  /**
+   * This test reads an image that contains a file with
+   * preferredBlockSize = 0 (such files were allowed before the
+   * 2.1.0-beta release). Without this fix, a post-2.6 namenode
+   * cannot read an image containing that file.
+   * See HDFS-7788 for more information.
+   * @throws Exception
+   */
+  @Test
+  public void testZeroBlockSize() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+        + "/" + HADOOP_2_6_ZER0_BLOCK_SIZE_TGZ;
+    String testDir = PathUtils.getTestDirName(getClass());
+    File dfsDir = new File(testDir, "image-with-zero-block-size");
+    if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
+      throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
+    }
+    FileUtil.unTar(new File(tarFile), new File(testDir));
+    File nameDir = new File(dfsDir, "name");
+    GenericTestUtils.assertExists(nameDir);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        nameDir.getAbsolutePath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .format(false)
+        .manageDataDfsDirs(false)
+        .manageNameDfsDirs(false)
+        .waitSafeMode(false)
+        .build();
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      Path testPath = new Path("/tmp/zeroBlockFile");
+      assertTrue("File /tmp/zeroBlockFile doesn't exist", fs.exists(testPath));
+      assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
+    } finally {
+      cluster.shutdown();
+      // Clean up
+      FileUtil.fullyDelete(dfsDir);
+    }
+  }
 }
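To exercise only the new regression test from hadoop-hdfs-project/hadoop-hdfs, a standard single-test Surefire invocation should work, assuming the image tarball ships with the test resources:

mvn test -Dtest=TestFSImage#testZeroBlockSize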
hadoop-hdfs-project/hadoop-hdfs/src/site/resources/image-with-zero-block-size.tar.gz

Binary file not shown.