From 9b98cd9c1823ab65566fc0cef8dfba9e696d3b69 Mon Sep 17 00:00:00 2001
From: Andrew Wang
Date: Thu, 24 Jul 2014 23:44:14 +0000
Subject: [PATCH] HDFS-6696. Name node cannot start if the path of a file
 under construction contains .snapshot. (wang)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1613330 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |   3 +
 .../hdfs/server/namenode/FSImageFormat.java   |  11 ++
 .../hadoop/hdfs/TestDFSUpgradeFromImage.java  | 137 ++++++++++++++++++
 .../test/resources/hadoop-0.23-reserved.tgz   | Bin 0 -> 4558 bytes
 .../src/test/resources/hadoop-1-reserved.tgz  | Bin 0 -> 2572 bytes
 5 files changed, 151 insertions(+)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-1-reserved.tgz

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b229a630342..aaf795eeed5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -658,6 +658,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6422. getfattr in CLI doesn't throw exception or return non-0
     return code when xattr doesn't exist. (Charles Lamb via umamahesh)
 
+    HDFS-6696. Name node cannot start if the path of a file under
+    construction contains ".snapshot". (wang)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 16c5a66eb7a..839bcbab980 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -620,6 +620,16 @@ public class FSImageFormat {
     INodeDirectory parentINode = fsDir.rootDir;
     for (long i = 0; i < numFiles; i++) {
       pathComponents = FSImageSerialization.readPathComponents(in);
+      for (int j=0; j < pathComponents.length; j++) {
+        byte[] newComponent = renameReservedComponentOnUpgrade
+            (pathComponents[j], getLayoutVersion());
+        if (!Arrays.equals(newComponent, pathComponents[j])) {
+          String oldPath = DFSUtil.byteArray2PathString(pathComponents);
+          pathComponents[j] = newComponent;
+          String newPath = DFSUtil.byteArray2PathString(pathComponents);
+          LOG.info("Renaming reserved path " + oldPath + " to " + newPath);
+        }
+      }
       final INode newNode = loadINode(
           pathComponents[pathComponents.length-1], false, in, counter);
@@ -933,6 +943,7 @@ public class FSImageFormat {
         oldnode = namesystem.dir.getInode(cons.getId()).asFile();
         inSnapshot = true;
       } else {
+        path = renameReservedPathsOnUpgrade(path, getLayoutVersion());
         final INodesInPath iip = fsDir.getLastINodeInPath(path);
         oldnode = INodeFile.valueOf(iip.getINode(0), path);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
index 1e1f668f210..f5dbdceaa17 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
@@ -70,6 +70,9 @@ public class TestDFSUpgradeFromImage {
   private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
   private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
   private static final String HADOOP1_BBW_IMAGE = "hadoop1-bbw.tgz";
+  private static final String HADOOP1_RESERVED_IMAGE = "hadoop-1-reserved.tgz";
+  private static final String HADOOP023_RESERVED_IMAGE =
+      "hadoop-0.23-reserved.tgz";
   private static final String HADOOP2_RESERVED_IMAGE = "hadoop-2-reserved.tgz";
 
   private static class ReferenceFileInfo {
@@ -325,6 +328,140 @@ public class TestDFSUpgradeFromImage {
     }
   }
 
+  /**
+   * Test upgrade from a branch-1.2 image with reserved paths
+   */
+  @Test
+  public void testUpgradeFromRel1ReservedImage() throws Exception {
+    unpackStorage(HADOOP1_RESERVED_IMAGE);
+    MiniDFSCluster cluster = null;
+    // Try it once without setting the upgrade flag to ensure it fails
+    final Configuration conf = new Configuration();
+    // Try it again with a custom rename string
+    try {
+      FSImageFormat.setRenameReservedPairs(
+          ".snapshot=.user-snapshot," +
+          ".reserved=.my-reserved");
+      cluster =
+          new MiniDFSCluster.Builder(conf)
+              .format(false)
+              .startupOption(StartupOption.UPGRADE)
+              .numDataNodes(0).build();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      // Make sure the paths were renamed as expected
+      // Also check that paths are present after a restart, checks that the
+      // upgraded fsimage has the same state.
+      final String[] expected = new String[] {
+          "/.my-reserved",
+          "/.user-snapshot",
+          "/.user-snapshot/.user-snapshot",
+          "/.user-snapshot/open",
+          "/dir1",
+          "/dir1/.user-snapshot",
+          "/dir2",
+          "/dir2/.user-snapshot",
+          "/user",
+          "/user/andrew",
+          "/user/andrew/.user-snapshot",
+      };
+      for (int i=0; i<2; i++) {
+        // Restart the second time through this loop
+        if (i==1) {
+          cluster.finalizeCluster(conf);
+          cluster.restartNameNode(true);
+        }
+        ArrayList<Path> toList = new ArrayList<Path>();
+        toList.add(new Path("/"));
+        ArrayList<String> found = new ArrayList<String>();
+        while (!toList.isEmpty()) {
+          Path p = toList.remove(0);
+          FileStatus[] statuses = dfs.listStatus(p);
+          for (FileStatus status: statuses) {
+            final String path = status.getPath().toUri().getPath();
+            System.out.println("Found path " + path);
+            found.add(path);
+            if (status.isDirectory()) {
+              toList.add(status.getPath());
+            }
+          }
+        }
+        for (String s: expected) {
+          assertTrue("Did not find expected path " + s, found.contains(s));
+        }
+        assertEquals("Found an unexpected path while listing filesystem",
+            found.size(), expected.length);
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
+   * Test upgrade from a 0.23.11 image with reserved paths
+   */
+  @Test
+  public void testUpgradeFromRel023ReservedImage() throws Exception {
+    unpackStorage(HADOOP023_RESERVED_IMAGE);
+    MiniDFSCluster cluster = null;
+    // Try it once without setting the upgrade flag to ensure it fails
+    final Configuration conf = new Configuration();
+    // Try it again with a custom rename string
+    try {
+      FSImageFormat.setRenameReservedPairs(
+          ".snapshot=.user-snapshot," +
+          ".reserved=.my-reserved");
+      cluster =
+          new MiniDFSCluster.Builder(conf)
+              .format(false)
+              .startupOption(StartupOption.UPGRADE)
+              .numDataNodes(0).build();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      // Make sure the paths were renamed as expected
+      // Also check that paths are present after a restart, checks that the
+      // upgraded fsimage has the same state.
+      final String[] expected = new String[] {
+          "/.user-snapshot",
+          "/dir1",
+          "/dir1/.user-snapshot",
+          "/dir2",
+          "/dir2/.user-snapshot"
+      };
+      for (int i=0; i<2; i++) {
+        // Restart the second time through this loop
+        if (i==1) {
+          cluster.finalizeCluster(conf);
+          cluster.restartNameNode(true);
+        }
+        ArrayList<Path> toList = new ArrayList<Path>();
+        toList.add(new Path("/"));
+        ArrayList<String> found = new ArrayList<String>();
+        while (!toList.isEmpty()) {
+          Path p = toList.remove(0);
+          FileStatus[] statuses = dfs.listStatus(p);
+          for (FileStatus status: statuses) {
+            final String path = status.getPath().toUri().getPath();
+            System.out.println("Found path " + path);
+            found.add(path);
+            if (status.isDirectory()) {
+              toList.add(status.getPath());
+            }
+          }
+        }
+        for (String s: expected) {
+          assertTrue("Did not find expected path " + s, found.contains(s));
+        }
+        assertEquals("Found an unexpected path while listing filesystem",
+            found.size(), expected.length);
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /**
    * Test upgrade from 2.0 image with a variety of .snapshot and .reserved
    * paths to test renaming on upgrade
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz
new file mode 100644
index 0000000000000000000000000000000000000000..0f53f2adb27e1a8379e4bc0d74a9c98700228559
GIT binary patch
literal 4558
[base85-encoded binary data omitted: hadoop-0.23-reserved.tgz (4558 bytes) and
hadoop-1-reserved.tgz (2572 bytes) are the binary fsimage archives added as
test resources by this patch.]
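Note (not part of the patch): a minimal, self-contained Java sketch of the reserved-path renaming idea that the FSImageFormat change applies during upgrade. The class name, the RENAMES table, and joinPath() below are hypothetical stand-ins for the real renameReservedComponentOnUpgrade()/renameReservedPathsOnUpgrade() and DFSUtil.byteArray2PathString(); the rename pairs mirror what the new tests configure via FSImageFormat.setRenameReservedPairs(".snapshot=.user-snapshot,.reserved=.my-reserved").

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;

/**
 * Standalone illustration (not the actual FSImageFormat code): while loading
 * path components from an old fsimage during an upgrade, any component that
 * collides with a now-reserved name such as ".snapshot" is rewritten to a
 * replacement name so the NameNode can start.
 */
public class ReservedPathRenameSketch {

  // Hypothetical rename table; the real code takes these pairs from
  // FSImageFormat.setRenameReservedPairs().
  private static final Map<String, String> RENAMES = new LinkedHashMap<>();
  static {
    RENAMES.put(".snapshot", ".user-snapshot");
    RENAMES.put(".reserved", ".my-reserved");
  }

  /** Rewrites any reserved component in place and returns the joined path. */
  static String renameReservedComponents(byte[][] pathComponents) {
    for (int j = 0; j < pathComponents.length; j++) {
      String component = new String(pathComponents[j], StandardCharsets.UTF_8);
      String replacement = RENAMES.get(component);
      if (replacement != null) {
        byte[] newComponent = replacement.getBytes(StandardCharsets.UTF_8);
        // Only rewrite when the component actually changes, as the patch does.
        if (!Arrays.equals(newComponent, pathComponents[j])) {
          pathComponents[j] = newComponent;
        }
      }
    }
    return joinPath(pathComponents);
  }

  // Simplified stand-in for DFSUtil.byteArray2PathString().
  private static String joinPath(byte[][] components) {
    StringBuilder sb = new StringBuilder();
    for (byte[] c : components) {
      if (c.length == 0) {
        continue; // a leading empty component represents the root
      }
      sb.append('/').append(new String(c, StandardCharsets.UTF_8));
    }
    return sb.length() == 0 ? "/" : sb.toString();
  }

  public static void main(String[] args) {
    byte[][] underConstruction = {
        "".getBytes(StandardCharsets.UTF_8),
        "user".getBytes(StandardCharsets.UTF_8),
        ".snapshot".getBytes(StandardCharsets.UTF_8),
        "open".getBytes(StandardCharsets.UTF_8),
    };
    // Prints "/user/.user-snapshot/open"
    System.out.println(renameReservedComponents(underConstruction));
  }
}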