From 29b9049bde0521bfe5890248b845d0a22862c235 Mon Sep 17 00:00:00 2001
From: Wei-Chiu Chuang
Date: Mon, 8 Jul 2019 15:19:42 +0800
Subject: [PATCH] HDFS-12862. CacheDirective becomes invalid when NN restarts or fails over. Contributed by Wang XL.

(cherry picked from commit ec851e4db24fad68c1d70a981b4253c0207abc45)
---
 .../server/namenode/FSImageSerialization.java |  4 +-
 .../server/namenode/TestCacheDirectives.java  | 67 +++++++++++++++++++
 2 files changed, 69 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index 4d8b627cb0e..706b2bda062 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -538,7 +538,7 @@ public class FSImageSerialization {
       writeString(directive.getPool(), out);
     }
     if (directive.getExpiration() != null) {
-      writeLong(directive.getExpiration().getMillis(), out);
+      writeLong(directive.getExpiration().getAbsoluteMillis(), out);
     }
   }
 
@@ -610,7 +610,7 @@ public class FSImageSerialization {
     }
     if (directive.getExpiration() != null) {
       XMLUtils.addSaxString(contentHandler, "EXPIRATION",
-          "" + directive.getExpiration().getMillis());
+          "" + directive.getExpiration().getAbsoluteMillis());
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
index b3e91e5072a..58baf730d75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
@@ -43,6 +43,7 @@ import java.util.LinkedList;
 import java.util.List;
 
 import org.apache.commons.lang3.time.DateUtils;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -60,9 +61,11 @@ import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirective;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator;
@@ -1605,4 +1608,68 @@ public class TestCacheDirectives {
     waitForCachedBlocks(namenode, expected, 0,
         "testAddingCacheDirectiveInfosWhenCachingIsDisabled:2");
   }
+
+  @Test(timeout=120000)
+  public void testExpiryTimeConsistency() throws Exception {
+    conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    MiniDFSCluster dfsCluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES)
+            .nnTopology(MiniDFSNNTopology.simpleHATopology())
+            .build();
+    dfsCluster.transitionToActive(0);
+
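+    // Give the directive below a relative expiry, then modify it; once the
+    // standby has tailed the edit log, both NameNodes must report the same
+    // absolute expiry time (HDFS-12862: the raw offset must not be persisted).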
+    DistributedFileSystem fs = dfsCluster.getFileSystem(0);
+    final NameNode ann = dfsCluster.getNameNode(0);
+
+    final Path filename = new Path("/file");
+    final short replication = (short) 3;
+    DFSTestUtil.createFile(fs, filename, 1, replication, 0x0BAC);
+    fs.addCachePool(new CachePoolInfo("pool"));
+    long id = fs.addCacheDirective(
+        new CacheDirectiveInfo.Builder().setPool("pool").setPath(filename)
+            .setExpiration(CacheDirectiveInfo.Expiration.newRelative(86400000))
+            .setReplication(replication).build());
+    fs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
+        .setId(id)
+        .setExpiration(CacheDirectiveInfo.Expiration.newRelative(172800000))
+        .build());
+    final NameNode sbn = dfsCluster.getNameNode(1);
+    final CacheManager annCachemanager = ann.getNamesystem().getCacheManager();
+    final CacheManager sbnCachemanager = sbn.getNamesystem().getCacheManager();
+    HATestUtil.waitForStandbyToCatchUp(ann, sbn);
+    GenericTestUtils.waitFor(() -> {
+      boolean isConsistent = false;
+      ann.getNamesystem().readLock();
+      try {
+        sbn.getNamesystem().readLock();
+        try {
+          Iterator<CacheDirective> annDirectivesIt = annCachemanager.
+              getCacheDirectives().iterator();
+          Iterator<CacheDirective> sbnDirectivesIt = sbnCachemanager.
+              getCacheDirectives().iterator();
+          if (annDirectivesIt.hasNext() && sbnDirectivesIt.hasNext()) {
+            CacheDirective annDirective = annDirectivesIt.next();
+            CacheDirective sbnDirective = sbnDirectivesIt.next();
+            if (annDirective.getExpiryTimeString().
+                equals(sbnDirective.getExpiryTimeString())) {
+              isConsistent = true;
+            }
+          }
+        } finally {
+          sbn.getNamesystem().readUnlock();
+        }
+      } finally {
+        ann.getNamesystem().readUnlock();
+      }
+      if (!isConsistent) {
+        LOG.info("testExpiryTimeConsistency: "
+            + "ANN CacheDirective status is inconsistent with SBN");
+      }
+      return isConsistent;
+    }, 500, 120000);
+  }
 }
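
For reviewers, a minimal sketch (not part of the patch) of why the one-line
serialization change matters. It assumes Expiration.getMillis() returns the raw
offset for a relative expiration while getAbsoluteMillis() resolves it against
the local clock, which is what the fix implies; the class name below is made up
for illustration.

import java.util.Date;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;

public class ExpiryRoundTripSketch {
  public static void main(String[] args) {
    // A cache directive that should expire one day from now.
    Expiration relative = Expiration.newRelative(86400000L);

    // Old behavior: the raw offset was written out. Read back after a
    // restart or failover, it is interpreted as an absolute timestamp
    // (Jan 2, 1970), so the directive is treated as already expired.
    System.out.println("getMillis():         "
        + new Date(relative.getMillis()));

    // Fixed behavior: the offset is resolved to wall-clock time before it
    // is written, so the reloaded directive keeps its intended deadline.
    System.out.println("getAbsoluteMillis(): "
        + new Date(relative.getAbsoluteMillis()));
  }
}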