From f3143d225afec0ad95d9e4b81b91d760b5b77c52 Mon Sep 17 00:00:00 2001
From: Lei Xu
Date: Thu, 7 Dec 2017 11:15:40 -0800
Subject: [PATCH] HDFS-12840. Creating a file with non-default EC policy in an
 EC zone is not correctly serialized in the editlog. Contributed by Lei (Eddy)
 Xu.

(cherry picked from commit 67662e2ac9e68f32b725c8118cf2be79a662fca5)
---
 .../io/erasurecode/ErasureCodeConstants.java  |   2 +-
 .../server/namenode/FSDirWriteFileOp.java     |  21 +-
 .../hdfs/server/namenode/FSEditLog.java       |   3 +-
 .../hdfs/server/namenode/FSEditLogLoader.java |   2 +-
 .../hdfs/server/namenode/FSEditLogOp.java     |  26 +
 .../hdfs/server/namenode/INodeFile.java       |  14 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java   |  23 +
 .../TestDistributedFileSystemWithECFile.java  |  55 ++
 .../namenode/OfflineEditsViewerHelper.java    |   4 +-
 .../namenode/TestNamenodeRetryCache.java      |  14 +-
 .../namenode/ha/TestRetryCacheWithHA.java     |  13 +-
 .../src/test/resources/editsStored            | Bin 6753 -> 7909 bytes
 .../src/test/resources/editsStored.xml        | 578 ++++++++++++------
 13 files changed, 552 insertions(+), 203 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
index 73b8f560ad3..2eac016e0f8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
@@ -52,6 +52,6 @@ public final class ErasureCodeConstants {
   public static final byte MAX_POLICY_ID = Byte.MAX_VALUE;
   public static final byte USER_DEFINED_POLICY_START_ID = (byte) 64;
-  public static final byte REPLICATION_POLICY_ID = (byte) 63;
+  public static final byte REPLICATION_POLICY_ID = (byte) 0;

   public static final String REPLICATION_POLICY_NAME = REPLICATION_CODEC_NAME;
 }
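Moving REPLICATION_POLICY_ID from 63 to 0 is what makes the new edit-log field
safe by default: a missing or zero policy id now uniformly means "replicated,
no EC policy", which is also what records written before this patch implicitly
contain. A minimal sketch of the sentinel convention (illustrative class, not
the real API; the real check is in FSDirWriteFileOp below):

    // Sketch: the sentinel id distinguishing replicated from striped files.
    class EcPolicyIdSentinel {
      static final byte REPLICATION_POLICY_ID = (byte) 0;  // was 63 before this patch

      static boolean isStriped(byte ecPolicyId) {
        // Any non-zero id names a concrete erasure coding policy.
        return ecPolicyId != REPLICATION_POLICY_ID;
      }
    }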
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index b2022126693..c4041a33716 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.ChunkedArrayList;
@@ -415,22 +416,28 @@ class FSDirWriteFileOp {
       PermissionStatus permissions, List<AclEntry> aclEntries,
       List<XAttr> xAttrs, short replication, long modificationTime, long atime,
       long preferredBlockSize, boolean underConstruction, String clientName,
-      String clientMachine, byte storagePolicyId) {
+      String clientMachine, byte storagePolicyId, byte ecPolicyID) {
     final INodeFile newNode;
     Preconditions.checkNotNull(existing);
     assert fsd.hasWriteLock();
     try {
       // check if the file has an EC policy
-      boolean isStriped = false;
-      ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.
-          unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), existing);
-      if (ecPolicy != null) {
-        isStriped = true;
+      boolean isStriped =
+          ecPolicyID != ErasureCodeConstants.REPLICATION_POLICY_ID;
+      ErasureCodingPolicy ecPolicy = null;
+      if (isStriped) {
+        ecPolicy = fsd.getFSNamesystem().getErasureCodingPolicyManager()
+            .getByID(ecPolicyID);
+        if (ecPolicy == null) {
+          throw new IOException(String.format(
+              "Cannot find erasure coding policy for new file %s/%s, " +
+                  "ecPolicyID=%d",
+              existing.getPath(), Arrays.toString(localName), ecPolicyID));
+        }
       }
       final BlockType blockType = isStriped ?
           BlockType.STRIPED : BlockType.CONTIGUOUS;
       final Short replicationFactor = (!isStriped ? replication : null);
-      final Byte ecPolicyID = (isStriped ? ecPolicy.getId() : null);
       if (underConstruction) {
         newNode = newINodeFile(id, permissions, modificationTime,
             modificationTime, replicationFactor, ecPolicyID, preferredBlockSize,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 7ca63f8e48a..0bbf710de5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -816,7 +816,8 @@ public class FSEditLog implements LogsPurgeable {
       .setClientMachine(
           newNode.getFileUnderConstructionFeature().getClientMachine())
       .setOverwrite(overwrite)
-      .setStoragePolicyId(newNode.getLocalStoragePolicyID());
+      .setStoragePolicyId(newNode.getLocalStoragePolicyID())
+      .setErasureCodingPolicyId(newNode.getErasureCodingPolicyID());

     AclFeature f = newNode.getAclFeature();
     if (f != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index a21b8ea914a..b0fe60a77bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -385,7 +385,7 @@ public class FSEditLogLoader {
             addCloseOp.xAttrs, replication, addCloseOp.mtime,
             addCloseOp.atime, addCloseOp.blockSize, true,
             addCloseOp.clientName, addCloseOp.clientMachine,
-            addCloseOp.storagePolicyId);
+            addCloseOp.storagePolicyId, addCloseOp.erasureCodingPolicyId);
         assert newFile != null;
         iip = INodesInPath.replace(iip, iip.length() - 1, newFile);
         fsNamesys.leaseManager.addLease(addCloseOp.clientName, newFile.getId());
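These three hunks close the loop: the create path receives the exact policy id
the client chose, FSEditLog persists it on the op, and edit-log replay resolves
the policy by that id instead of re-deriving it from the enclosing EC zone (the
removed unprotectedGetErasureCodingPolicy call). Re-deriving is what corrupted
replayed files: any file in an RS-6-3 zone came back as RS-6-3, even if it was
created replicated or with RS-3-2. A self-contained sketch of the before/after
replay decision, with illustrative names rather than the real NameNode API:

    // Sketch only: models the replay decision, not the actual FSEditLogLoader.
    class ReplayPolicySketch {
      static final byte REPLICATION_POLICY_ID = 0;
      static final byte RS_6_3 = 1, RS_3_2 = 2;  // system policy ids

      // Old behavior: the file's id was never logged, so replay fell back to
      // the zone's policy -- wrong for any non-default file.
      static byte replayBeforeFix(byte zonePolicyId) {
        return zonePolicyId;
      }

      // New behavior: the id recorded in the OP_ADD entry wins.
      static byte replayAfterFix(byte loggedPolicyId) {
        return loggedPolicyId;
      }

      public static void main(String[] args) {
        // A replicated file created inside an RS-6-3 zone:
        System.out.println(replayBeforeFix(RS_6_3));               // 1 -- wrong
        System.out.println(replayAfterFix(REPLICATION_POLICY_ID)); // 0 -- right
        // An RS-3-2 file created inside an RS-6-3 zone:
        System.out.println(replayAfterFix(RS_3_2));                // 2 -- right
      }
    }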
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index 2dc9d33c519..c0daaf19ebb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -127,6 +127,7 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
 import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.ipc.ClientId;
 import org.apache.hadoop.ipc.RpcConstants;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
@@ -425,10 +426,12 @@ public abstract class FSEditLogOp {
     String clientMachine;
     boolean overwrite;
     byte storagePolicyId;
+    byte erasureCodingPolicyId;

     private AddCloseOp(FSEditLogOpCodes opCode) {
       super(opCode);
       storagePolicyId = HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+      erasureCodingPolicyId = ErasureCodeConstants.REPLICATION_POLICY_ID;
       assert(opCode == OP_ADD || opCode == OP_CLOSE || opCode == OP_APPEND);
     }

@@ -449,6 +452,7 @@ public abstract class FSEditLogOp {
       clientMachine = null;
       overwrite = false;
       storagePolicyId = 0;
+      erasureCodingPolicyId = ErasureCodeConstants.REPLICATION_POLICY_ID;
     }

     <T extends AddCloseOp> T setInodeId(long inodeId) {
@@ -535,6 +539,11 @@ public abstract class FSEditLogOp {
       return (T)this;
     }

+    <T extends AddCloseOp> T setErasureCodingPolicyId(byte ecPolicyId) {
+      this.erasureCodingPolicyId = ecPolicyId;
+      return (T)this;
+    }
+
     @Override
     public void writeFields(DataOutputStream out) throws IOException {
       FSImageSerialization.writeLong(inodeId, out);
@@ -555,6 +564,7 @@ public abstract class FSEditLogOp {
       FSImageSerialization.writeString(clientMachine,out);
       FSImageSerialization.writeBoolean(overwrite, out);
       FSImageSerialization.writeByte(storagePolicyId, out);
+      FSImageSerialization.writeByte(erasureCodingPolicyId, out);
       // write clientId and callId
       writeRpcIds(rpcClientId, rpcCallId, out);
     }
@@ -633,6 +643,14 @@ public abstract class FSEditLogOp {
           this.storagePolicyId =
               HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
         }
+
+        if (NameNodeLayoutVersion.supports(
+            NameNodeLayoutVersion.Feature.ERASURE_CODING, logVersion)) {
+          this.erasureCodingPolicyId = FSImageSerialization.readByte(in);
+        } else {
+          this.erasureCodingPolicyId =
+              ErasureCodeConstants.REPLICATION_POLICY_ID;
+        }
         // read clientId and callId
         readRpcIds(in, logVersion);
       } else {
@@ -695,6 +713,8 @@ public abstract class FSEditLogOp {
       }
       builder.append(", storagePolicyId=");
       builder.append(storagePolicyId);
+      builder.append(", erasureCodingPolicyId=");
+      builder.append(erasureCodingPolicyId);
       builder.append(", opCode=");
       builder.append(opCode);
       builder.append(", txid=");
@@ -730,6 +750,8 @@ public abstract class FSEditLogOp {
       if (aclEntries != null) {
         appendAclEntriesToXml(contentHandler, aclEntries);
       }
+      XMLUtils.addSaxString(contentHandler, "ERASURE_CODING_POLICY_ID",
+          Byte.toString(erasureCodingPolicyId));
       appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
     }
   }
@@ -758,6 +780,10 @@ public abstract class FSEditLogOp {
       }
       this.permissions = permissionStatusFromXml(st);
       aclEntries = readAclEntriesFromXml(st);
+      if (st.hasChildren("ERASURE_CODING_POLICY_ID")) {
+        this.erasureCodingPolicyId = Byte.parseByte(st.getValue(
+            "ERASURE_CODING_POLICY_ID"));
+      }
       readRpcIdsFromXml(st);
     }
   }
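The wire-format rule encoded above: writers always emit the extra byte
immediately after storagePolicyId, and readers consume it only when the
segment's layout version supports the ERASURE_CODING feature, defaulting older
segments to the replication sentinel (the XML path uses the hasChildren guard
for the same reason). A hedged sketch of that framing, using plain java.io
streams in place of FSImageSerialization:

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    // Sketch of the version-gated field added to AddCloseOp's binary format.
    class AddOpEcByteSketch {
      static final byte REPLICATION_POLICY_ID = 0;

      static void writeTail(DataOutputStream out, byte storagePolicyId,
          byte ecPolicyId) throws IOException {
        out.writeByte(storagePolicyId);
        out.writeByte(ecPolicyId);  // unconditionally written by new NameNodes
      }

      static byte readEcPolicyId(DataInputStream in, boolean layoutSupportsEc)
          throws IOException {
        in.readByte();  // storagePolicyId
        // Segments written before the ERASURE_CODING layout feature carry no
        // EC byte; default them to "replicated".
        return layoutSupportsEc ? in.readByte() : REPLICATION_POLICY_ID;
      }
    }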
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index fcdb064fd66..3f2fb33d9a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hdfs.util.LongBitFormat;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.util.StringUtils;

 /** I-node for closed file. */
@@ -200,9 +201,10 @@ public class INodeFile extends INodeWithAdditionalFields
         // as the PolicyID can never be in negative.
         layoutRedundancy |= erasureCodingPolicyID;
       } else {
-        Preconditions.checkArgument(replication != null &&
-            erasureCodingPolicyID == null);
-        Preconditions.checkArgument(replication >= 0 &&
+        Preconditions.checkArgument(erasureCodingPolicyID == null ||
+            erasureCodingPolicyID ==
+                ErasureCodeConstants.REPLICATION_POLICY_ID);
+        Preconditions.checkArgument(replication != null && replication >= 0 &&
             replication <= MAX_REDUNDANCY,
             "Invalid replication value " + replication);
         layoutRedundancy |= replication;
@@ -588,10 +590,8 @@ public class INodeFile extends INodeWithAdditionalFields
     setStoragePolicyID(storagePolicyId);
   }
-
   /**
-   * @return The ID of the erasure coding policy on the file. -1 represents no
-   *         EC policy.
+   * @return The ID of the erasure coding policy on the file.
    */
   @VisibleForTesting
   @Override
@@ -599,7 +599,7 @@ public class INodeFile extends INodeWithAdditionalFields
     if (isStriped()) {
       return HeaderFormat.getECPolicyID(header);
     }
-    return -1;
+    return ErasureCodeConstants.REPLICATION_POLICY_ID;
   }

   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 1048b0aca25..1411a7ffded 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -1510,6 +1510,29 @@ public class DFSTestUtil {
     // OP_REMOVE_ERASURE_CODING_POLICY
     filesystem.removeErasureCodingPolicy(newPolicy1.getName());
     filesystem.removeErasureCodingPolicy(newPolicy2.getName());
+
+    // OP_ADD on erasure coding directory
+    Path ecDir = new Path("/ec");
+    filesystem.mkdirs(ecDir);
+    final ErasureCodingPolicy defaultEcPolicy =
+        SystemErasureCodingPolicies.getByID(
+            SystemErasureCodingPolicies.RS_6_3_POLICY_ID);
+    final ErasureCodingPolicy ecPolicyRS32 =
+        SystemErasureCodingPolicies.getByID(
+            SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
+    filesystem.enableErasureCodingPolicy(ecPolicyRS32.getName());
+    filesystem.enableErasureCodingPolicy(defaultEcPolicy.getName());
+    filesystem.setErasureCodingPolicy(ecDir, defaultEcPolicy.getName());
+
+    try (FSDataOutputStream out = filesystem.createFile(
+        new Path(ecDir, "replicated")).replicate().build()) {
+      out.write("replicated".getBytes());
+    }
+
+    try (FSDataOutputStream out = filesystem.createFile(
+        new Path(ecDir, "RS-3-2")).ecPolicyName(ecPolicyRS32.getName()).build()) {
+      out.write("RS-3-2".getBytes());
+    }
   }

   public static void abortStream(DFSOutputStream out) throws IOException {
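Stepping back to the INodeFile hunks above: replication factor and EC policy id
share the same redundancy bits of the packed inode header, discriminated by the
striped-layout bit, so a replicated file has no policy id slot of its own. That
is why the constructor now only requires the id to be absent or the sentinel,
and why getErasureCodingPolicyID() answers REPLICATION_POLICY_ID (0) instead of
-1. A sketch with illustrative bit positions (the real layout is HeaderFormat's
BLOCK_LAYOUT_AND_REDUNDANCY field, not these constants):

    // Sketch: pack either a replication factor or an EC policy id into the
    // same bits, discriminated by a "striped" flag bit.
    class RedundancySketch {
      static final byte REPLICATION_POLICY_ID = 0;
      static final int STRIPED_BIT = 1 << 11;  // layout bit above the redundancy bits

      static int pack(Short replication, Byte ecPolicyId) {
        boolean striped =
            ecPolicyId != null && ecPolicyId != REPLICATION_POLICY_ID;
        // For the replicated branch, replication is non-null (checked upstream).
        return striped ? (STRIPED_BIT | ecPolicyId) : replication;
      }

      static byte ecPolicyIdOf(int bits) {
        // Replicated files now answer 0 (the sentinel) rather than -1.
        return (bits & STRIPED_BIT) != 0
            ? (byte) (bits & (STRIPED_BIT - 1)) : REPLICATION_POLICY_ID;
      }
    }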
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java
index 14a2ec494a6..0a3010f0e49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java
@@ -19,11 +19,13 @@ package org.apache.hadoop.hdfs;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -32,6 +34,8 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;

+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;

 /**
@@ -194,4 +198,55 @@ public class TestDistributedFileSystemWithECFile {
     assertTrue(lastBlock.getOffset() == blockGroupSize);
     assertTrue(lastBlock.getLength() == lastBlockSize);
   }
+
+  @Test(timeout=60000)
+  public void testReplayEditLogsForReplicatedFile() throws Exception {
+    cluster.shutdown();
+
+    ErasureCodingPolicy rs63 = SystemErasureCodingPolicies.getByID(
+        SystemErasureCodingPolicies.RS_6_3_POLICY_ID
+    );
+    ErasureCodingPolicy rs32 = SystemErasureCodingPolicies.getByID(
+        SystemErasureCodingPolicies.RS_3_2_POLICY_ID
+    );
+    // Test RS(6,3) as default policy
+    int numDataNodes = rs63.getNumDataUnits() + rs63.getNumParityUnits();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(MiniDFSNNTopology.simpleHATopology())
+        .numDataNodes(numDataNodes)
+        .build();
+
+    cluster.transitionToActive(0);
+    fs = cluster.getFileSystem(0);
+    fs.enableErasureCodingPolicy(rs63.getName());
+    fs.enableErasureCodingPolicy(rs32.getName());
+
+    Path dir = new Path("/ec");
+    fs.mkdirs(dir);
+    fs.setErasureCodingPolicy(dir, rs63.getName());
+
+    // Create an erasure coded file with the default policy.
+    Path ecFile = new Path(dir, "ecFile");
+    createFile(ecFile.toString(), 10);
+    // Create a replicated file.
+    Path replicatedFile = new Path(dir, "replicated");
+    try (FSDataOutputStream out = fs.createFile(replicatedFile)
+        .replicate().build()) {
+      out.write(123);
+    }
+    // Create an EC file with a different policy.
+    Path ecFile2 = new Path(dir, "RS-3-2");
+    try (FSDataOutputStream out = fs.createFile(ecFile2)
+        .ecPolicyName(rs32.getName()).build()) {
+      out.write(456);
+    }
+
+    cluster.transitionToStandby(0);
+    cluster.transitionToActive(1);
+
+    fs = cluster.getFileSystem(1);
+    assertNull(fs.getErasureCodingPolicy(replicatedFile));
+    assertEquals(rs63, fs.getErasureCodingPolicy(ecFile));
+    assertEquals(rs32, fs.getErasureCodingPolicy(ecFile2));
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
index b5ba108af83..d637af5b497 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
@@ -98,8 +98,10 @@ public class OfflineEditsViewerHelper {
     config.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
     config.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    final int numDataNodes = 9;
     cluster =
-        new MiniDFSCluster.Builder(config).manageNameDfsDirs(false).build();
+        new MiniDFSCluster.Builder(config).manageNameDfsDirs(false)
+            .numDataNodes(numDataNodes).build();
     cluster.waitClusterUp();
   }

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
index 42ff6989e1a..0995f135d97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
@@ -43,9 +43,11 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.ipc.ClientId;
@@ -79,6 +81,11 @@ import org.junit.Test;
 public class TestNamenodeRetryCache {
   private static final byte[] CLIENT_ID = ClientId.getClientId();
   private static MiniDFSCluster cluster;
+  private static ErasureCodingPolicy defaultEcPolicy =
+      SystemErasureCodingPolicies.getByID(
+          SystemErasureCodingPolicies.RS_6_3_POLICY_ID);
+  private static int numDataNodes = defaultEcPolicy.getNumDataUnits() +
+      defaultEcPolicy.getNumParityUnits() + 1;
   private static NamenodeProtocols nnRpc;
   private static final FsPermission perm = FsPermission.getDefault();
   private static DistributedFileSystem filesystem;
@@ -93,7 +100,8 @@ public class TestNamenodeRetryCache {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
-    cluster = new MiniDFSCluster.Builder(conf).build();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(numDataNodes).build();
     cluster.waitActive();
     nnRpc = cluster.getNameNode().getRpcServer();
     filesystem = cluster.getFileSystem();
@@ -436,7 +444,7 @@ public class TestNamenodeRetryCache {
     LightWeightCache<CacheEntry, CacheEntry> cacheSet =
         (LightWeightCache<CacheEntry, CacheEntry>) namesystem
             .getRetryCache().getCacheSet();
-    assertEquals("Retry cache size is wrong", 34, cacheSet.size());
+    assertEquals("Retry cache size is wrong", 39, cacheSet.size());

     Map<CacheEntry, CacheEntry> oldEntries =
         new HashMap<CacheEntry, CacheEntry>();
@@ -455,7 +463,7 @@ public class TestNamenodeRetryCache {
     assertTrue(namesystem.hasRetryCache());
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
         .getRetryCache().getCacheSet();
-    assertEquals("Retry cache size is wrong", 34, cacheSet.size());
+    assertEquals("Retry cache size is wrong", 39, cacheSet.size());
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
index 1d114d62e4d..43aaa927037 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
@@ -67,11 +67,13 @@ import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -93,7 +95,12 @@ public class TestRetryCacheWithHA {
   private static final Log LOG = LogFactory.getLog(TestRetryCacheWithHA.class);

   private static final int BlockSize = 1024;
-  private static final short DataNodes = 3;
+  private static ErasureCodingPolicy defaultEcPolicy =
+      SystemErasureCodingPolicies.getByID(
+          SystemErasureCodingPolicies.RS_6_3_POLICY_ID);
+  private static final short DataNodes = (short)(
+      defaultEcPolicy.getNumDataUnits() +
+      defaultEcPolicy.getNumParityUnits() + 1);
   private static final int CHECKTIMES = 10;
   private static final int ResponseSize = 3;
@@ -166,7 +173,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn0 = cluster.getNamesystem(0);
     LightWeightCache<CacheEntry, CacheEntry> cacheSet =
         (LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
-    assertEquals("Retry cache size is wrong", 34, cacheSet.size());
+    assertEquals("Retry cache size is wrong", 39, cacheSet.size());

     Map<CacheEntry, CacheEntry> oldEntries =
         new HashMap<CacheEntry, CacheEntry>();
@@ -187,7 +194,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn1 = cluster.getNamesystem(1);
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
         .getRetryCache().getCacheSet();
-    assertEquals("Retry cache size is wrong", 34, cacheSet.size());
+    assertEquals("Retry cache size is wrong", 39, cacheSet.size());
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();
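The test deltas follow from the new DFSTestUtil steps: the shared operation
sequence now issues more cacheable RPCs (hence 34 -> 39 retry-cache entries),
and every MiniDFSCluster must be large enough to host an RS(6,3) block group.
The sizing rule the tests adopt, shown standalone (assumes hadoop-hdfs-client
on the classpath):

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;

    // One datanode per data and parity unit, plus one spare (as the
    // updated tests do).
    class EcClusterSizing {
      static int dataNodesFor(ErasureCodingPolicy policy) {
        return policy.getNumDataUnits() + policy.getNumParityUnits() + 1;
      }

      public static void main(String[] args) {
        ErasureCodingPolicy rs63 = SystemErasureCodingPolicies.getByID(
            SystemErasureCodingPolicies.RS_6_3_POLICY_ID);
        System.out.println(dataNodesFor(rs63));  // 10 for RS-6-3
      }
    }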
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
index 3f2817a794d1a022a90eed6b539661b04d88e33c..a0ae78eea82f1b65dec44c1d2fbc146e3397a06e 100644
GIT binary patch
literal 7909
[base85-encoded binary delta omitted: the editsStored fixture is re-recorded,
growing from 6753 to 7909 bytes to cover the new erasure coding operations.]
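The binary editsStored fixture and the editsStored.xml diff below describe the
same re-recorded edit log; the XML form can be regenerated from the binary one
with the offline edits viewer (hdfs oev -i editsStored -o editsStored.xml; XML
is the default processor).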
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
index 2a57c73fc60..7e1881c74a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml

[578 lines changed. Because the XML fixture is re-recorded, most of the diff is
mechanical churn: new transaction ids, timestamps, generation stamps, and
delegation keys; the RPC client id changes from
a4dc081c-6d6f-42d6-af5b-d260228f1aad to cab1aa2d-e08a-4d2f-8216-76e167eccd94;
and the replication factor recorded for the test files changes from 1 to 3 now
that the fixture is generated on a 9-datanode cluster. The substantive changes:

 * Every OP_ADD record gains an ERASURE_CODING_POLICY_ID field, written after
   PERMISSION_STATUS and before the RPC ids; existing replicated files record
   the sentinel value 0.

 * The re-recorded lease-recovery sequence captures an extra OP_REASSIGN_LEASE
   (txid 85) before the OP_CLOSE of /hard-lease-recovery-test.

 * New records at the tail mirror the new DFSTestUtil steps: OP_MKDIR for /ec
   (txid 105), OP_ENABLE_ERASURE_CODING_POLICY for RS-3-2-1024k and
   RS-6-3-1024k, OP_SET_XATTR setting the hdfs.erasurecoding.policy xattr on
   /ec, then OP_ADD / OP_ALLOCATE_BLOCK_ID / OP_SET_GENSTAMP_V2 / OP_ADD_BLOCK
   / OP_CLOSE sequences for /ec/replicated (ERASURE_CODING_POLICY_ID 0, block
   id 1073741838) and /ec/RS-3-2 (ERASURE_CODING_POLICY_ID 2, striped block id
   -9223372036854775792), followed by OP_ROLLING_UPGRADE_START,
   OP_ROLLING_UPGRADE_FINALIZE, and OP_END_LOG_SEGMENT (final txid 121).]