HDFS-16187. SnapshotDiff behaviour with Xattrs and Acls is not consistent across NN restarts with checkpointing (#3340) (#3640)

(cherry picked from commit 356ebbbe80)

 Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java

Change-Id: If76fd0d77fafc90fe2f2c19ab1d0c43a58510f6b

Co-authored-by: bshashikant <shashikant@apache.org>
(cherry picked from commit 5333e872e2)
Wei-Chiu Chuang 2021-11-12 14:32:02 +08:00
parent c95df2eb6f
commit 002b5fbdc0
4 changed files with 86 additions and 4 deletions

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -84,6 +85,22 @@ public class XAttrFeature implements INode.Feature {
     }
   }
 
+  @Override
+  public boolean equals(Object o) {
+    if (o == null) {
+      return false;
+    }
+    if (getClass() != o.getClass()) {
+      return false;
+    }
+    return getXAttrs().equals(((XAttrFeature) o).getXAttrs());
+  }
+
+  @Override
+  public int hashCode() {
+    return Arrays.hashCode(getXAttrs().toArray());
+  }
+
   /**
    * Get XAttr by name with prefix.
    * @param prefixedName xAttr name with prefix
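
Why the new equals/hashCode: after a checkpoint, the snapshot copy of a directory and its current inode are read back from the fsimage as distinct XAttrFeature objects, so a reference comparison would report a metadata difference even when the xattr values are identical. Below is a minimal sketch of the value-equality contract the snapshot diff now relies on, using a hypothetical XAttrsHolder stand-in rather than the HDFS class:

import java.util.Arrays;
import java.util.List;
import java.util.Objects;

class XAttrsHolder {
  private final List<String> xAttrs;

  XAttrsHolder(List<String> xAttrs) {
    this.xAttrs = xAttrs;
  }

  List<String> getXAttrs() {
    return xAttrs;
  }

  // Same pattern as the patch: compare the xattr values, not identities.
  @Override
  public boolean equals(Object o) {
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    return getXAttrs().equals(((XAttrsHolder) o).getXAttrs());
  }

  // Derived from the same list, so value-equal holders hash alike and
  // the equals/hashCode contract holds.
  @Override
  public int hashCode() {
    return Arrays.hashCode(getXAttrs().toArray());
  }

  public static void main(String[] args) {
    // Simulates the snapshot copy and the live inode after an NN restart:
    // same attribute values, different object identity.
    XAttrsHolder snapshotCopy = new XAttrsHolder(Arrays.asList("user.a=1"));
    XAttrsHolder liveInode = new XAttrsHolder(Arrays.asList("user.a=1"));
    System.out.println(snapshotCopy == liveInode);               // false: distinct objects
    System.out.println(Objects.equals(snapshotCopy, liveInode)); // true: equal values
  }
}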

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java

@@ -24,11 +24,14 @@ import java.text.SimpleDateFormat;
 import java.util.Arrays;
 import java.util.Comparator;
 import java.util.Date;
+import java.util.Objects;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.AclFeature;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
 import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
 import org.apache.hadoop.hdfs.server.namenode.DirectoryWithQuotaFeature;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
@@ -185,6 +188,18 @@ public class Snapshot implements Comparable<byte[]> {
       return computeDirectoryContentSummary(summary, snapshotId);
     }
 
+    @Override
+    public boolean metadataEquals(INodeDirectoryAttributes other) {
+      return other != null && getQuotaCounts().equals(other.getQuotaCounts())
+          && getPermissionLong() == other.getPermissionLong()
+          // Acl feature maintains a reference counted map, thereby
+          // every snapshot copy should point to the same Acl object unless
+          // there is no change in acl values.
+          // Reference equals is hence intentional here.
+          && getAclFeature() == other.getAclFeature()
+          && Objects.equals(getXAttrFeature(), other.getXAttrFeature());
+    }
+
     @Override
     public String getFullPathName() {
       return getSnapshotPath(getParent().getFullPathName(), getLocalName());
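
Note the deliberate asymmetry in metadataEquals: AclFeature is compared by reference, XAttrFeature by value. As the inline comment says, ACL features live in a reference-counted map, so snapshot copies of an unchanged ACL resolve to one shared instance and identity comparison is sufficient. A rough sketch of that interning idea, with hypothetical names (InternPool, intern, release) rather than the actual AclStorage code:

import java.util.HashMap;
import java.util.Map;

final class InternPool<T> {
  private final Map<T, T> pool = new HashMap<>();
  private final Map<T, Integer> refCount = new HashMap<>();

  // Returns the canonical instance for the given value, bumping its count.
  synchronized T intern(T value) {
    T canonical = pool.computeIfAbsent(value, v -> v);
    refCount.merge(canonical, 1, Integer::sum);
    return canonical;
  }

  // Drops one reference; evicts the value once nothing points at it.
  synchronized void release(T value) {
    Integer count = refCount.get(value);
    if (count == null) {
      return;
    }
    if (count == 1) {
      refCount.remove(value);
      pool.remove(value);
    } else {
      refCount.put(value, count - 1);
    }
  }

  public static void main(String[] args) {
    InternPool<String> acls = new InternPool<>();
    // Two distinct but equal values come back as one shared instance.
    String a = acls.intern(new String("user::rwx"));
    String b = acls.intern(new String("user::rwx"));
    System.out.println(a == b); // true: same canonical instance
    acls.release(b);
    acls.release(a);
  }
}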

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
+import com.google.common.collect.Lists;
+
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
@@ -43,8 +45,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
-import com.google.common.collect.Lists;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.crypto.CryptoInputStream;
@@ -75,9 +75,10 @@ import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -1185,6 +1186,30 @@ public class TestEncryptionZones {
     }
   }
 
+  @Test
+  public void testEncryptionZonesWithSnapshots() throws Exception {
+    final Path snapshottable = new Path("/zones");
+    fsWrapper.mkdir(snapshottable, FsPermission.getDirDefault(),
+        true);
+    dfsAdmin.allowSnapshot(snapshottable);
+    dfsAdmin.createEncryptionZone(snapshottable, TEST_KEY, NO_TRASH);
+    fs.createSnapshot(snapshottable, "snap1");
+    SnapshotDiffReport report =
+        fs.getSnapshotDiffReport(snapshottable, "snap1", "");
+    Assert.assertEquals(0, report.getDiffList().size());
+    report =
+        fs.getSnapshotDiffReport(snapshottable, "snap1", "");
+    System.out.println(report);
+    Assert.assertEquals(0, report.getDiffList().size());
+    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.saveNamespace();
+    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    cluster.restartNameNode(true);
+    report =
+        fs.getSnapshotDiffReport(snapshottable, "snap1", "");
+    Assert.assertEquals(0, report.getDiffList().size());
+  }
+
   private class AuthorizationExceptionInjector extends EncryptionFaultInjector {
     @Override
     public void ensureKeyIsInitialized() throws IOException {
@@ -1731,7 +1756,6 @@
         true, fs.getFileStatus(rootDir).isEncrypted());
     assertEquals("File is encrypted",
         true, fs.getFileStatus(zoneFile).isEncrypted());
-    DFSTestUtil.verifyFilesNotEqual(fs, zoneFile, rawFile, len);
   }
 
   @Test
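
Both regression tests (the encryption-zone test above and the xattr test below) follow the same recipe: take a snapshot, assert an empty diff, force a checkpoint by entering safe mode and saving the namespace, restart the NameNode, and assert the diff is still empty. A sketch of that sequence factored into a reusable helper, assuming the MiniDFSCluster and DistributedFileSystem fixtures the tests already hold; the helper class and method names are hypothetical:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.junit.Assert;

final class SnapshotDiffRestartUtil {
  // Saves a new fsimage, restarts the NameNode, and re-checks that the
  // diff between the snapshot and the current state is still empty.
  static void checkpointRestartAndVerifyEmptyDiff(
      MiniDFSCluster cluster, DistributedFileSystem fs,
      Path snapshottable, String snapshotName) throws Exception {
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();               // persist xattrs/acls into the fsimage
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    cluster.restartNameNode(true);    // reload the namespace from the fsimage
    SnapshotDiffReport report =
        fs.getSnapshotDiffReport(snapshottable, snapshotName, "");
    Assert.assertEquals(0, report.getDiffList().size());
  }
}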

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java

@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IOUtils;
@@ -140,6 +141,31 @@ public class TestXAttrWithSnapshot {
     assertEquals(xattrs.size(), 0);
   }
 
+  @Test
+  public void testXattrWithSnapshotAndNNRestart() throws Exception {
+    // Init
+    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
+    hdfs.setXAttr(path, name1, value1);
+    hdfs.allowSnapshot(path);
+    hdfs.createSnapshot(path, snapshotName);
+    SnapshotDiffReport report =
+        hdfs.getSnapshotDiffReport(path, snapshotName, "");
+    System.out.println(report);
+    Assert.assertEquals(0, report.getDiffList().size());
+    report =
+        hdfs.getSnapshotDiffReport(path, snapshotName, "");
+    System.out.println(report);
+    Assert.assertEquals(0, report.getDiffList().size());
+    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+    cluster.restartNameNode(true);
+    report =
+        hdfs.getSnapshotDiffReport(path, snapshotName, "");
+    System.out.println(report);
+    Assert.assertEquals(0, report.getDiffList().size());
+  }
+
   /**
    * Tests removing xattrs on a directory that has been snapshotted
    */