HDFS-13830. Backport HDFS-13141 to branch-3.0: WebHDFS: Add support for getting snapshottable directory list. Contributed by Siyao Meng, Lokesh Jain.

Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>
Siyao Meng 2018-09-21 12:37:43 -07:00 committed by Wei-Chiu Chuang
parent fc6d851f70
commit 2449795b8e
9 changed files with 205 additions and 10 deletions
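Taken together, the change threads one new WebHDFS operation, GETSNAPSHOTTABLEDIRECTORYLIST, through the full stack: FileStatus and HdfsFileStatus learn to carry the snapshot-enabled attribute, GetOpParam and NamenodeWebHdfsMethods expose the op on the NameNode, JsonUtil serializes the listing, and JsonUtilClient plus WebHdfsFileSystem decode it on the client. For orientation, a client would use the new API roughly as follows (a minimal sketch; the NameNode address and printed fields are illustrative, not part of this commit):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
    import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

    public class ListSnapshottableDirs {
      public static void main(String[] args) throws Exception {
        // webhdfs:// URIs are served by the NameNode HTTP endpoint
        // (9870 is the Hadoop 3 default port; adjust for your cluster).
        Configuration conf = new Configuration();
        WebHdfsFileSystem webHdfs = (WebHdfsFileSystem) FileSystem.get(
            URI.create("webhdfs://localhost:9870"), conf);
        // New in this commit: issues op=GETSNAPSHOTTABLEDIRECTORYLIST.
        SnapshottableDirectoryStatus[] dirs =
            webHdfs.getSnapshottableDirectoryList();
        for (SnapshottableDirectoryStatus s : dirs) {
          System.out.println(s.getFullPath() + " snapshots="
              + s.getSnapshotNumber() + " quota=" + s.getSnapshotQuota());
        }
        webHdfs.close();
      }
    }

Over the wire this returns the same data that DistributedFileSystem#getSnapshottableDirListing fetches over RPC, which is exactly what the new test at the end of this change asserts.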

View File

@@ -56,15 +56,25 @@ public class FileStatus implements Writable, Comparable<Object>,
   private Path symlink;
   private Set<AttrFlags> attr;
 
-  private enum AttrFlags {
+  public enum AttrFlags {
     HAS_ACL,
     HAS_CRYPT,
     HAS_EC,
     SNAPSHOT_ENABLED
   }
-  private static final Set<AttrFlags> NONE = Collections.<AttrFlags>emptySet();
-  private static Set<AttrFlags> flags(boolean acl, boolean crypt, boolean ec) {
-    if (!(acl || crypt || ec)) {
+  public static final Set<AttrFlags> NONE = Collections.<AttrFlags>emptySet();
+
+  /**
+   * Convert boolean attributes to a set of flags.
+   * @param acl See {@link AttrFlags#HAS_ACL}.
+   * @param crypt See {@link AttrFlags#HAS_CRYPT}.
+   * @param ec See {@link AttrFlags#HAS_EC}.
+   * @param sn See {@link AttrFlags#SNAPSHOT_ENABLED}.
+   * @return converted set of flags.
+   */
+  public static Set<AttrFlags> flags(boolean acl, boolean crypt,
+      boolean ec, boolean sn) {
+    if (!(acl || crypt || ec || sn)) {
       return NONE;
     }
     EnumSet<AttrFlags> ret = EnumSet.noneOf(AttrFlags.class);
@@ -77,6 +87,9 @@ public class FileStatus implements Writable, Comparable<Object>,
     if (ec) {
       ret.add(AttrFlags.HAS_EC);
     }
+    if (sn) {
+      ret.add(AttrFlags.SNAPSHOT_ENABLED);
+    }
     return ret;
   }
@@ -136,7 +149,7 @@ public class FileStatus implements Writable, Comparable<Object>,
     this.group = (group == null) ? "" : group;
     this.symlink = symlink;
     this.path = path;
-    attr = flags(hasAcl, isEncrypted, isErasureCoded);
+    attr = flags(hasAcl, isEncrypted, isErasureCoded, false);
 
     // The variables isdir and symlink indicate the type:
     // 1. isdir implies directory, in which case symlink must be null.
@@ -480,7 +493,8 @@ public class FileStatus implements Writable, Comparable<Object>,
     setGroup(other.getGroup());
     setSymlink((other.isSymlink() ? other.getSymlink() : null));
     setPath(other.getPath());
-    attr = flags(other.hasAcl(), other.isEncrypted(), other.isErasureCoded());
+    attr = flags(other.hasAcl(), other.isEncrypted(), other.isErasureCoded(),
+        other.isSnapshotEnabled());
 
     assert !(isDirectory() && isSymlink()) : "A directory cannot be a symlink";
   }
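Since flags(...) is now public, callers outside FileStatus (JsonUtilClient in this change) can rebuild the attribute set, including the new snapshot bit. A two-line sketch of the behavior the new signature guarantees:

    // All attributes off still collapses to the shared immutable NONE set.
    Set<FileStatus.AttrFlags> none = FileStatus.flags(false, false, false, false);
    // sn=true yields a set containing AttrFlags.SNAPSHOT_ENABLED.
    Set<FileStatus.AttrFlags> snap = FileStatus.flags(false, false, false, true);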

View File

@@ -88,6 +88,7 @@ public class DFSOpsCountStatistics extends StorageStatistics {
     SET_TIMES(CommonStatisticNames.OP_SET_TIMES),
     SET_XATTR("op_set_xattr"),
     GET_SNAPSHOT_DIFF("op_get_snapshot_diff"),
+    GET_SNAPSHOTTABLE_DIRECTORY_LIST("op_get_snapshottable_directory_list"),
     TRUNCATE(CommonStatisticNames.OP_TRUNCATE),
     UNSET_STORAGE_POLICY("op_unset_storage_policy");

View File

@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.Arrays;
 import java.util.EnumSet;
+import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -58,7 +59,32 @@ public class HdfsFileStatus extends FileStatus {
     HAS_ACL,
     HAS_CRYPT,
     HAS_EC,
-    SNAPSHOT_ENABLED
+    SNAPSHOT_ENABLED;
+
+    /**
+     * Generates an enum set of Flags from a set of attr flags.
+     * @param attr Set of attr flags
+     * @return EnumSet of Flags
+     */
+    public static EnumSet<Flags> convert(Set<AttrFlags> attr) {
+      if (attr.isEmpty()) {
+        return EnumSet.noneOf(Flags.class);
+      }
+      EnumSet<Flags> flags = EnumSet.noneOf(Flags.class);
+      if (attr.contains(AttrFlags.HAS_ACL)) {
+        flags.add(Flags.HAS_ACL);
+      }
+      if (attr.contains(AttrFlags.HAS_EC)) {
+        flags.add(Flags.HAS_EC);
+      }
+      if (attr.contains(AttrFlags.HAS_CRYPT)) {
+        flags.add(Flags.HAS_CRYPT);
+      }
+      if (attr.contains(AttrFlags.SNAPSHOT_ENABLED)) {
+        flags.add(Flags.SNAPSHOT_ENABLED);
+      }
+      return flags;
+    }
   }
+
   private final EnumSet<Flags> flags;
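Flags.convert(...) is the inverse bridge: it maps the public FileStatus.AttrFlags set produced by flags(...) above back onto the HdfsFileStatus.Flags values that the SnapshottableDirectoryStatus constructor expects. JsonUtilClient below chains exactly those two conversions when decoding a response.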

View File

@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -64,6 +65,7 @@ import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 class JsonUtilClient {
   static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
@@ -746,4 +748,45 @@ class JsonUtilClient {
     }
     return DFSUtilClient.string2Bytes(str);
   }
+
+  public static SnapshottableDirectoryStatus[] toSnapshottableDirectoryList(
+      final Map<?, ?> json) {
+    if (json == null) {
+      return null;
+    }
+    List<?> list = (List<?>) json.get("SnapshottableDirectoryList");
+    if (list == null) {
+      return null;
+    }
+    SnapshottableDirectoryStatus[] statuses =
+        new SnapshottableDirectoryStatus[list.size()];
+    for (int i = 0; i < list.size(); i++) {
+      statuses[i] = toSnapshottableDirectoryStatus((Map<?, ?>) list.get(i));
+    }
+    return statuses;
+  }
+
+  private static SnapshottableDirectoryStatus toSnapshottableDirectoryStatus(
+      Map<?, ?> json) {
+    if (json == null) {
+      return null;
+    }
+    int snapshotNumber = getInt(json, "snapshotNumber", 0);
+    int snapshotQuota = getInt(json, "snapshotQuota", 0);
+    byte[] parentFullPath = toByteArray((String) json.get("parentFullPath"));
+    HdfsFileStatus dirStatus =
+        toFileStatus((Map<?, ?>) json.get("dirStatus"), false);
+    Set<FileStatus.AttrFlags> attrFlags = FileStatus
+        .flags(dirStatus.hasAcl(), dirStatus.isEncrypted(),
+            dirStatus.isErasureCoded(), dirStatus.isSnapshotEnabled());
+    SnapshottableDirectoryStatus snapshottableDirectoryStatus =
+        new SnapshottableDirectoryStatus(dirStatus.getModificationTime(),
+            dirStatus.getAccessTime(), dirStatus.getPermission(),
+            HdfsFileStatus.Flags.convert(attrFlags), dirStatus.getOwner(),
+            dirStatus.getGroup(), dirStatus.getLocalNameInBytes(),
+            dirStatus.getFileId(), dirStatus.getChildrenNum(), snapshotNumber,
+            snapshotQuota, parentFullPath);
+    return snapshottableDirectoryStatus;
+  }
 }
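For reference, toSnapshottableDirectoryList(...) expects a payload of roughly the following shape (a hand-written illustration, not captured server output; dirStatus carries the usual WebHDFS FileStatus fields, and 65536 is the default snapshot quota):

    {
      "SnapshottableDirectoryList": [
        {
          "dirStatus": { ... },
          "parentFullPath": "/",
          "snapshotNumber": 1,
          "snapshotQuota": 65536
        }
      ]
    }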

View File

@@ -97,6 +97,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -1331,6 +1332,19 @@ public class WebHdfsFileSystem extends FileSystem
     }.run();
   }
 
+  public SnapshottableDirectoryStatus[] getSnapshottableDirectoryList()
+      throws IOException {
+    storageStatistics
+        .incrementOpCounter(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
+    final HttpOpParam.Op op = GetOpParam.Op.GETSNAPSHOTTABLEDIRECTORYLIST;
+    return new FsPathResponseRunner<SnapshottableDirectoryStatus[]>(op, null) {
+      @Override
+      SnapshottableDirectoryStatus[] decodeResponse(Map<?, ?> json) {
+        return JsonUtilClient.toSnapshottableDirectoryList(json);
+      }
+    }.run();
+  }
+
   @Override
   public boolean setReplication(final Path p, final short replication
       ) throws IOException {
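Note the null path passed to FsPathResponseRunner: the listing is cluster-wide, so the request is issued against the root resource rather than a specific file, and the incrementOpCounter call feeds the GET_SNAPSHOTTABLE_DIRECTORY_LIST statistic added to DFSOpsCountStatistics above.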

View File

@@ -48,7 +48,8 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
     CHECKACCESS(false, HttpURLConnection.HTTP_OK),
     LISTSTATUS_BATCH(false, HttpURLConnection.HTTP_OK),
     GETSERVERDEFAULTS(false, HttpURLConnection.HTTP_OK),
-    GETSNAPSHOTDIFF(false, HttpURLConnection.HTTP_OK);
+    GETSNAPSHOTDIFF(false, HttpURLConnection.HTTP_OK),
+    GETSNAPSHOTTABLEDIRECTORYLIST(false, HttpURLConnection.HTTP_OK);
 
     final boolean redirect;
     final int expectedHttpResponseCode;
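As with the neighboring entries, the new op is a plain non-redirecting GET that expects 200 OK, so once a NameNode runs this code it should be reachable directly with something like curl -i "http://<namenode>:<http-port>/webhdfs/v1/?op=GETSNAPSHOTTABLEDIRECTORYLIST" (the host and port placeholders are illustrative).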

View File

@@ -79,6 +79,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
@@ -1221,6 +1222,12 @@ public class NamenodeWebHdfsMethods {
       final String js = JsonUtil.toJsonString(diffReport);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
+    case GETSNAPSHOTTABLEDIRECTORYLIST: {
+      SnapshottableDirectoryStatus[] snapshottableDirectoryList =
+          cp.getSnapshottableDirListing();
+      final String js = JsonUtil.toJsonString(snapshottableDirectoryList);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
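Here cp is the ClientProtocol for the request, so the REST handler reuses the same getSnapshottableDirListing() RPC that DistributedFileSystem calls; the new test at the end of this change leans on that equivalence to compare the two code paths.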

View File

@@ -530,4 +530,24 @@ public class JsonUtil {
     }
     return m;
   }
+
+  public static String toJsonString(
+      SnapshottableDirectoryStatus[] snapshottableDirectoryList) {
+    Object[] a = new Object[snapshottableDirectoryList.length];
+    for (int i = 0; i < snapshottableDirectoryList.length; i++) {
+      a[i] = toJsonMap(snapshottableDirectoryList[i]);
+    }
+    return toJsonString("SnapshottableDirectoryList", a);
+  }
+
+  private static Object toJsonMap(
+      SnapshottableDirectoryStatus snapshottableDirectoryStatus) {
+    final Map<String, Object> m = new TreeMap<String, Object>();
+    m.put("snapshotNumber", snapshottableDirectoryStatus.getSnapshotNumber());
+    m.put("snapshotQuota", snapshottableDirectoryStatus.getSnapshotQuota());
+    m.put("parentFullPath", DFSUtilClient
+        .bytes2String(snapshottableDirectoryStatus.getParentFullPath()));
+    m.put("dirStatus", toJsonMap(snapshottableDirectoryStatus.getDirStatus()));
+    return m;
+  }
 }
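The serializer mirrors the client-side parser field for field, and building the map as a TreeMap keeps the keys in the emitted JSON in sorted order, which makes responses deterministic across runs.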

View File

@@ -640,7 +640,7 @@ public class TestWebHDFS {
   }
 
   /**
-   * Test snapshot deletion through WebHdfs
+   * Test snapshot deletion through WebHdfs.
    */
   @Test
   public void testWebHdfsDeleteSnapshot() throws Exception {
@@ -685,7 +685,7 @@ public class TestWebHDFS {
   }
 
   /**
-   * Test snapshot diff through WebHdfs
+   * Test snapshot diff through WebHdfs.
    */
   @Test
   public void testWebHdfsSnapshotDiff() throws Exception {
@@ -756,6 +756,75 @@ public class TestWebHDFS {
     }
   }
 
+  /**
+   * Test snapshottable directory list through WebHdfs.
+   */
+  @Test
+  public void testWebHdfsSnapshottableDirectoryList() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil
+          .getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
+      final Path foo = new Path("/foo");
+      final Path bar = new Path("/bar");
+      dfs.mkdirs(foo);
+      dfs.mkdirs(bar);
+      dfs.allowSnapshot(foo);
+      dfs.allowSnapshot(bar);
+      Path file0 = new Path(foo, "file0");
+      DFSTestUtil.createFile(dfs, file0, 100, (short) 1, 0);
+      Path file1 = new Path(bar, "file1");
+      DFSTestUtil.createFile(dfs, file1, 100, (short) 1, 0);
+      SnapshottableDirectoryStatus[] statuses =
+          webHdfs.getSnapshottableDirectoryList();
+      SnapshottableDirectoryStatus[] dfsStatuses =
+          dfs.getSnapshottableDirListing();
+
+      for (int i = 0; i < dfsStatuses.length; i++) {
+        Assert.assertEquals(statuses[i].getSnapshotNumber(),
+            dfsStatuses[i].getSnapshotNumber());
+        Assert.assertEquals(statuses[i].getSnapshotQuota(),
+            dfsStatuses[i].getSnapshotQuota());
+        Assert.assertTrue(Arrays.equals(statuses[i].getParentFullPath(),
+            dfsStatuses[i].getParentFullPath()));
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getChildrenNum(),
+            statuses[i].getDirStatus().getChildrenNum());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getModificationTime(),
+            statuses[i].getDirStatus().getModificationTime());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().isDirectory(),
+            statuses[i].getDirStatus().isDirectory());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getAccessTime(),
+            statuses[i].getDirStatus().getAccessTime());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getPermission(),
+            statuses[i].getDirStatus().getPermission());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getOwner(),
+            statuses[i].getDirStatus().getOwner());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getGroup(),
+            statuses[i].getDirStatus().getGroup());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getPath(),
+            statuses[i].getDirStatus().getPath());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getFileId(),
+            statuses[i].getDirStatus().getFileId());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().hasAcl(),
+            statuses[i].getDirStatus().hasAcl());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().isEncrypted(),
+            statuses[i].getDirStatus().isEncrypted());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().isErasureCoded(),
+            statuses[i].getDirStatus().isErasureCoded());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().isSnapshotEnabled(),
+            statuses[i].getDirStatus().isSnapshotEnabled());
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   @Test
   public void testWebHdfsCreateNonRecursive() throws IOException, URISyntaxException {
     MiniDFSCluster cluster = null;