HDFS-15611. Add list Snapshot command in WebHDFS. (#2355)

bshashikant 2020-10-07 10:34:32 +05:30 committed by GitHub
parent 074f0d46af
commit 16aea11c94
14 changed files with 412 additions and 6 deletions


@@ -73,9 +73,18 @@ public SnapshotStatus(long modificationTime, long accessTime,
this.parentFullPath = parentFullPath;
}
public SnapshotStatus(HdfsFileStatus dirStatus,
int snapshotID, boolean isDeleted,
byte[] parentFullPath) {
this.dirStatus = dirStatus;
this.snapshotID = snapshotID;
this.isDeleted = isDeleted;
this.parentFullPath = parentFullPath;
}
/**
- * sets the prent path name.
- * @param path parent path
+ * sets the path name.
+ * @param path path
*/
public void setParentFullPath(byte[] path) {
parentFullPath = path;
@@ -174,7 +183,7 @@ private static int maxLength(int n, Object value) {
return Math.max(n, String.valueOf(value).length());
}
-static String getSnapshotPath(String snapshottableDir,
+public static String getSnapshotPath(String snapshottableDir,
String snapshotRelativePath) {
String parentFullPathStr =
snapshottableDir == null || snapshottableDir.isEmpty() ?
@@ -188,4 +197,9 @@ static String getSnapshotPath(String snapshottableDir,
.append(snapshotRelativePath)
.toString();
}
public static String getParentPath(String snapshotPath) {
int index = snapshotPath.indexOf(HdfsConstants.DOT_SNAPSHOT_DIR);
return index == -1 ? snapshotPath : snapshotPath.substring(0, index - 1);
}
}
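Taken together, `getSnapshotPath` and the new `getParentPath` round-trip between a snapshot's full path and its snapshottable parent. A minimal sketch with illustrative paths (the `/foo` directory and `s1` snapshot name are assumptions, not taken from the patch):

```java
// getSnapshotPath joins parent + "/.snapshot/" + snapshot name
// (HdfsConstants.DOT_SNAPSHOT_DIR is ".snapshot").
String snapPath = SnapshotStatus.getSnapshotPath("/foo", "s1");
// snapPath == "/foo/.snapshot/s1"

// getParentPath strips everything from "/.snapshot" onwards.
String parent = SnapshotStatus.getParentPath(snapPath);
// parent == "/foo"
```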


@@ -41,6 +41,7 @@
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -872,4 +873,39 @@ private static SnapshottableDirectoryStatus toSnapshottableDirectoryStatus(
snapshotQuota, parentFullPath);
return snapshottableDirectoryStatus;
}
public static SnapshotStatus[] toSnapshotList(final Map<?, ?> json) {
if (json == null) {
return null;
}
List<?> list = (List<?>) json.get("SnapshotList");
if (list == null) {
return null;
}
SnapshotStatus[] statuses =
new SnapshotStatus[list.size()];
for (int i = 0; i < list.size(); i++) {
statuses[i] = toSnapshotStatus((Map<?, ?>) list.get(i));
}
return statuses;
}
private static SnapshotStatus toSnapshotStatus(
Map<?, ?> json) {
if (json == null) {
return null;
}
int snapshotID = getInt(json, "snapshotID", 0);
boolean isDeleted = "DELETED".equalsIgnoreCase(
(String)json.get("deletionStatus"));
String fullPath = ((String) json.get("fullPath"));
HdfsFileStatus dirStatus =
toFileStatus((Map<?, ?>) json.get("dirStatus"), false);
SnapshotStatus snapshotStatus =
new SnapshotStatus(dirStatus, snapshotID,
isDeleted, DFSUtilClient.string2Bytes(
SnapshotStatus.getParentPath(fullPath)));
return snapshotStatus;
}
}
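Note the asymmetry in the wire format: the server sends a single `fullPath` (parent plus `/.snapshot/` plus snapshot name) and the client reconstructs the parent with `SnapshotStatus.getParentPath`. A hedged sketch of the decoder's input, with hypothetical values and field names mirroring `JsonUtil#toJsonMap(SnapshotStatus)` added later in this patch:

```java
Map<String, Object> snap = new TreeMap<>();
snap.put("snapshotID", 0);
snap.put("deletionStatus", "ACTIVE");
snap.put("fullPath", "/foo/.snapshot/s1");
// a real response also carries a populated "dirStatus" FileStatus map

Map<String, Object> json = new TreeMap<>();
json.put("SnapshotList", Collections.singletonList(snap));

SnapshotStatus[] parsed = JsonUtilClient.toSnapshotList(json);
// parsed[0]: snapshotID == 0, isDeleted() == false,
// and the parent path is recovered from fullPath as "/foo"
```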


@@ -106,6 +106,7 @@
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -1459,6 +1460,19 @@ SnapshottableDirectoryStatus[] decodeResponse(Map<?, ?> json) {
}.run();
}
public SnapshotStatus[] getSnapshotListing(final Path snapshotDir)
throws IOException {
storageStatistics
.incrementOpCounter(OpType.GET_SNAPSHOT_LIST);
final HttpOpParam.Op op = GetOpParam.Op.GETSNAPSHOTLIST;
return new FsPathResponseRunner<SnapshotStatus[]>(op, snapshotDir) {
@Override
SnapshotStatus[] decodeResponse(Map<?, ?> json) {
return JsonUtilClient.toSnapshotList(json);
}
}.run();
}
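Client-side, the new operation is used like any other `WebHdfsFileSystem` call. A usage sketch, assuming a NameNode with WebHDFS on the default HTTP port 9870 and a snapshottable directory `/foo` (both assumptions):

```java
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode:9870"), conf);
SnapshotStatus[] snapshots =
    ((WebHdfsFileSystem) fs).getSnapshotListing(new Path("/foo"));
for (SnapshotStatus s : snapshots) {
  // the local name of dirStatus is the snapshot's own name
  System.out.println(s.getDirStatus().getLocalName()
      + " id=" + s.getSnapshotID() + " deleted=" + s.isDeleted());
}
```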
@Override
public boolean setReplication(final Path p, final short replication
) throws IOException {


@@ -62,7 +62,8 @@ public enum Op implements HttpOpParam.Op {
LISTSTATUS_BATCH(false, HttpURLConnection.HTTP_OK),
GETSERVERDEFAULTS(false, HttpURLConnection.HTTP_OK),
GETSNAPSHOTDIFF(false, HttpURLConnection.HTTP_OK),
-GETSNAPSHOTTABLEDIRECTORYLIST(false, HttpURLConnection.HTTP_OK);
+GETSNAPSHOTTABLEDIRECTORYLIST(false, HttpURLConnection.HTTP_OK),
+GETSNAPSHOTLIST(false, HttpURLConnection.HTTP_OK);
final boolean redirect;
final int expectedHttpResponseCode;
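The two constructor arguments record that the NameNode answers the call itself (no datanode redirect) and that success is HTTP 200. A quick sketch of how the `op` query parameter maps onto the enum (asserts for illustration only):

```java
GetOpParam op = new GetOpParam("GETSNAPSHOTLIST");
assert op.getValue() == GetOpParam.Op.GETSNAPSHOTLIST;
assert !op.getValue().getRedirect();                        // served by the NameNode
assert op.getValue().getExpectedHttpResponseCode() == 200;  // HTTP_OK
```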


@@ -54,6 +54,7 @@
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
import org.apache.hadoop.hdfs.web.JsonUtilClient;
import org.apache.hadoop.lib.wsrs.EnumSetParam;
import org.apache.hadoop.security.UserGroupInformation;
@@ -262,7 +263,8 @@ public enum Operation {
ALLOWSNAPSHOT(HTTP_PUT), DISALLOWSNAPSHOT(HTTP_PUT),
CREATESNAPSHOT(HTTP_PUT), DELETESNAPSHOT(HTTP_DELETE),
RENAMESNAPSHOT(HTTP_PUT), GETSNAPSHOTDIFF(HTTP_GET),
-GETSNAPSHOTTABLEDIRECTORYLIST(HTTP_GET), GETSERVERDEFAULTS(HTTP_GET),
+GETSNAPSHOTTABLEDIRECTORYLIST(HTTP_GET), GETSNAPSHOTLIST(HTTP_GET),
+GETSERVERDEFAULTS(HTTP_GET),
CHECKACCESS(HTTP_GET), SETECPOLICY(HTTP_PUT), GETECPOLICY(
HTTP_GET), UNSETECPOLICY(HTTP_POST), SATISFYSTORAGEPOLICY(HTTP_PUT);
@@ -1582,6 +1584,18 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirectoryList()
return JsonUtilClient.toSnapshottableDirectoryList(json);
}
public SnapshotStatus[] getSnapshotListing(Path snapshotRoot)
throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.GETSNAPSHOTLIST.toString());
HttpURLConnection conn = getConnection(
Operation.GETSNAPSHOTLIST.getMethod(),
params, snapshotRoot, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
return JsonUtilClient.toSnapshotList(json);
}
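For reference, the method above issues the same request documented for WebHDFS later in this patch — `GET http://<HTTPFS_HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETSNAPSHOTLIST`, with host and port as placeholders — the only difference being that the HttpFS gateway proxies the call to the NameNode.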
/**
* This filesystem's capabilities must be in sync with that of
* {@code DistributedFileSystem.hasPathCapability()} except


@@ -44,6 +44,7 @@
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
import org.apache.hadoop.hdfs.web.JsonUtil;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.lib.service.FileSystemAccess;
@@ -1834,6 +1835,43 @@ public String execute(FileSystem fs) throws IOException {
}
}
/**
* Executor that performs a getSnapshotListing operation.
*/
@InterfaceAudience.Private
public static class FSGetSnapshotListing implements
FileSystemAccess.FileSystemExecutor<String> {
private Path path;
/**
* Creates a getSnapshotListing executor.
* @param path path of the snapshottable directory whose snapshots are listed.
*/
public FSGetSnapshotListing(String path) {
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
* @param fs filesystem instance to use.
* @return A JSON string of all snapshots for a snapshottable directory.
* @throws IOException thrown if an IO error occurred.
*/
@Override
public String execute(FileSystem fs) throws IOException {
SnapshotStatus[] sds = null;
if (fs instanceof DistributedFileSystem) {
DistributedFileSystem dfs = (DistributedFileSystem) fs;
sds = dfs.getSnapshotListing(path);
} else {
throw new UnsupportedOperationException("getSnapshotListing is "
+ "not supported for HttpFs on " + fs.getClass()
+ ". Please check your fs.defaultFS configuration");
}
return JsonUtil.toJsonString(sds);
}
}
/**
* Executor that performs a getServerDefaults operation.
*/


@@ -116,6 +116,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
new Class[] {OldSnapshotNameParam.class,
SnapshotNameParam.class});
PARAMS_DEF.put(Operation.GETSNAPSHOTTABLEDIRECTORYLIST, new Class[] {});
PARAMS_DEF.put(Operation.GETSNAPSHOTLIST, new Class[] {});
PARAMS_DEF.put(Operation.GETSERVERDEFAULTS, new Class[] {});
PARAMS_DEF.put(Operation.CHECKACCESS, new Class[] {FsActionParam.class});
PARAMS_DEF.put(Operation.SETECPOLICY, new Class[] {ECPolicyParam.class});


@@ -458,6 +458,14 @@ public InputStream run() throws Exception {
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETSNAPSHOTLIST: {
FSOperations.FSGetSnapshotListing command =
new FSOperations.FSGetSnapshotListing(path);
String js = fsExecute(user, command);
AUDIT_LOG.info("[{}]", "/");
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETSERVERDEFAULTS: {
FSOperations.FSGetServerDefaults command =
new FSOperations.FSGetServerDefaults();


@@ -51,6 +51,7 @@
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.web.JsonUtil;
@@ -1158,7 +1159,8 @@ protected enum Operation {
CREATE_SNAPSHOT, RENAME_SNAPSHOT, DELETE_SNAPSHOT,
ALLOW_SNAPSHOT, DISALLOW_SNAPSHOT, DISALLOW_SNAPSHOT_EXCEPTION,
FILE_STATUS_ATTR, GET_SNAPSHOT_DIFF, GET_SNAPSHOTTABLE_DIRECTORY_LIST,
-GET_SERVERDEFAULTS, CHECKACCESS, SETECPOLICY, SATISFYSTORAGEPOLICY
+GET_SNAPSHOT_LIST, GET_SERVERDEFAULTS, CHECKACCESS, SETECPOLICY,
+SATISFYSTORAGEPOLICY
}
private void operation(Operation op) throws Exception {
@@ -1279,6 +1281,9 @@ private void operation(Operation op) throws Exception {
case GET_SNAPSHOTTABLE_DIRECTORY_LIST:
testGetSnapshottableDirListing();
break;
case GET_SNAPSHOT_LIST:
testGetSnapshotListing();
break;
case GET_SERVERDEFAULTS:
testGetServerDefaults();
break;
@@ -1657,6 +1662,50 @@ private void verifyGetSnapshottableDirListing(
JsonUtil.toJsonString(dfssds));
}
private void testGetSnapshotListing() throws Exception {
if (!this.isLocalFS()) {
// Create a directory with snapshot allowed
Path path = new Path("/tmp/tmp-snap-test");
createSnapshotTestsPreconditions(path);
// Get the FileSystem instance that's being tested
FileSystem fs = this.getHttpFSFileSystem();
// Check FileStatus
Assert.assertTrue(fs.getFileStatus(path).isSnapshotEnabled());
// Create a file and take a snapshot
Path file1 = new Path(path, "file1");
testCreate(file1, false);
fs.createSnapshot(path, "snap1");
// Create another file and take a snapshot
Path file2 = new Path(path, "file2");
testCreate(file2, false);
fs.createSnapshot(path, "snap2");
// Get the snapshot listing
SnapshotStatus[] snapshotStatus = null;
if (fs instanceof HttpFSFileSystem) {
HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
snapshotStatus = httpFS.getSnapshotListing(path);
} else if (fs instanceof WebHdfsFileSystem) {
WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) fs;
snapshotStatus = webHdfsFileSystem.getSnapshotListing(path);
} else {
Assert.fail(fs.getClass().getSimpleName() +
" doesn't support getSnapshotListing");
}
// Verify result with DFS
DistributedFileSystem dfs = (DistributedFileSystem)
FileSystem.get(path.toUri(), this.getProxiedFSConf());
SnapshotStatus[] dfsStatus =
dfs.getSnapshotListing(path);
Assert.assertEquals(JsonUtil.toJsonString(snapshotStatus),
JsonUtil.toJsonString(dfsStatus));
// Cleanup
fs.deleteSnapshot(path, "snap2");
fs.deleteSnapshot(path, "snap1");
fs.delete(path, true);
}
}
private void testGetSnapshottableDirListing() throws Exception {
if (!this.isLocalFS()) {
FileSystem fs = this.getHttpFSFileSystem();


@@ -32,6 +32,7 @@
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.web.JsonUtil;
@@ -1515,6 +1516,23 @@ private void verifyGetSnapshottableDirectoryList(DistributedFileSystem dfs)
Assert.assertEquals(dirLst, JsonUtil.toJsonString(dfsDirLst));
}
private void verifyGetSnapshotList(DistributedFileSystem dfs, Path path)
throws Exception {
// Send a request
HttpURLConnection conn = sendRequestToHttpFSServer(path.toString(),
"GETSNAPSHOTLIST", "");
// Should return HTTP_OK
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
// Verify the response
BufferedReader reader =
new BufferedReader(new InputStreamReader(conn.getInputStream()));
// The response should be a one-line JSON string.
String dirLst = reader.readLine();
// Verify the content of status with DFS API.
SnapshotStatus[] dfsDirLst = dfs.getSnapshotListing(path);
Assert.assertEquals(dirLst, JsonUtil.toJsonString(dfsDirLst));
}
@Test
@TestDir
@TestJetty
@@ -1550,6 +1568,35 @@ public void testGetSnapshottableDirectoryList() throws Exception {
verifyGetSnapshottableDirectoryList(dfs);
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGetSnapshotList() throws Exception {
createHttpFSServer(false, false);
// Create test directories
String pathStr = "/tmp/tmp-snap-list-test-1";
createDirWithHttp(pathStr, "700", null);
Path path = new Path(pathStr);
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
path.toUri(), TestHdfsHelper.getHdfsConf());
// Enable snapshots on the directory
dfs.allowSnapshot(path);
Assert.assertTrue(dfs.getFileStatus(path).isSnapshotEnabled());
// Verify response when the directory has no snapshots yet
verifyGetSnapshotList(dfs, path);
// Create a file and take a snapshot
String file1 = pathStr + "/file1";
createWithHttp(file1, null);
dfs.createSnapshot(path, "snap1");
// Create another file and take a snapshot
String file2 = pathStr + "/file2";
createWithHttp(file2, null);
dfs.createSnapshot(path, "snap2");
verifyGetSnapshotList(dfs, path);
}
@Test
@TestDir
@TestJetty


@@ -86,6 +86,7 @@
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
@@ -1340,6 +1341,12 @@ protected Response get(
final String js = JsonUtil.toJsonString(snapshottableDirectoryList);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case GETSNAPSHOTLIST: {
SnapshotStatus[] snapshotList =
cp.getSnapshotListing(fullpath);
final String js = JsonUtil.toJsonString(snapshotList);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
default:
throw new UnsupportedOperationException(op + " is not supported");
}


@@ -588,6 +588,17 @@ public static String toJsonString(
return toJsonString("SnapshottableDirectoryList", a);
}
public static String toJsonString(SnapshotStatus[] snapshotList) {
if (snapshotList == null) {
return toJsonString("SnapshotList", null);
}
Object[] a = new Object[snapshotList.length];
for (int i = 0; i < snapshotList.length; i++) {
a[i] = toJsonMap(snapshotList[i]);
}
return toJsonString("SnapshotList", a);
}
private static Object toJsonMap(
SnapshottableDirectoryStatus snapshottableDirectoryStatus) {
final Map<String, Object> m = new TreeMap<String, Object>();
@@ -599,6 +610,19 @@ private static Object toJsonMap(
return m;
}
private static Object toJsonMap(
SnapshotStatus snapshotStatus) {
final Map<String, Object> m = new TreeMap<String, Object>();
HdfsFileStatus status = snapshotStatus.getDirStatus();
m.put("snapshotID", snapshotStatus.getSnapshotID());
m.put("deletionStatus", snapshotStatus.isDeleted() ? "DELETED" : "ACTIVE");
m.put("fullPath", SnapshotStatus.getSnapshotPath(
DFSUtilClient.bytes2String(snapshotStatus.getParentFullPath()),
status.getLocalName()));
m.put("dirStatus", toJsonMap(snapshotStatus.getDirStatus()));
return m;
}
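With the `TreeMap` above, keys serialize in sorted order, producing the `SnapshotList` shape documented in WebHDFS.md below. A hedged server-side sketch (`dfs` is an assumed `DistributedFileSystem` handle, `/foo` an assumed snapshottable directory):

```java
SnapshotStatus[] listing = dfs.getSnapshotListing(new Path("/foo"));
String js = JsonUtil.toJsonString(listing);
// js ~= {"SnapshotList":[{"deletionStatus":"ACTIVE","dirStatus":{...},
//                         "fullPath":"/foo/.snapshot/s1","snapshotID":0}]}
```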
private static Map<String, Object> toJsonMap(
final BlockLocation blockLocation) throws IOException {
if (blockLocation == null) {


@@ -53,6 +53,7 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
* [`GETSTORAGEPOLICY`](#Get_Storage_Policy) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getStoragePolicy)
* [`GETSNAPSHOTDIFF`](#Get_Snapshot_Diff)
* [`GETSNAPSHOTTABLEDIRECTORYLIST`](#Get_Snapshottable_Directory_List)
* [`GETSNAPSHOTLIST`](#Get_Snapshot_List)
* [`GETFILEBLOCKLOCATIONS`](#Get_File_Block_Locations) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileBlockLocations)
* [`GETECPOLICY`](#Get_EC_Policy) (see [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).getErasureCodingPolicy)
* HTTP PUT
@@ -1642,6 +1643,46 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renameSnapshot
]
}
### Get Snapshot List
* Submit a HTTP GET request.
curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETSNAPSHOTLIST"
The call lists the snapshots for a snapshottable directory. The client receives a response with a [`SnapshotList` JSON object](#SnapshotList_JSON_Schema):
HTTP/1.1 200 OK
Content-Type: application/json
Transfer-Encoding: chunked
{
"SnapshotList":
[
{
"dirStatus":
{
"accessTime":0,
"blockSize":0,
"childrenNum":0,
"fileId":16386,
"group":"hadoop",
"length":0,
"modificationTime":1520761889225,
"owner":"random",
"pathSuffix":"bar",
"permission":"755",
"replication":0,
"storagePolicy":0,
"type":"DIRECTORY"
},
"fullPath":"/",
"snapshotID":0,
"deletionStatus":ACTIVE
}
]
}
Delegation Token Operations
---------------------------
@@ -2675,6 +2716,57 @@ var snapshottableDirectoryStatus =
}
}
```
### SnapshotList JSON Schema
```json
{
"name": "SnapshotList",
"type": "object",
"properties":
{
"SnapshotList":
{
"description": "An array of SnapshotStatus",
"type" : "array",
"items" : snapshotStatus,
"required" : true
}
}
}
```
#### SnapshotStatus
JavaScript syntax is used to define `snapshotStatus` so that it can be referred to in the `SnapshotList` JSON schema.
```javascript
var snapshotStatus =
{
"type": "object",
"properties":
{
"dirStatus": fileStatusProperties,
"fullPath":
{
"description" : "Full path of the parent of the snapshot",
"type" : "string",
"required" : true
},
"snapshotID":
{
"description" : "snapshot ID for the snapshot",
"type" : "integer",
"required" : true
},
"deletionStatus":
{
"description" : "Status showing whether the snapshot is active or in deleted state",
"type" : "string",
"required" : true
}
}
}
```
### BlockLocations JSON Schema


@@ -106,6 +106,7 @@
import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -843,6 +844,66 @@ public void testWebHdfsSnapshottableDirectoryList() throws Exception {
}
}
@Test
public void testWebHdfsSnapshotList() throws Exception {
final Configuration conf = WebHdfsTestUtil.createConf();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final WebHdfsFileSystem webHdfs = WebHdfsTestUtil
.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
final Path foo = new Path("/foo");
dfs.mkdirs(foo);
dfs.allowSnapshot(foo);
webHdfs.createSnapshot(foo, "s1");
webHdfs.createSnapshot(foo, "s2");
webHdfs.deleteSnapshot(foo, "s2");
SnapshotStatus[] statuses = webHdfs.getSnapshotListing(foo);
SnapshotStatus[] dfsStatuses = dfs.getSnapshotListing(foo);
for (int i = 0; i < dfsStatuses.length; i++) {
Assert.assertEquals(statuses[i].getSnapshotID(),
dfsStatuses[i].getSnapshotID());
Assert.assertEquals(statuses[i].isDeleted(),
dfsStatuses[i].isDeleted());
Assert.assertTrue(Arrays.equals(statuses[i].getParentFullPath(),
dfsStatuses[i].getParentFullPath()));
Assert.assertEquals(dfsStatuses[i].getDirStatus().getChildrenNum(),
statuses[i].getDirStatus().getChildrenNum());
Assert.assertEquals(dfsStatuses[i].getDirStatus().getModificationTime(),
statuses[i].getDirStatus().getModificationTime());
Assert.assertEquals(dfsStatuses[i].getDirStatus().isDir(),
statuses[i].getDirStatus().isDir());
Assert.assertEquals(dfsStatuses[i].getDirStatus().getAccessTime(),
statuses[i].getDirStatus().getAccessTime());
Assert.assertEquals(dfsStatuses[i].getDirStatus().getPermission(),
statuses[i].getDirStatus().getPermission());
Assert.assertEquals(dfsStatuses[i].getDirStatus().getOwner(),
statuses[i].getDirStatus().getOwner());
Assert.assertEquals(dfsStatuses[i].getDirStatus().getGroup(),
statuses[i].getDirStatus().getGroup());
Assert.assertEquals(dfsStatuses[i].getDirStatus().getPath(),
statuses[i].getDirStatus().getPath());
Assert.assertEquals(dfsStatuses[i].getDirStatus().getFileId(),
statuses[i].getDirStatus().getFileId());
Assert.assertEquals(dfsStatuses[i].getDirStatus().hasAcl(),
statuses[i].getDirStatus().hasAcl());
Assert.assertEquals(dfsStatuses[i].getDirStatus().isEncrypted(),
statuses[i].getDirStatus().isEncrypted());
Assert.assertEquals(dfsStatuses[i].getDirStatus().isErasureCoded(),
statuses[i].getDirStatus().isErasureCoded());
Assert.assertEquals(dfsStatuses[i].getDirStatus().isSnapshotEnabled(),
statuses[i].getDirStatus().isSnapshotEnabled());
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testWebHdfsCreateNonRecursive() throws IOException, URISyntaxException {
final Configuration conf = WebHdfsTestUtil.createConf();