diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteSkipTrashParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteSkipTrashParam.java
deleted file mode 100644
index 5ca9d69d7c8..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteSkipTrashParam.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/**
- * SkipTrash param to be used by DELETE query.
- */
-public class DeleteSkipTrashParam extends BooleanParam {
-
-  public static final String NAME = "skiptrash";
-  public static final String DEFAULT = FALSE;
-
-  private static final Domain DOMAIN = new Domain(NAME);
-
-  /**
-   * Constructor.
-   * @param value the parameter value.
-   */
-  public DeleteSkipTrashParam(final Boolean value) {
-    super(DOMAIN, value);
-  }
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public DeleteSkipTrashParam(final String str) {
-    this(DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 3cf7dad1dad..2866044badc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -122,7 +122,6 @@ public class HttpFSFileSystem extends FileSystem
   public static final String ACLSPEC_PARAM = "aclspec";
   public static final String DESTINATION_PARAM = "destination";
   public static final String RECURSIVE_PARAM = "recursive";
-  public static final String SKIP_TRASH_PARAM = "skiptrash";
   public static final String SOURCES_PARAM = "sources";
   public static final String OWNER_PARAM = "owner";
   public static final String GROUP_PARAM = "group";
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index a092db09eed..e272cdc71b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
@@ -54,8 +53,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.json.simple.JSONArray;
 import org.json.simple.JSONObject;
 import org.apache.hadoop.fs.permission.FsCreateModes;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -78,8 +75,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTP_BUFFER_SIZE_DEFAULT;
 @InterfaceAudience.Private
 public final class FSOperations {

-  private static final Logger LOG = LoggerFactory.getLogger(FSOperations.class);
-
   private static int bufferSize = 4096;

   private FSOperations() {
@@ -722,22 +717,18 @@ public final class FSOperations {
    */
  @InterfaceAudience.Private
  public static class FSDelete implements FileSystemAccess.FileSystemExecutor<JSONObject> {
-    private final Path path;
-    private final boolean recursive;
-    private final boolean skipTrash;
+    private Path path;
+    private boolean recursive;

     /**
      * Creates a Delete executor.
      *
      * @param path path to delete.
      * @param recursive if the delete should be recursive or not.
-     * @param skipTrash if the file must be deleted and not kept in trash
-     * regardless of fs.trash.interval config value.
      */
-    public FSDelete(String path, boolean recursive, boolean skipTrash) {
+    public FSDelete(String path, boolean recursive) {
       this.path = new Path(path);
       this.recursive = recursive;
-      this.skipTrash = skipTrash;
     }

     /**
@@ -752,19 +743,6 @@ public final class FSOperations {
      */
     @Override
     public JSONObject execute(FileSystem fs) throws IOException {
-      if (!skipTrash) {
-        boolean movedToTrash = Trash.moveToAppropriateTrash(fs, path,
-            fs.getConf());
-        if (movedToTrash) {
-          HttpFSServerWebApp.getMetrics().incrOpsDelete();
-          return toJSON(
-              StringUtils.toLowerCase(HttpFSFileSystem.DELETE_JSON), true);
-        }
-        // Same is the behavior with Delete shell command.
-        // If moveToAppropriateTrash() returns false, file deletion
-        // is attempted rather than throwing Error.
-        LOG.debug("Could not move {} to Trash, attempting removal", path);
-      }
       boolean deleted = fs.delete(path, recursive);
       HttpFSServerWebApp.get().getMetrics().incrOpsDelete();
       return toJSON(
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
index b810ec66903..f6c84dcae4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
@@ -81,8 +81,7 @@
         new Class[]{ReplicationParam.class});
     PARAMS_DEF.put(Operation.SETTIMES,
         new Class[]{ModifiedTimeParam.class, AccessTimeParam.class});
-    PARAMS_DEF.put(Operation.DELETE, new Class[]{RecursiveParam.class,
-        DeleteSkipTrashParam.class});
+    PARAMS_DEF.put(Operation.DELETE, new Class[]{RecursiveParam.class});
     PARAMS_DEF.put(Operation.SETACL, new Class[]{AclPermissionParam.class});
     PARAMS_DEF.put(Operation.REMOVEACL, new Class[]{});
     PARAMS_DEF.put(Operation.MODIFYACLENTRIES,
@@ -243,25 +242,6 @@ public class HttpFSParametersProvider extends ParametersProvider {
     }
   }

-  /**
-   * Class for delete's skipTrash parameter.
-   */
-  @InterfaceAudience.Private
-  public static class DeleteSkipTrashParam extends BooleanParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.SKIP_TRASH_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public DeleteSkipTrashParam() {
-      super(NAME, false);
-    }
-  }
-
   /**
    * Class for filter parameter.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index e3254d5bd9e..d0d76d62899 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AccessTimeParam
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AclPermissionParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.BlockSizeParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam;
-import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DeleteSkipTrashParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ECPolicyParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.FilterParam;
@@ -549,13 +548,9 @@ public class HttpFSServer {
     case DELETE: {
       Boolean recursive =
           params.get(RecursiveParam.NAME, RecursiveParam.class);
-      Boolean skipTrashParam = params.get(DeleteSkipTrashParam.NAME,
-          DeleteSkipTrashParam.class);
-      boolean skipTrash = skipTrashParam != null && skipTrashParam;
-      AUDIT_LOG.info("[{}] recursive [{}] skipTrash [{}]", path, recursive,
-          skipTrash);
+      AUDIT_LOG.info("[{}] recursive [{}]", path, recursive);
       FSOperations.FSDelete command =
-          new FSOperations.FSDelete(path, recursive, skipTrash);
+          new FSOperations.FSDelete(path, recursive);
       JSONObject json = fsExecute(user, command);
       response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
       break;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
index cddf95e2212..6aa8aa346ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.fs.http.server;

-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
@@ -538,36 +537,6 @@ public class TestHttpFSServer extends HFSTestCase {
     Assert.assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode());
   }

-  private void deleteWithHttp(String filename, String perms,
-      String unmaskedPerms, Boolean skipTrash) throws Exception {
-    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
-    // Remove leading / from filename
-    if (filename.charAt(0) == '/') {
-      filename = filename.substring(1);
-    }
-    String pathOps;
-    if (perms == null) {
-      pathOps = MessageFormat.format("/webhdfs/v1/{0}?user.name={1}&op=DELETE",
-          filename, user);
-    } else {
-      pathOps = MessageFormat.format(
-          "/webhdfs/v1/{0}?user.name={1}&permission={2}&op=DELETE",
-          filename, user, perms);
-    }
-    if (unmaskedPerms != null) {
-      pathOps = pathOps + "&unmaskedpermission=" + unmaskedPerms;
-    }
-    if (skipTrash != null) {
-      pathOps = pathOps + "&skiptrash=" + skipTrash;
-    }
-    URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
-    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-    conn.addRequestProperty("Content-Type", "application/octet-stream");
-    conn.setRequestMethod("DELETE");
-    conn.connect();
-    Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
-  }
-
   /**
    * Talks to the http interface to create a directory.
    *
@@ -813,37 +782,6 @@ public class TestHttpFSServer extends HFSTestCase {
     Assert.assertTrue("321".equals(getPerms(statusJson)));
   }

-  /**
-   * Validate create and delete calls.
-   */
-  @Test
-  @TestDir
-  @TestJetty
-  @TestHdfs
-  public void testCreateDelete() throws Exception {
-    final String dir1 = "/testCreateDelete1";
-    final String path1 = dir1 + "/file1";
-    final String dir2 = "/testCreateDelete2";
-    final String path2 = dir2 + "/file2";
-
-    createHttpFSServer(false, false);
-    final Configuration conf = HttpFSServerWebApp.get()
-        .get(FileSystemAccess.class).getFileSystemConfiguration();
-    conf.setLong(FS_TRASH_INTERVAL_KEY, 5);
-    writeConf(conf, "hdfs-site.xml");
-
-    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
-    fs.mkdirs(new Path(dir1));
-
-    createWithHttp(path1, null);
-    deleteWithHttp(path1, null, null, null);
-
-    fs.mkdirs(new Path(dir2));
-
-    createWithHttp(path2, null);
-    deleteWithHttp(path2, null, null, true);
-  }
-
   /**
    * Validate XAttr get/set/remove calls.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html
index 3150d87bdce..80b38e7165f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html
@@ -165,32 +165,11 @@
-
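Note: with this revert an HttpFS `DELETE` maps straight onto `FileSystem#delete` and the server no longer consults `fs.trash.interval`. Callers that still want trash semantics have to move the path into trash themselves, the same way `hdfs dfs -rm` does, before falling back to a plain delete. A minimal client-side sketch, assuming a hypothetical `webhdfs://` endpoint and target path (both placeholders, not part of this change):

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;

public class ClientSideTrashDelete {
  public static void main(String[] args) throws Exception {
    // Hypothetical endpoint and path, for illustration only.
    URI uri = URI.create("webhdfs://namenode.example.com:9870");
    Path target = new Path("/tmp/data-to-remove");

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(uri, conf);

    // Try to move the path into the caller's trash directory first
    // (returns false when fs.trash.interval is 0 or the move fails).
    boolean movedToTrash = Trash.moveToAppropriateTrash(fs, target, conf);
    if (!movedToTrash) {
      // Fall back to a plain delete, mirroring the shell's behaviour.
      fs.delete(target, true /* recursive */);
    }
  }
}
```

`Trash.moveToAppropriateTrash` is the same helper the removed server-side code called, so this keeps the end-to-end behaviour the HttpFS server previously provided on the caller's behalf.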
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js
index 0895eb967a6..cb16eac7b12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js
@@ -82,47 +82,23 @@
 function delete_path(inode_name, absolute_file_path) {
   $('#delete-modal-title').text("Delete - " + inode_name);
   $('#delete-prompt').text("Are you sure you want to delete " + inode_name
-      + " ?");
-  $('#delete-trash-modal-title').text("Skip Trash - " + inode_name);
-  $('#delete-trash-prompt').text("Skipping Trash might delete file forever."
-      + " Do you want to skip-trash " + inode_name
-      + " ? (default behaviour - No)");
+    + " ?");

-  $('#skip-trash-button').click(function () {
-    // DELETE /webhdfs/v1/<path>?op=DELETE&recursive=<true|false>&skiptrash=true
-    var url = '/webhdfs/v1' + encode_path(absolute_file_path) +
-        '?op=DELETE' + '&recursive=true&skiptrash=true';
-    $.ajax(url,
-        {
-          type: 'DELETE'
-        }).done(function (data) {
-      browse_directory(current_directory);
-    }).fail(network_error_handler(url)
-    ).always(function () {
-      $('#delete-modal').modal('hide');
-      $('#delete-button').button('reset');
-      $('#delete-trash-modal').modal('hide');
-      $('#skip-trash-button').button('reset');
-    });
-  })
-  $('#trash-button').click(function () {
+  $('#delete-button').click(function() {
     // DELETE /webhdfs/v1/<path>?op=DELETE&recursive=<true|false>
     var url = '/webhdfs/v1' + encode_path(absolute_file_path) +
-        '?op=DELETE' + '&recursive=true';
-    $.ajax(url,
-        {
-          type: 'DELETE'
-        }).done(function (data) {
-      browse_directory(current_directory);
-    }).fail(network_error_handler(url)
-    ).always(function () {
-      $('#delete-modal').modal('hide');
-      $('#delete-button').button('reset');
-      $('#delete-trash-modal').modal('hide');
-      $('#trash-button').button('reset');
-    });
-  })
+      '?op=DELETE' + '&recursive=true';
+    $.ajax(url,
+    { type: 'DELETE'
+    }).done(function(data) {
+      browse_directory(current_directory);
+    }).fail(network_error_handler(url)
+    ).always(function() {
+      $('#delete-modal').modal('hide');
+      $('#delete-button').button('reset');
+    });
+  })
   $('#delete-modal').modal();
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 2703349db87..e7bf32fcc05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -55,10 +55,8 @@ import javax.ws.rs.core.StreamingOutput;
 import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.Response.Status;

-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -121,9 +119,6 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
 import com.sun.jersey.spi.container.ResourceFilters;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
-
 /** Web-hdfs NameNode implementation. */
 @Path("")
 @ResourceFilters(ParamFilter.class)
@@ -1508,13 +1503,10 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(RecursiveParam.NAME) @DefaultValue(RecursiveParam.DEFAULT)
           final RecursiveParam recursive,
       @QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
-          final SnapshotNameParam snapshotName,
-      @QueryParam(DeleteSkipTrashParam.NAME)
-          @DefaultValue(DeleteSkipTrashParam.DEFAULT)
-          final DeleteSkipTrashParam skiptrash
+          final SnapshotNameParam snapshotName
       ) throws IOException, InterruptedException {
     return delete(ugi, delegation, username, doAsUser, ROOT, op, recursive,
-        snapshotName, skiptrash);
+        snapshotName);
   }

   /** Handle HTTP DELETE request. */
@@ -1535,53 +1527,34 @@
       @QueryParam(RecursiveParam.NAME) @DefaultValue(RecursiveParam.DEFAULT)
           final RecursiveParam recursive,
       @QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
-          final SnapshotNameParam snapshotName,
-      @QueryParam(DeleteSkipTrashParam.NAME)
-          @DefaultValue(DeleteSkipTrashParam.DEFAULT)
-          final DeleteSkipTrashParam skiptrash
+          final SnapshotNameParam snapshotName
       ) throws IOException, InterruptedException {
-    init(ugi, delegation, username, doAsUser, path, op, recursive,
-        snapshotName, skiptrash);
+    init(ugi, delegation, username, doAsUser, path, op, recursive, snapshotName);

-    return doAs(ugi, () -> delete(
-        path.getAbsolutePath(), op, recursive, snapshotName, skiptrash));
+    return doAs(ugi, new PrivilegedExceptionAction<Response>() {
+      @Override
+      public Response run() throws IOException {
+        return delete(ugi, delegation, username, doAsUser,
+            path.getAbsolutePath(), op, recursive, snapshotName);
+      }
+    });
   }

   protected Response delete(
+      final UserGroupInformation ugi,
+      final DelegationParam delegation,
+      final UserParam username,
+      final DoAsParam doAsUser,
       final String fullpath,
       final DeleteOpParam op,
       final RecursiveParam recursive,
-      final SnapshotNameParam snapshotName,
-      final DeleteSkipTrashParam skipTrash) throws IOException {
+      final SnapshotNameParam snapshotName
+      ) throws IOException {
     final ClientProtocol cp = getRpcClientProtocol();
     switch(op.getValue()) {
     case DELETE: {
-      Configuration conf =
-          (Configuration) context.getAttribute(JspHelper.CURRENT_CONF);
-      long trashInterval =
-          conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT);
-      if (trashInterval > 0 && !skipTrash.getValue()) {
-        LOG.info("{} is {} , trying to archive {} instead of removing",
-            FS_TRASH_INTERVAL_KEY, trashInterval, fullpath);
-        org.apache.hadoop.fs.Path path =
-            new org.apache.hadoop.fs.Path(fullpath);
-        Configuration clonedConf = new Configuration(conf);
-        // To avoid caching FS objects and prevent OOM issues
-        clonedConf.set("fs.hdfs.impl.disable.cache", "true");
-        FileSystem fs = FileSystem.get(clonedConf);
-        boolean movedToTrash = Trash.moveToAppropriateTrash(fs, path,
-            clonedConf);
-        if (movedToTrash) {
-          final String js = JsonUtil.toJsonString("boolean", true);
-          return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
-        }
-        // Same is the behavior with Delete shell command.
-        // If moveToAppropriateTrash() returns false, file deletion
-        // is attempted rather than throwing Error.
-        LOG.debug("Could not move {} to Trash, attempting removal", fullpath);
-      }
       final boolean b = cp.delete(fullpath, recursive.getValue());
       final String js = JsonUtil.toJsonString("boolean", b);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index eb7ca350489..3f0509a2297 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -166,32 +166,11 @@
-
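Note: on the NameNode side the handler now passes the request directly to `ClientProtocol#delete`, so the REST contract is again just `op=DELETE` plus the optional `recursive` flag. A sketch of the raw call, loosely mirroring the `deleteWithHttp` helper removed from `TestHttpFSServer` above; the host, port, user and path below are placeholders, not values from this change:

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class WebHdfsRestDelete {
  public static void main(String[] args) throws Exception {
    // Hypothetical NameNode HTTP address, user and path, for illustration only.
    String base = "http://namenode.example.com:9870/webhdfs/v1";
    String path = "/tmp/data-to-remove";
    String user = "hdfs";

    // After the revert, the delete request only carries op=DELETE and,
    // optionally, recursive; skiptrash is no longer part of the API.
    URL url = new URL(base + path + "?user.name=" + user
        + "&op=DELETE&recursive=true");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("DELETE");
    conn.connect();

    // A successful delete answers 200 with a boolean JSON body, e.g. {"boolean":true}.
    System.out.println("HTTP " + conn.getResponseCode());
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = reader.readLine()) != null) {
        System.out.println(line);
      }
    }
  }
}
```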
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index d9371bb9288..ea8b0accbde 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
@@ -83,46 +83,22 @@
   $('#delete-modal-title').text("Delete - " + inode_name);
   $('#delete-prompt').text("Are you sure you want to delete " + inode_name
     + " ?");
-  $('#delete-trash-modal-title').text("Skip Trash - " + inode_name);
-  $('#delete-trash-prompt').text("Skipping Trash might delete file forever."
-    + " Do you want to skip-trash " + inode_name
-    + " ? (default behaviour - No)");

-  $('#skip-trash-button').click(function () {
-    // DELETE /webhdfs/v1/<path>?op=DELETE&recursive=<true|false>&skiptrash=true
-    var url = '/webhdfs/v1' + encode_path(absolute_file_path) +
-      '?op=DELETE' + '&recursive=true&skiptrash=true';
-    $.ajax(url,
-      {
-        type: 'DELETE'
-      }).done(function (data) {
-      browse_directory(current_directory);
-    }).fail(network_error_handler(url)
-    ).always(function () {
-      $('#delete-modal').modal('hide');
-      $('#delete-button').button('reset');
-      $('#delete-trash-modal').modal('hide');
-      $('#skip-trash-button').button('reset');
-    });
-  })
-  $('#trash-button').click(function () {
+  $('#delete-button').click(function() {
     // DELETE /webhdfs/v1/<path>?op=DELETE&recursive=<true|false>
     var url = '/webhdfs/v1' + encode_path(absolute_file_path) +
-      '?op=DELETE' + '&recursive=true';
-    $.ajax(url,
-      {
-        type: 'DELETE'
-      }).done(function (data) {
-      browse_directory(current_directory);
-    }).fail(network_error_handler(url)
-    ).always(function () {
-      $('#delete-modal').modal('hide');
-      $('#delete-button').button('reset');
-      $('#delete-trash-modal').modal('hide');
-      $('#trash-button').button('reset');
-    });
-  })
+      '?op=DELETE' + '&recursive=true';
+    $.ajax(url,
+    { type: 'DELETE'
+    }).done(function(data) {
+      browse_directory(current_directory);
+    }).fail(network_error_handler(url)
+    ).always(function() {
+      $('#delete-modal').modal('hide');
+      $('#delete-button').button('reset');
+    });
+  })
   $('#delete-modal').modal();
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 07891ae64b3..3fb68d40e92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -462,11 +462,7 @@ See also: [`destination`](#Destination), [FileSystem](../../api/org/apache/hadoo
 * Submit a HTTP DELETE request.

         curl -i -X DELETE "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=DELETE
-                      [&recursive=<true|false>][&skiptrash=<true|false>]"
-
-    Default values of queryparams if not provided:
-    1. recursive: false
-    2. skiptrash: false
+                      [&recursive=<true|false>]"

     The client receives a response with a [`boolean` JSON object](#Boolean_JSON_Schema):
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 698c323e9a9..68087c4c4ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -18,8 +18,6 @@
 package org.apache.hadoop.hdfs.web;

-import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
@@ -60,8 +58,6 @@ import java.util.Map;
 import java.util.NoSuchElementException;
 import java.util.Random;

-import org.apache.hadoop.hdfs.web.resources.DeleteSkipTrashParam;
-import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.fs.QuotaUsage;
@@ -1563,12 +1559,8 @@ public class TestWebHDFS {
           HttpURLConnection.HTTP_OK, conn.getResponseCode());

       JSONObject responseJson = new JSONObject(response);
-      if (!TYPE.equals("DELETE")) {
-        Assert.assertTrue("Response didn't give us a location. " + response,
-            responseJson.has("Location"));
-      } else {
-        Assert.assertTrue(responseJson.getBoolean("boolean"));
-      }
+      Assert.assertTrue("Response didn't give us a location. " + response,
+          responseJson.has("Location"));

       //Test that the DN allows CORS on Create
       if(TYPE.equals("CREATE")) {
@@ -1580,15 +1572,14 @@
     }
   }

+  @Test
   /**
    * Test that when "&noredirect=true" is added to operations CREATE, APPEND,
    * OPEN, and GETFILECHECKSUM the response (which is usually a 307 temporary
    * redirect) is a 200 with JSON that contains the redirected location
    */
-  @Test
   public void testWebHdfsNoRedirect() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
-    conf.setLong(FS_TRASH_INTERVAL_KEY, 5);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     LOG.info("Started cluster");
     InetSocketAddress addr = cluster.getNameNode().getHttpAddress();
@@ -1627,26 +1618,6 @@
         + Param.toSortedString("&", new NoRedirectParam(true)));
     LOG.info("Sending append request " + url);
     checkResponseContainsLocation(url, "POST");
-
-    // setup some permission to allow moving file to .Trash location
-    cluster.getFileSystem().setPermission(new Path("/testWebHdfsNoRedirect"),
-        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
-    Path userDir = new Path(FileSystem.USER_HOME_PREFIX);
-    Path trashDir = new Path(FileSystem.USER_HOME_PREFIX, DEFAULT_HADOOP_HTTP_STATIC_USER);
-    Path trashPath = new Path(FileSystem.USER_HOME_PREFIX,
-        new Path(DEFAULT_HADOOP_HTTP_STATIC_USER, FileSystem.TRASH_PREFIX));
-    cluster.getFileSystem().mkdirs(userDir, FsPermission.getDirDefault());
-    cluster.getFileSystem().mkdir(trashDir, FsPermission.getDirDefault());
-    cluster.getFileSystem().mkdir(trashPath, FsPermission.getDirDefault());
-    cluster.getFileSystem().setOwner(trashPath, DEFAULT_HADOOP_HTTP_STATIC_USER, HADOOP_USER_GROUP_STATIC_OVERRIDES_DEFAULT);
-    cluster.getFileSystem().setPermission(new Path("/"), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
-
-    url = new URL("http", addr.getHostString(), addr.getPort(),
-        WebHdfsFileSystem.PATH_PREFIX + "/testWebHdfsNoRedirect" + "?op=DELETE"
-        + Param.toSortedString("&", new RecursiveParam(true))
-        + Param.toSortedString("&", new DeleteSkipTrashParam(false)));
-    LOG.info("Sending append request " + url);
-    checkResponseContainsLocation(url, "DELETE");
   }

   @Test