HDFS-15052. WebHDFS getTrashRoot leads to OOM due to FileSystem object creation. (#1758)

(cherry picked from commit 2338d25dc7)

 Conflicts:
        hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java

(cherry picked from commit 610805ec72)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
Masatake Iwasaki 2020-02-21 11:56:07 +09:00
parent ce06503599
commit 3fc1c44974
4 changed files with 124 additions and 15 deletions
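Background (editor's note, summarized from the change below, not part of the original commit message): before this patch, NamenodeWebHdfsMethods#getTrashRoot called FileSystem.get(conf) inside the NameNode for every GETTRASHROOT request. FileSystem instances are cached with the caller's UserGroupInformation as part of the cache key, and each WebHDFS request runs under its own UGI instance, so the cache could keep accumulating never-closed DistributedFileSystem objects until the NameNode heap was exhausted. The patch computes the trash root as a plain string via the DFSUtilClient helpers plus a single ClientProtocol#getEZForPath lookup, creating no FileSystem at all. A rough, hypothetical sketch of the accumulation pattern (the class name and loop are illustrative only, standing in for many distinct WebHDFS callers):

// Illustrative sketch only (not part of the commit): why FileSystem.get(conf)
// per request can grow the NameNode heap. Each distinct UGI instance misses
// the FileSystem cache and leaves behind a new, never-closed entry.
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class TrashRootLeakSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    for (int i = 0; i < 100000; i++) {
      // A fresh UGI per iteration stands in for per-request UGIs.
      UserGroupInformation ugi =
          UserGroupInformation.createRemoteUser("user" + i);
      ugi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          FileSystem fs = FileSystem.get(conf);  // new cache entry each time
          fs.getTrashRoot(new Path("/tmp"));     // mirrors the old handler
          return null;                           // fs is never closed
        }
      });
    }
  }
}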

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java

@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -772,7 +773,7 @@ public class DFSUtilClient {
    * @param ugi {@link UserGroupInformation} of current user.
    * @return the home directory of current user.
    */
-  public static Path getHomeDirectory(Configuration conf,
+  public static String getHomeDirectory(Configuration conf,
       UserGroupInformation ugi) {
     String userHomePrefix = HdfsClientConfigKeys
         .DFS_USER_HOME_DIR_PREFIX_DEFAULT;
@@ -781,6 +782,31 @@ public class DFSUtilClient {
           HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
           HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);
     }
-    return new Path(userHomePrefix + "/" + ugi.getShortUserName());
+    return userHomePrefix + Path.SEPARATOR + ugi.getShortUserName();
+  }
+
+  /**
+   * Returns trash root in non-encryption zone.
+   * @param conf configuration.
+   * @param ugi user of trash owner.
+   * @return unqualified path of trash root.
+   */
+  public static String getTrashRoot(Configuration conf,
+      UserGroupInformation ugi) {
+    return getHomeDirectory(conf, ugi)
+        + Path.SEPARATOR + FileSystem.TRASH_PREFIX;
+  }
+
+  /**
+   * Returns trash root in encryption zone.
+   * @param ez encryption zone.
+   * @param ugi user of trash owner.
+   * @return unqualified path of trash root.
+   */
+  public static String getEZTrashRoot(EncryptionZone ez,
+      UserGroupInformation ugi) {
+    String ezpath = ez.getPath();
+    return (ezpath.equals("/") ? ezpath : ezpath + Path.SEPARATOR)
+        + FileSystem.TRASH_PREFIX + Path.SEPARATOR + ugi.getShortUserName();
   }
 }
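For illustration (editor's note, not part of the commit), a short snippet showing what the new string-returning helpers produce. The values follow directly from the code above, assuming the caller's short user name is "alice" and the default dfs.user.home.dir.prefix of "/user"; all results are unqualified paths with no scheme or authority, which is what lets the NameNode-side WebHDFS handler use them without a FileSystem instance:

// Sketch: results of the new DFSUtilClient helpers (assumed user "alice",
// default home prefix "/user"; FileSystem.TRASH_PREFIX is ".Trash").
Configuration conf = new Configuration();
UserGroupInformation alice = UserGroupInformation.createRemoteUser("alice");

DFSUtilClient.getHomeDirectory(conf, alice); // "/user/alice"
DFSUtilClient.getTrashRoot(conf, alice);     // "/user/alice/.Trash"

// With an EncryptionZone ez whose getPath() is "/zone1":
//   DFSUtilClient.getEZTrashRoot(ez, alice)  -> "/zone1/.Trash/alice"
// With an encryption zone rooted at "/":
//   DFSUtilClient.getEZTrashRoot(ez, alice)  -> "/.Trash/alice"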

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -197,7 +197,8 @@ public class DistributedFileSystem extends FileSystem
 
   @Override
   public Path getHomeDirectory() {
-    return makeQualified(DFSUtilClient.getHomeDirectory(getConf(), dfs.ugi));
+    return makeQualified(
+        new Path(DFSUtilClient.getHomeDirectory(getConf(), dfs.ugi)));
   }
 
   /**
@@ -2635,8 +2636,7 @@ public class DistributedFileSystem extends FileSystem
       EncryptionZone ez = dfs.getEZForPath(parentSrc);
       if ((ez != null)) {
         return this.makeQualified(
-            new Path(new Path(ez.getPath(), FileSystem.TRASH_PREFIX),
-                dfs.ugi.getShortUserName()));
+            new Path(DFSUtilClient.getEZTrashRoot(ez, dfs.ugi)));
       }
     } catch (IOException e) {
       DFSClient.LOG.warn("Exception in checking the encryption zone for the " +
@@ -2663,7 +2663,8 @@ public class DistributedFileSystem extends FileSystem
       // Get EZ Trash roots
       final RemoteIterator<EncryptionZone> it = dfs.listEncryptionZones();
       while (it.hasNext()) {
-        Path ezTrashRoot = new Path(it.next().getPath(),
+        EncryptionZone ez = it.next();
+        Path ezTrashRoot = new Path(ez.getPath(),
             FileSystem.TRASH_PREFIX);
         if (!exists(ezTrashRoot)) {
           continue;
@@ -2675,7 +2676,7 @@ public class DistributedFileSystem extends FileSystem
           }
         }
       } else {
-        Path userTrash = new Path(ezTrashRoot, dfs.ugi.getShortUserName());
+        Path userTrash = new Path(DFSUtilClient.getEZTrashRoot(ez, dfs.ugi));
         try {
           ret.add(getFileStatus(userTrash));
         } catch (FileNotFoundException ignored) {
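A side note on the refactoring above (editor's summary): DistributedFileSystem still hands callers fully qualified Paths by wrapping the shared string helpers in makeQualified, whereas the WebHDFS handler returns the bare path string. A hedged sketch of the resulting values, assuming a cluster at hdfs://nn1:8020, an encryption zone at /zone1, and caller "alice":

// Sketch only (assumed: DistributedFileSystem dfs for hdfs://nn1:8020,
// an encryption zone at /zone1, caller "alice"):
dfs.getTrashRoot(new Path("/zone1/file1"));
//   -> hdfs://nn1:8020/zone1/.Trash/alice   (EZ trash root, qualified)
dfs.getTrashRoot(new Path("/data/file2"));
//   -> hdfs://nn1:8020/user/alice/.Trash    (home trash root, qualified)
// WebHDFS's GETTRASHROOT JSON carries only the path portion, which is why
// the test below compares getTrashRoot(...).toUri().getPath() on both sides.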

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java

@@ -62,7 +62,6 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.XAttr;
@@ -76,6 +75,7 @@ import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -1097,7 +1097,7 @@ public class NamenodeWebHdfsMethods {
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case GETHOMEDIRECTORY: {
-      String userHome = DFSUtilClient.getHomeDirectory(conf, ugi).toString();
+      String userHome = DFSUtilClient.getHomeDirectory(conf, ugi);
       final String js = JsonUtil.toJsonString("Path", userHome);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
@@ -1136,7 +1136,7 @@ public class NamenodeWebHdfsMethods {
       return Response.ok().build();
     }
     case GETTRASHROOT: {
-      final String trashPath = getTrashRoot(fullpath, conf);
+      final String trashPath = getTrashRoot(conf, fullpath);
       final String jsonStr = JsonUtil.toJsonString("Path", trashPath);
       return Response.ok(jsonStr).type(MediaType.APPLICATION_JSON).build();
     }
@@ -1178,11 +1178,39 @@
     }
   }
 
-  private static String getTrashRoot(String fullPath,
-      Configuration conf) throws IOException {
-    FileSystem fs = FileSystem.get(conf != null ? conf : new Configuration());
-    return fs.getTrashRoot(
-        new org.apache.hadoop.fs.Path(fullPath)).toUri().getPath();
+  private String getTrashRoot(Configuration conf, String fullPath)
+      throws IOException {
+    UserGroupInformation ugi= UserGroupInformation.getCurrentUser();
+    String parentSrc = getParent(fullPath);
+    EncryptionZone ez = getRpcClientProtocol().getEZForPath(
+        parentSrc != null ? parentSrc : fullPath);
+    String trashRoot;
+    if (ez != null) {
+      trashRoot = DFSUtilClient.getEZTrashRoot(ez, ugi);
+    } else {
+      trashRoot = DFSUtilClient.getTrashRoot(conf, ugi);
+    }
+    return trashRoot;
+  }
+
+  /**
+   * Returns the parent of a path in the same way as Path#getParent.
+   * @return the parent of a path or null if at root
+   */
+  public String getParent(String path) {
+    int lastSlash = path.lastIndexOf('/');
+    int start = 0;
+    if ((path.length() == start) || // empty path
+        (lastSlash == start && path.length() == start + 1)) { // at root
+      return null;
+    }
+    String parent;
+    if (lastSlash == -1) {
+      parent = org.apache.hadoop.fs.Path.CUR_DIR;
+    } else {
+      parent = path.substring(0, lastSlash == start ? start + 1 : lastSlash);
+    }
+    return parent;
   }
 
   private static DirectoryListing getDirectoryListing(final ClientProtocol cp,
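The new getParent helper mirrors Path#getParent on plain strings, and getTrashRoot looks up the encryption zone of the parent (or of the path itself when already at the root), matching DistributedFileSystem#getTrashRoot. Tracing the code above on a few representative inputs gives the following results (editor's sketch; webhdfsMethods stands in for a hypothetical NamenodeWebHdfsMethods instance used only for illustration):

// Sketch: expected getParent(String) results, traced from the hunk above.
webhdfsMethods.getParent("");            // null  (empty path)
webhdfsMethods.getParent("/");           // null  (already at the root)
webhdfsMethods.getParent("file");        // "."   (no slash: Path.CUR_DIR)
webhdfsMethods.getParent("/zone1");      // "/"   (parent of a top-level entry)
webhdfsMethods.getParent("/zone1/file"); // "/zone1"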

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java

@@ -34,6 +34,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.EOFException;
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -47,6 +48,7 @@ import java.net.URL;
 import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.Map;
 import java.util.Random;
 
@@ -58,11 +60,13 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.QuotaUsage;
@@ -80,6 +84,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestDFSClientRetries;
 import org.apache.hadoop.hdfs.TestFileCreation;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -1397,6 +1403,54 @@ public class TestWebHDFS {
     }
   }
 
+  @Test
+  public void testGetEZTrashRoot() throws Exception {
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    FileSystemTestHelper fsHelper = new FileSystemTestHelper();
+    File testRootDir = new File(fsHelper.getTestRootDir()).getAbsoluteFile();
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+        "jceks://file" + new Path(testRootDir.toString(), "test.jks").toUri());
+    final MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
+        conf, WebHdfsConstants.WEBHDFS_SCHEME);
+    HdfsAdmin dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
+    dfs.getClient().setKeyProvider(
+        cluster.getNameNode().getNamesystem().getProvider());
+    final String testkey = "test_key";
+    DFSTestUtil.createKey(testkey, cluster, conf);
+
+    final Path zone1 = new Path("/zone1");
+    dfs.mkdirs(zone1, new FsPermission((short)0700));
+    dfsAdmin.createEncryptionZone(zone1, testkey,
+        EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));
+
+    final Path insideEZ = new Path(zone1, "insideEZ");
+    dfs.mkdirs(insideEZ, new FsPermission((short)0700));
+    assertEquals(
+        dfs.getTrashRoot(insideEZ).toUri().getPath(),
+        webhdfs.getTrashRoot(insideEZ).toUri().getPath());
+
+    final Path outsideEZ = new Path("/outsideEZ");
+    dfs.mkdirs(outsideEZ, new FsPermission((short)0755));
+    assertEquals(
+        dfs.getTrashRoot(outsideEZ).toUri().getPath(),
+        webhdfs.getTrashRoot(outsideEZ).toUri().getPath());
+
+    final Path root = new Path("/");
+    assertEquals(
+        dfs.getTrashRoot(root).toUri().getPath(),
+        webhdfs.getTrashRoot(root).toUri().getPath());
+    assertEquals(
+        webhdfs.getTrashRoot(root).toUri().getPath(),
+        webhdfs.getTrashRoot(zone1).toUri().getPath());
+    assertEquals(
+        webhdfs.getTrashRoot(outsideEZ).toUri().getPath(),
+        webhdfs.getTrashRoot(zone1).toUri().getPath());
+  }
+
   @Test
   public void testStoragePolicy() throws Exception {
     MiniDFSCluster cluster = null;
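For completeness, a hedged sketch of what the webhdfs.getTrashRoot calls in the test above exercise over HTTP. The host, port (50070 is the usual branch-2 NameNode HTTP default), and user name are assumptions; the GETTRASHROOT op name and the {"Path": ...} JSON shape come from the NamenodeWebHdfsMethods hunk above:

// Sketch only: issuing GETTRASHROOT directly against a NameNode's WebHDFS
// endpoint (assumed address nn1:50070, simple auth as user "alice").
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Scanner;

public class GetTrashRootOverHttp {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://nn1:50070/webhdfs/v1/zone1/insideEZ"
        + "?op=GETTRASHROOT&user.name=alice");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (InputStream in = conn.getInputStream();
        Scanner body = new Scanner(in, "UTF-8").useDelimiter("\\A")) {
      // Expected body from the patched handler: {"Path":"/zone1/.Trash/alice"}
      System.out.println(body.hasNext() ? body.next() : "");
    }
  }
}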