HDFS-14085. RBF: LS command for root shows wrong owner and permission information. Contributed by Ayush Saxena.

Authored by Surendra Singh Lilhore on 2018-12-04 12:23:56 +05:30; committed by Brahma Reddy Battula
parent 19088e1b49
commit b320caecb3
3 changed files with 279 additions and 82 deletions

FederationUtil.java

@@ -27,6 +27,7 @@ import java.net.URLConnection;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
@@ -205,4 +206,24 @@ public final class FederationUtil {
     return path.charAt(parent.length()) == Path.SEPARATOR_CHAR
         || parent.equals(Path.SEPARATOR);
   }
+
+  /**
+   * Add the number of children for an existing HdfsFileStatus object.
+   * @param dirStatus HdfsFileStatus object.
+   * @param children number of children to be added.
+   * @return HdfsFileStatus with the number of children specified.
+   */
+  public static HdfsFileStatus updateMountPointStatus(HdfsFileStatus dirStatus,
+      int children) {
+    return new HdfsFileStatus.Builder().atime(dirStatus.getAccessTime())
+        .blocksize(dirStatus.getBlockSize()).children(children)
+        .ecPolicy(dirStatus.getErasureCodingPolicy())
+        .feInfo(dirStatus.getFileEncryptionInfo()).fileId(dirStatus.getFileId())
+        .group(dirStatus.getGroup()).isdir(dirStatus.isDir())
+        .length(dirStatus.getLen()).mtime(dirStatus.getModificationTime())
+        .owner(dirStatus.getOwner()).path(dirStatus.getLocalNameInBytes())
+        .perm(dirStatus.getPermission()).replication(dirStatus.getReplication())
+        .storagePolicy(dirStatus.getStoragePolicy())
+        .symlink(dirStatus.getSymlinkInBytes()).build();
+  }
 }
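
The helper above only copies fields; the one value it changes is the child count. A minimal, hypothetical usage sketch (the class name, field values and printed results are made up for illustration, and only Builder calls that already appear in the hunk above are used):

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.federation.router.FederationUtil;

public class UpdateMountPointStatusSketch {
  public static void main(String[] args) {
    // Build a bare directory status by hand; the values are arbitrary.
    HdfsFileStatus dirStatus = new HdfsFileStatus.Builder()
        .isdir(true)
        .path("testdir".getBytes(StandardCharsets.UTF_8))
        .owner("owner1").group("group1")
        .perm(FsPermission.createImmutable((short) 0755))
        .build();

    // Overlay a child count aggregated elsewhere (e.g. summed across subclusters).
    HdfsFileStatus updated = FederationUtil.updateMountPointStatus(dirStatus, 2);
    System.out.println(updated.getChildrenNum()); // 2
    System.out.println(updated.getOwner());       // owner1
  }
}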

RouterClientProtocol.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
+import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.updateMountPointStatus;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
 import org.apache.hadoop.fs.BatchedRemoteIterator;
@@ -675,7 +676,6 @@ public class RouterClientProtocol implements ClientProtocol {
       if (dates != null && dates.containsKey(child)) {
         date = dates.get(child);
       }
-      // TODO add number of children
       HdfsFileStatus dirStatus = getMountPointStatus(child, 0, date);
       // This may overwrite existing listing entries with the mount point
@@ -1669,12 +1669,13 @@
     // Get the file info from everybody
     Map<RemoteLocation, HdfsFileStatus> results =
         rpcClient.invokeConcurrent(locations, method, HdfsFileStatus.class);
+    int children = 0;
     // We return the first file
     HdfsFileStatus dirStatus = null;
     for (RemoteLocation loc : locations) {
       HdfsFileStatus fileStatus = results.get(loc);
       if (fileStatus != null) {
+        children += fileStatus.getChildrenNum();
         if (!fileStatus.isDirectory()) {
           return fileStatus;
         } else if (dirStatus == null) {
@@ -1682,7 +1683,10 @@
         }
       }
     }
-    return dirStatus;
+    if (dirStatus != null) {
+      return updateMountPointStatus(dirStatus, children);
+    }
+    return null;
   }
@@ -1738,12 +1742,23 @@
     String group = this.superGroup;
     if (subclusterResolver instanceof MountTableResolver) {
       try {
+        String mName = name.startsWith("/") ? name : "/" + name;
         MountTableResolver mountTable = (MountTableResolver) subclusterResolver;
-        MountTable entry = mountTable.getMountPoint(name);
+        MountTable entry = mountTable.getMountPoint(mName);
         if (entry != null) {
-          permission = entry.getMode();
-          owner = entry.getOwnerName();
-          group = entry.getGroupName();
+          HdfsFileStatus fInfo = getFileInfoAll(entry.getDestinations(),
+              new RemoteMethod("getFileInfo", new Class<?>[] {String.class},
+                  new RemoteParam()));
+          if (fInfo != null) {
+            permission = fInfo.getPermission();
+            owner = fInfo.getOwner();
+            group = fInfo.getGroup();
+            childrenNum = fInfo.getChildrenNum();
+          } else {
+            permission = entry.getMode();
+            owner = entry.getOwnerName();
+            group = entry.getGroupName();
+          }
         }
       } catch (IOException e) {
         LOG.error("Cannot get mount point: {}", e.getMessage());
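
The user-visible effect of the change above is easiest to see from a plain HDFS client: listing the router's root now reports the owner, group, permission and child count of the actual destination directory when it exists, and falls back to the values stored on the mount entry only when it does not. A minimal sketch, assuming a running router reachable at the hypothetical address hdfs://router-host:8888 (the address and class name are illustrative, not part of this patch):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class LsRootThroughRouter {
  public static void main(String[] args) throws Exception {
    // Connect through the router, not directly to a NameNode.
    FileSystem routerFs =
        FileSystem.get(new URI("hdfs://router-host:8888"), new Configuration());
    try {
      for (FileStatus status : routerFs.listStatus(new Path("/"))) {
        // Owner/group/permission now mirror the destination directory.
        System.out.println(status.getPermission() + " " + status.getOwner()
            + " " + status.getGroup() + " " + status.getPath().getName());
      }
    } finally {
      routerFs.close();
    }
  }
}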

TestRouterMountTable.java

@@ -23,6 +23,7 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -60,18 +61,21 @@ import org.junit.Test;
 public class TestRouterMountTable {
   private static StateStoreDFSCluster cluster;
-  private static NamenodeContext nnContext;
+  private static NamenodeContext nnContext0;
+  private static NamenodeContext nnContext1;
   private static RouterContext routerContext;
   private static MountTableResolver mountTable;
   private static ClientProtocol routerProtocol;
   private static long startTime;
+  private static FileSystem nnFs0;
+  private static FileSystem nnFs1;
   @BeforeClass
   public static void globalSetUp() throws Exception {
     startTime = Time.now();
     // Build and start a federated cluster
-    cluster = new StateStoreDFSCluster(false, 1);
+    cluster = new StateStoreDFSCluster(false, 2);
     Configuration conf = new RouterConfigBuilder()
         .stateStore()
         .admin()
@@ -83,7 +87,10 @@ public class TestRouterMountTable {
     cluster.waitClusterUp();
     // Get the end points
-    nnContext = cluster.getRandomNamenode();
+    nnContext0 = cluster.getNamenode("ns0", null);
+    nnContext1 = cluster.getNamenode("ns1", null);
+    nnFs0 = nnContext0.getFileSystem();
+    nnFs1 = nnContext1.getFileSystem();
     routerContext = cluster.getRandomRouter();
     Router router = routerContext.getRouter();
     routerProtocol = routerContext.getClient().getNamenode();
@@ -129,12 +136,11 @@ public class TestRouterMountTable {
     assertTrue(addMountTable(regularEntry));
     // Create a folder which should show in all locations
-    final FileSystem nnFs = nnContext.getFileSystem();
     final FileSystem routerFs = routerContext.getFileSystem();
     assertTrue(routerFs.mkdirs(new Path("/regular/newdir")));
     FileStatus dirStatusNn =
-        nnFs.getFileStatus(new Path("/testdir/newdir"));
+        nnFs0.getFileStatus(new Path("/testdir/newdir"));
     assertTrue(dirStatusNn.isDirectory());
     FileStatus dirStatusRegular =
         routerFs.getFileStatus(new Path("/regular/newdir"));
@@ -179,93 +185,248 @@
    */
   @Test
   public void testListFilesTime() throws Exception {
+    try {
     // Add mount table entry
-    MountTable addEntry = MountTable.newInstance(
-        "/testdir", Collections.singletonMap("ns0", "/testdir"));
+    MountTable addEntry = MountTable.newInstance("/testdir",
+        Collections.singletonMap("ns0", "/testdir"));
     assertTrue(addMountTable(addEntry));
-    addEntry = MountTable.newInstance(
-        "/testdir2", Collections.singletonMap("ns0", "/testdir2"));
+    addEntry = MountTable.newInstance("/testdir2",
+        Collections.singletonMap("ns0", "/testdir2"));
     assertTrue(addMountTable(addEntry));
-    addEntry = MountTable.newInstance(
-        "/testdir/subdir", Collections.singletonMap("ns0", "/testdir/subdir"));
+    addEntry = MountTable.newInstance("/testdir/subdir",
+        Collections.singletonMap("ns0", "/testdir/subdir"));
     assertTrue(addMountTable(addEntry));
-    addEntry = MountTable.newInstance(
-        "/testdir3/subdir1", Collections.singletonMap("ns0", "/testdir3"));
+    addEntry = MountTable.newInstance("/testdir3/subdir1",
+        Collections.singletonMap("ns0", "/testdir3"));
     assertTrue(addMountTable(addEntry));
-    addEntry = MountTable.newInstance(
-        "/testA/testB/testC/testD", Collections.singletonMap("ns0", "/test"));
+    addEntry = MountTable.newInstance("/testA/testB/testC/testD",
+        Collections.singletonMap("ns0", "/test"));
     assertTrue(addMountTable(addEntry));
     // Create test dir in NN
-    final FileSystem nnFs = nnContext.getFileSystem();
-    assertTrue(nnFs.mkdirs(new Path("/newdir")));
+    assertTrue(nnFs0.mkdirs(new Path("/newdir")));
     Map<String, Long> pathModTime = new TreeMap<>();
     for (String mount : mountTable.getMountPoints("/")) {
-      if (mountTable.getMountPoint("/"+mount) != null) {
-        pathModTime.put(mount, mountTable.getMountPoint("/"+mount)
-            .getDateModified());
+      if (mountTable.getMountPoint("/" + mount) != null) {
+        pathModTime.put(mount,
+            mountTable.getMountPoint("/" + mount).getDateModified());
       } else {
-        List<MountTable> entries = mountTable.getMounts("/"+mount);
+        List<MountTable> entries = mountTable.getMounts("/" + mount);
         for (MountTable entry : entries) {
-          if (pathModTime.get(mount) == null ||
-              pathModTime.get(mount) < entry.getDateModified()) {
+          if (pathModTime.get(mount) == null
+              || pathModTime.get(mount) < entry.getDateModified()) {
             pathModTime.put(mount, entry.getDateModified());
           }
         }
       }
     }
-    FileStatus[] iterator = nnFs.listStatus(new Path("/"));
+    FileStatus[] iterator = nnFs0.listStatus(new Path("/"));
     for (FileStatus file : iterator) {
       pathModTime.put(file.getPath().getName(), file.getModificationTime());
     }
     // Fetch listing
     DirectoryListing listing =
         routerProtocol.getListing("/", HdfsFileStatus.EMPTY_NAME, false);
     Iterator<String> pathModTimeIterator = pathModTime.keySet().iterator();
     // Match date/time for each path returned
-    for(HdfsFileStatus f : listing.getPartialListing()) {
+    for (HdfsFileStatus f : listing.getPartialListing()) {
       String fileName = pathModTimeIterator.next();
       String currentFile = f.getFullPath(new Path("/")).getName();
       Long currentTime = f.getModificationTime();
       Long expectedTime = pathModTime.get(currentFile);
       assertEquals(currentFile, fileName);
       assertTrue(currentTime > startTime);
       assertEquals(currentTime, expectedTime);
     }
     // Verify the total number of results found/matched
     assertEquals(pathModTime.size(), listing.getPartialListing().length);
+    } finally {
+      nnFs0.delete(new Path("/newdir"), true);
+    }
   }
   /**
-   * Verify that the file listing contains correct permission.
+   * Verify permission for a mount point when the actual destination is not
+   * present. It returns the permissions of the mount point.
    */
   @Test
-  public void testMountTablePermissions() throws Exception {
-    // Add mount table entries
-    MountTable addEntry = MountTable.newInstance(
-        "/testdir1", Collections.singletonMap("ns0", "/testdir1"));
-    addEntry.setGroupName("group1");
-    addEntry.setOwnerName("owner1");
-    addEntry.setMode(FsPermission.createImmutable((short)0775));
-    assertTrue(addMountTable(addEntry));
-    addEntry = MountTable.newInstance(
-        "/testdir2", Collections.singletonMap("ns0", "/testdir2"));
-    addEntry.setGroupName("group2");
-    addEntry.setOwnerName("owner2");
-    addEntry.setMode(FsPermission.createImmutable((short)0755));
-    assertTrue(addMountTable(addEntry));
-    HdfsFileStatus fs = routerProtocol.getFileInfo("/testdir1");
-    assertEquals("group1", fs.getGroup());
-    assertEquals("owner1", fs.getOwner());
-    assertEquals((short) 0775, fs.getPermission().toShort());
-    fs = routerProtocol.getFileInfo("/testdir2");
-    assertEquals("group2", fs.getGroup());
-    assertEquals("owner2", fs.getOwner());
-    assertEquals((short) 0755, fs.getPermission().toShort());
-  }
+  public void testMountTablePermissionsNoDest() throws IOException {
+    MountTable addEntry;
+    addEntry = MountTable.newInstance("/testdir1",
+        Collections.singletonMap("ns0", "/tmp/testdir1"));
+    addEntry.setGroupName("group1");
+    addEntry.setOwnerName("owner1");
+    addEntry.setMode(FsPermission.createImmutable((short) 0775));
+    assertTrue(addMountTable(addEntry));
+    FileStatus[] list = routerContext.getFileSystem().listStatus(new Path("/"));
+    assertEquals("group1", list[0].getGroup());
+    assertEquals("owner1", list[0].getOwner());
+    assertEquals((short) 0775, list[0].getPermission().toShort());
+  }
+
+  /**
+   * Verify permission for a mount point when the actual destination is
+   * present. It returns the permissions of the actual destination pointed to
+   * by the mount point.
+   */
+  @Test
+  public void testMountTablePermissionsWithDest() throws IOException {
+    try {
+      MountTable addEntry = MountTable.newInstance("/testdir",
+          Collections.singletonMap("ns0", "/tmp/testdir"));
+      assertTrue(addMountTable(addEntry));
+      nnFs0.mkdirs(new Path("/tmp/testdir"));
+      nnFs0.setOwner(new Path("/tmp/testdir"), "Aowner", "Agroup");
+      nnFs0.setPermission(new Path("/tmp/testdir"),
+          FsPermission.createImmutable((short) 775));
+      FileStatus[] list =
+          routerContext.getFileSystem().listStatus(new Path("/"));
+      assertEquals("Agroup", list[0].getGroup());
+      assertEquals("Aowner", list[0].getOwner());
+      assertEquals((short) 775, list[0].getPermission().toShort());
+    } finally {
+      nnFs0.delete(new Path("/tmp"), true);
+    }
+  }
+
+  /**
+   * Verify permission for a mount point when multiple destinations are
+   * present and both have the same permissions. It returns those permissions
+   * of the actual destinations pointed to by the mount point.
+   */
+  @Test
+  public void testMountTablePermissionsMultiDest() throws IOException {
+    try {
+      Map<String, String> destMap = new HashMap<>();
+      destMap.put("ns0", "/tmp/testdir");
+      destMap.put("ns1", "/tmp/testdir01");
+      MountTable addEntry = MountTable.newInstance("/testdir", destMap);
+      assertTrue(addMountTable(addEntry));
+      nnFs0.mkdirs(new Path("/tmp/testdir"));
+      nnFs0.setOwner(new Path("/tmp/testdir"), "Aowner", "Agroup");
+      nnFs0.setPermission(new Path("/tmp/testdir"),
+          FsPermission.createImmutable((short) 775));
+      nnFs1.mkdirs(new Path("/tmp/testdir01"));
+      nnFs1.setOwner(new Path("/tmp/testdir01"), "Aowner", "Agroup");
+      nnFs1.setPermission(new Path("/tmp/testdir01"),
+          FsPermission.createImmutable((short) 775));
+      FileStatus[] list =
+          routerContext.getFileSystem().listStatus(new Path("/"));
+      assertEquals("Agroup", list[0].getGroup());
+      assertEquals("Aowner", list[0].getOwner());
+      assertEquals((short) 775, list[0].getPermission().toShort());
+    } finally {
+      nnFs0.delete(new Path("/tmp"), true);
+      nnFs1.delete(new Path("/tmp"), true);
+    }
+  }
+
+  /**
+   * Verify permission for a mount point when multiple destinations are
+   * present and have different permissions. It returns the permissions of
+   * either of the actual destinations pointed to by the mount point.
+   */
+  @Test
+  public void testMountTablePermissionsMultiDestDifferentPerm()
+      throws IOException {
+    try {
+      Map<String, String> destMap = new HashMap<>();
+      destMap.put("ns0", "/tmp/testdir");
+      destMap.put("ns1", "/tmp/testdir01");
+      MountTable addEntry = MountTable.newInstance("/testdir", destMap);
+      assertTrue(addMountTable(addEntry));
+      nnFs0.mkdirs(new Path("/tmp/testdir"));
+      nnFs0.setOwner(new Path("/tmp/testdir"), "Aowner", "Agroup");
+      nnFs0.setPermission(new Path("/tmp/testdir"),
+          FsPermission.createImmutable((short) 775));
+      nnFs1.mkdirs(new Path("/tmp/testdir01"));
+      nnFs1.setOwner(new Path("/tmp/testdir01"), "Aowner01", "Agroup01");
+      nnFs1.setPermission(new Path("/tmp/testdir01"),
+          FsPermission.createImmutable((short) 755));
+      FileStatus[] list =
+          routerContext.getFileSystem().listStatus(new Path("/"));
+      assertTrue("Agroup".equals(list[0].getGroup())
+          || "Agroup01".equals(list[0].getGroup()));
+      assertTrue("Aowner".equals(list[0].getOwner())
+          || "Aowner01".equals(list[0].getOwner()));
+      assertTrue(((short) 775) == list[0].getPermission().toShort()
+          || ((short) 755) == list[0].getPermission().toShort());
+    } finally {
+      nnFs0.delete(new Path("/tmp"), true);
+      nnFs1.delete(new Path("/tmp"), true);
+    }
+  }
+
+  /**
+   * Validate whether the mount point name gets resolved or not. On successful
+   * resolution the details returned are the ones actually set on the mount
+   * point.
+   */
+  @Test
+  public void testMountPointResolved() throws IOException {
+    MountTable addEntry = MountTable.newInstance("/testdir",
+        Collections.singletonMap("ns0", "/tmp/testdir"));
+    addEntry.setGroupName("group1");
+    addEntry.setOwnerName("owner1");
+    assertTrue(addMountTable(addEntry));
+    HdfsFileStatus finfo = routerProtocol.getFileInfo("/testdir");
+    FileStatus[] finfo1 =
+        routerContext.getFileSystem().listStatus(new Path("/"));
+    assertEquals("owner1", finfo.getOwner());
+    assertEquals("owner1", finfo1[0].getOwner());
+    assertEquals("group1", finfo.getGroup());
+    assertEquals("group1", finfo1[0].getGroup());
+  }
+
+  /**
+   * Validate the number of children for the mount point. It must be equal to
+   * the number of children of the destination pointed to by the mount point.
+   */
+  @Test
+  public void testMountPointChildren() throws IOException {
+    try {
+      MountTable addEntry = MountTable.newInstance("/testdir",
+          Collections.singletonMap("ns0", "/tmp/testdir"));
+      assertTrue(addMountTable(addEntry));
+      nnFs0.mkdirs(new Path("/tmp/testdir"));
+      nnFs0.mkdirs(new Path("/tmp/testdir/1"));
+      nnFs0.mkdirs(new Path("/tmp/testdir/2"));
+      FileStatus[] finfo1 =
+          routerContext.getFileSystem().listStatus(new Path("/"));
+      assertEquals(2, ((HdfsFileStatus) finfo1[0]).getChildrenNum());
+    } finally {
+      nnFs0.delete(new Path("/tmp"), true);
+    }
+  }
+
+  /**
+   * Validate the number of children for a mount point pointing to multiple
+   * destinations. It must be equal to the sum of the number of children of
+   * the destinations pointed to by the mount point.
+   */
+  @Test
+  public void testMountPointChildrenMultiDest() throws IOException {
+    try {
+      Map<String, String> destMap = new HashMap<>();
+      destMap.put("ns0", "/tmp/testdir");
+      destMap.put("ns1", "/tmp/testdir01");
+      MountTable addEntry = MountTable.newInstance("/testdir", destMap);
+      assertTrue(addMountTable(addEntry));
+      nnFs0.mkdirs(new Path("/tmp/testdir"));
+      nnFs1.mkdirs(new Path("/tmp/testdir01"));
+      nnFs0.mkdirs(new Path("/tmp/testdir/1"));
+      nnFs1.mkdirs(new Path("/tmp/testdir01/1"));
+      FileStatus[] finfo1 =
+          routerContext.getFileSystem().listStatus(new Path("/"));
+      assertEquals(2, ((HdfsFileStatus) finfo1[0]).getChildrenNum());
+    } finally {
+      nnFs0.delete(new Path("/tmp"), true);
+      nnFs1.delete(new Path("/tmp"), true);
+    }
+  }
 }