HDFS-14011. RBF: Add more information to HdfsFileStatus for a mount point. Contributed by Akira Ajisaka.
parent 41c94a636b
commit b3fee1d2bf
@@ -61,8 +61,10 @@ public interface FileSubclusterResolver {
    * cache.
    *
    * @param path Path to get the mount points under.
-   * @return List of mount points present at this path or zero-length list if
-   *         none are found.
+   * @return List of mount points present at this path. Return zero-length
+   *         list if the path is a mount point but there are no mount points
+   *         under the path. Return null if the path is not a mount point
+   *         and there are no mount points under the path.
    * @throws IOException Throws exception if the data is not available.
    */
  List<String> getMountPoints(String path) throws IOException;
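For readers applying the clarified contract, here is a minimal caller-side sketch; it is not part of this commit, and the class name and example path are illustrative. It relies only on the getMountPoints(String) signature shown above, and it spells out what each of the three possible return values now means.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;

/** Caller-side sketch of the revised getMountPoints() contract. */
final class MountPointContractSketch {

  /** Summarize what each possible return value means for a given path. */
  static String describe(FileSubclusterResolver resolver, String path)
      throws IOException {
    List<String> mounts = resolver.getMountPoints(path);
    if (mounts == null) {
      // The path is not a mount point and has no mount points under it.
      return path + " is not a mount point";
    } else if (mounts.isEmpty()) {
      // The path is itself a mount point, but nothing is mounted below it.
      return path + " is a mount point with no child mount points";
    } else {
      // Child mount points exist directly under the path.
      return path + " has " + mounts.size() + " child mount point(s)";
    }
  }
}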
@@ -726,6 +726,9 @@ public class RouterClientProtocol implements ClientProtocol {
           date = dates.get(src);
         }
         ret = getMountPointStatus(src, children.size(), date);
+      } else if (children != null) {
+        // The src is a mount point, but there are no files or directories
+        ret = getMountPointStatus(src, 0, 0);
       }
     }
 
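The branch above is easiest to read as a three-way decision on the children list returned by getMountPoints(). A standalone sketch of that decision follows; it is not the Router's own code, and SyntheticStatus and fabricate() are hypothetical stand-ins for HdfsFileStatus and getMountPointStatus(src, childrenNum, date).

import java.util.List;

/** Standalone sketch of the mount-point decision added to getFileInfo(). */
final class MountPointInfoSketch {

  /** Placeholder for the synthetic status getMountPointStatus() would build. */
  static final class SyntheticStatus {
    final int childrenNum;
    final long modificationTime;
    SyntheticStatus(int childrenNum, long modificationTime) {
      this.childrenNum = childrenNum;
      this.modificationTime = modificationTime;
    }
  }

  /** Stand-in for getMountPointStatus(src, childrenNum, date). */
  static SyntheticStatus fabricate(String src, int childrenNum, long date) {
    return new SyntheticStatus(childrenNum, date);
  }

  static SyntheticStatus resolve(String src, List<String> children, long date) {
    if (children != null && !children.isEmpty()) {
      // Mount point with child mount points: report their count and date.
      return fabricate(src, children.size(), date);
    } else if (children != null) {
      // The src is a mount point, but there are no files or directories
      // under it: report zero children and modification time 0.
      return fabricate(src, 0, 0);
    }
    // children == null: src is not a mount point, so no status is fabricated.
    return null;
  }
}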
@@ -1734,13 +1737,26 @@ public class RouterClientProtocol implements ClientProtocol {
     FsPermission permission = FsPermission.getDirDefault();
     String owner = this.superUser;
     String group = this.superGroup;
-    try {
-      // TODO support users, it should be the user for the pointed folder
-      UserGroupInformation ugi = RouterRpcServer.getRemoteUser();
-      owner = ugi.getUserName();
-      group = ugi.getPrimaryGroupName();
-    } catch (IOException e) {
-      LOG.error("Cannot get the remote user: {}", e.getMessage());
+    if (subclusterResolver instanceof MountTableResolver) {
+      try {
+        MountTableResolver mountTable = (MountTableResolver) subclusterResolver;
+        MountTable entry = mountTable.getMountPoint(name);
+        if (entry != null) {
+          permission = entry.getMode();
+          owner = entry.getOwnerName();
+          group = entry.getGroupName();
+        }
+      } catch (IOException e) {
+        LOG.error("Cannot get mount point: {}", e.getMessage());
+      }
+    } else {
+      try {
+        UserGroupInformation ugi = RouterRpcServer.getRemoteUser();
+        owner = ugi.getUserName();
+        group = ugi.getPrimaryGroupName();
+      } catch (IOException e) {
+        LOG.error("Cannot get remote user: {}", e.getMessage());
+      }
     }
     long inodeId = 0;
     return new HdfsFileStatus.Builder()
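From a client's point of view, the effect of the rewritten block is that getFileInfo() on a mount point now reports the mount entry's owner, group, and mode whenever the resolver is a MountTableResolver, and falls back to the remote user otherwise. A hedged probe under those assumptions; the Router endpoint and path are illustrative, and it uses only the standard FileSystem/FileStatus API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Sketch (not part of the patch) of what a client observes through the Router. */
public class MountPointStatusProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://router-host:8888"); // illustrative Router RPC endpoint
    try (FileSystem fs = FileSystem.get(conf)) {
      FileStatus st = fs.getFileStatus(new Path("/testdir1"));
      // With a mount entry like the one added in the test further below, these
      // reflect the entry's owner/group/mode rather than the Router's superuser.
      System.out.println(st.getOwner() + ":" + st.getGroup() + " " + st.getPermission());
    }
  }
}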
@@ -87,11 +87,12 @@ public class RouterQuotaUpdateService extends PeriodicService {
 
       QuotaUsage currentQuotaUsage = null;
 
-      // Check whether destination path exists in filesystem. If destination
-      // is not present, reset the usage. For other mount entry get current
-      // quota usage
+      // Check whether destination path exists in filesystem. When the
+      // mtime is zero, the destination is not present and reset the usage.
+      // This is because mount table does not have mtime.
+      // For other mount entry get current quota usage
       HdfsFileStatus ret = this.rpcServer.getFileInfo(src);
-      if (ret == null) {
+      if (ret == null || ret.getModificationTime() == 0) {
         currentQuotaUsage = new RouterQuotaUsage.Builder()
             .fileAndDirectoryCount(0)
             .quota(nsQuota)
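The widened check leans on the previous hunk: a path that exists only in the mount table is now reported by getMountPointStatus(src, 0, 0) with modification time 0, which a real file or directory never has. A tiny sketch of the predicate, written as a free-standing helper rather than the service's own code:

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

/** Sketch of the "destination missing" test used by the quota updater. */
final class QuotaResetCheckSketch {
  /** True when the mount entry has no real destination behind it. */
  static boolean shouldResetUsage(HdfsFileStatus ret) {
    // null: the Router could not resolve the path at all.
    // mtime == 0: the path is a mount-table-only entry, because the mount
    // table stores no modification time and real inodes always report one.
    return ret == null || ret.getModificationTime() == 0;
  }
}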
@@ -303,15 +303,16 @@ public class MockResolver
 
   @Override
   public List<String> getMountPoints(String path) throws IOException {
+    // Mounts only supported under root level
+    if (!path.equals("/")) {
+      return null;
+    }
     List<String> mounts = new ArrayList<>();
-    if (path.equals("/")) {
-      // Mounts only supported under root level
-      for (String mount : this.locations.keySet()) {
-        if (mount.length() > 1) {
-          // Remove leading slash, this is the behavior of the mount tree,
-          // return only names.
-          mounts.add(mount.replace("/", ""));
-        }
+    for (String mount : this.locations.keySet()) {
+      if (mount.length() > 1) {
+        // Remove leading slash, this is the behavior of the mount tree,
+        // return only names.
+        mounts.add(mount.replace("/", ""));
       }
     }
     return mounts;
@@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -43,8 +44,12 @@ import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.util.Time;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -59,9 +64,11 @@ public class TestRouterMountTable {
   private static RouterContext routerContext;
   private static MountTableResolver mountTable;
   private static ClientProtocol routerProtocol;
+  private static long startTime;
 
   @BeforeClass
   public static void globalSetUp() throws Exception {
+    startTime = Time.now();
 
     // Build and start a federated cluster
     cluster = new StateStoreDFSCluster(false, 1);
@@ -92,6 +99,21 @@ public class TestRouterMountTable {
     }
   }
 
+  @After
+  public void clearMountTable() throws IOException {
+    RouterClient client = routerContext.getAdminClient();
+    MountTableManager mountTableManager = client.getMountTableManager();
+    GetMountTableEntriesRequest req1 =
+        GetMountTableEntriesRequest.newInstance("/");
+    GetMountTableEntriesResponse response =
+        mountTableManager.getMountTableEntries(req1);
+    for (MountTable entry : response.getEntries()) {
+      RemoveMountTableEntryRequest req2 =
+          RemoveMountTableEntryRequest.newInstance(entry.getSourcePath());
+      mountTableManager.removeMountTableEntry(req2);
+    }
+  }
+
   @Test
   public void testReadOnly() throws Exception {
 
@@ -157,7 +179,6 @@ public class TestRouterMountTable {
    */
   @Test
   public void testListFilesTime() throws Exception {
-    Long beforeCreatingTime = Time.now();
     // Add mount table entry
     MountTable addEntry = MountTable.newInstance(
         "/testdir", Collections.singletonMap("ns0", "/testdir"));
@@ -211,10 +232,40 @@ public class TestRouterMountTable {
       Long expectedTime = pathModTime.get(currentFile);
 
       assertEquals(currentFile, fileName);
-      assertTrue(currentTime > beforeCreatingTime);
+      assertTrue(currentTime > startTime);
       assertEquals(currentTime, expectedTime);
     }
     // Verify the total number of results found/matched
     assertEquals(pathModTime.size(), listing.getPartialListing().length);
   }
+
+  /**
+   * Verify that the file listing contains correct permission.
+   */
+  @Test
+  public void testMountTablePermissions() throws Exception {
+    // Add mount table entries
+    MountTable addEntry = MountTable.newInstance(
+        "/testdir1", Collections.singletonMap("ns0", "/testdir1"));
+    addEntry.setGroupName("group1");
+    addEntry.setOwnerName("owner1");
+    addEntry.setMode(FsPermission.createImmutable((short)0775));
+    assertTrue(addMountTable(addEntry));
+    addEntry = MountTable.newInstance(
+        "/testdir2", Collections.singletonMap("ns0", "/testdir2"));
+    addEntry.setGroupName("group2");
+    addEntry.setOwnerName("owner2");
+    addEntry.setMode(FsPermission.createImmutable((short)0755));
+    assertTrue(addMountTable(addEntry));
+
+    HdfsFileStatus fs = routerProtocol.getFileInfo("/testdir1");
+    assertEquals("group1", fs.getGroup());
+    assertEquals("owner1", fs.getOwner());
+    assertEquals((short) 0775, fs.getPermission().toShort());
+
+    fs = routerProtocol.getFileInfo("/testdir2");
+    assertEquals("group2", fs.getGroup());
+    assertEquals("owner2", fs.getOwner());
+    assertEquals((short) 0755, fs.getPermission().toShort());
+  }
 }
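A side note on the permission literals used in the new test: 0775 and 0755 are Java octal literals, so the asserted modes correspond to rwxrwxr-x and rwxr-xr-x. A small, self-contained illustration (not part of the test suite):

import org.apache.hadoop.fs.permission.FsPermission;

/** Prints the symbolic form of the octal permission literals used above. */
public class PermissionLiteralDemo {
  public static void main(String[] args) {
    System.out.println(FsPermission.createImmutable((short) 0775)); // rwxrwxr-x
    System.out.println(FsPermission.createImmutable((short) 0755)); // rwxr-xr-x
  }
}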
@@ -123,8 +123,9 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc {
     RouterContext rc = getRouterContext();
     Router router = rc.getRouter();
     FileSubclusterResolver subclusterResolver = router.getSubclusterResolver();
-    for (String mount : subclusterResolver.getMountPoints(path)) {
-      requiredPaths.add(mount);
+    List<String> mountList = subclusterResolver.getMountPoints(path);
+    if (mountList != null) {
+      requiredPaths.addAll(mountList);
     }
 
     // Get files/dirs from the Namenodes