HDFS-14011. RBF: Add more information to HdfsFileStatus for a mount point. Contributed by Akira Ajisaka.
commit 7ac5e769fb
parent 8dfd2e5644
@@ -61,8 +61,10 @@ public interface FileSubclusterResolver {
    * cache.
    *
    * @param path Path to get the mount points under.
-   * @return List of mount points present at this path or zero-length list if
-   *         none are found.
+   * @return List of mount points present at this path. Return zero-length
+   *         list if the path is a mount point but there are no mount points
+   *         under the path. Return null if the path is not a mount point
+   *         and there are no mount points under the path.
    * @throws IOException Throws exception if the data is not available.
    */
   List<String> getMountPoints(String path) throws IOException;
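With the updated contract, callers can no longer assume a non-null result. A minimal caller-side sketch of the distinction, where the resolver, the path and the process() helper are illustrative and not part of this change:

    List<String> mounts = subclusterResolver.getMountPoints(path);
    if (mounts == null) {
      // The path is not a mount point and has no mount points under it.
      return;
    }
    for (String mount : mounts) {
      // A zero-length list means the path is a mount point with nothing
      // mounted below it, so this loop simply does not run.
      process(mount);
    }

The TestRouterRpcMultiDestination hunk at the end of this commit applies the same null check.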
@@ -726,6 +726,9 @@ public class RouterClientProtocol implements ClientProtocol {
           date = dates.get(src);
         }
         ret = getMountPointStatus(src, children.size(), date);
+      } else if (children != null) {
+        // The src is a mount point, but there are no files or directories
+        ret = getMountPointStatus(src, 0, 0);
       }
     }
 
@@ -1734,13 +1737,26 @@ public class RouterClientProtocol implements ClientProtocol {
     FsPermission permission = FsPermission.getDirDefault();
     String owner = this.superUser;
     String group = this.superGroup;
-    try {
-      // TODO support users, it should be the user for the pointed folder
-      UserGroupInformation ugi = RouterRpcServer.getRemoteUser();
-      owner = ugi.getUserName();
-      group = ugi.getPrimaryGroupName();
-    } catch (IOException e) {
-      LOG.error("Cannot get the remote user: {}", e.getMessage());
+    if (subclusterResolver instanceof MountTableResolver) {
+      try {
+        MountTableResolver mountTable = (MountTableResolver) subclusterResolver;
+        MountTable entry = mountTable.getMountPoint(name);
+        if (entry != null) {
+          permission = entry.getMode();
+          owner = entry.getOwnerName();
+          group = entry.getGroupName();
+        }
+      } catch (IOException e) {
+        LOG.error("Cannot get mount point: {}", e.getMessage());
+      }
+    } else {
+      try {
+        UserGroupInformation ugi = RouterRpcServer.getRemoteUser();
+        owner = ugi.getUserName();
+        group = ugi.getPrimaryGroupName();
+      } catch (IOException e) {
+        LOG.error("Cannot get remote user: {}", e.getMessage());
+      }
     }
     long inodeId = 0;
     return new HdfsFileStatus.Builder()
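In practice this means the owner, group and mode stored on a mount table entry now surface in the mount point's file status, rather than the calling user's identity. A condensed sketch of the behaviour, reusing the calls that appear in the new test further down (paths and values are illustrative):

    MountTable entry = MountTable.newInstance(
        "/testdir1", Collections.singletonMap("ns0", "/testdir1"));
    entry.setOwnerName("owner1");
    entry.setGroupName("group1");
    entry.setMode(FsPermission.createImmutable((short) 0775));
    // ... register the entry through the router admin interface ...

    // getFileInfo() on the mount point then reports the entry's attributes:
    HdfsFileStatus status = routerProtocol.getFileInfo("/testdir1");
    // status.getOwner()                 -> "owner1"
    // status.getGroup()                 -> "group1"
    // status.getPermission().toShort()  -> 0775

The non-MountTableResolver branch keeps the previous behaviour of reporting the remote caller's user and primary group.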
@@ -87,11 +87,12 @@ public class RouterQuotaUpdateService extends PeriodicService {
 
       QuotaUsage currentQuotaUsage = null;
 
-      // Check whether destination path exists in filesystem. If destination
-      // is not present, reset the usage. For other mount entry get current
-      // quota usage
+      // Check whether destination path exists in filesystem. When the
+      // mtime is zero, the destination is not present and reset the usage.
+      // This is because mount table does not have mtime.
+      // For other mount entry get current quota usage
       HdfsFileStatus ret = this.rpcServer.getFileInfo(src);
-      if (ret == null) {
+      if (ret == null || ret.getModificationTime() == 0) {
         currentQuotaUsage = new RouterQuotaUsage.Builder()
             .fileAndDirectoryCount(0)
             .quota(nsQuota)
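The new mtime check leans on the RouterClientProtocol change above: a mount point that only exists in the mount table is synthesized via getMountPointStatus(src, 0, 0) and therefore carries a modification time of 0, while a path that really exists in a subcluster has a genuine mtime. Treating mtime == 0 as "destination absent" thus resets the quota usage for mount-table-only entries as well as for missing paths.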
@@ -303,15 +303,16 @@ public class MockResolver
 
   @Override
   public List<String> getMountPoints(String path) throws IOException {
+    // Mounts only supported under root level
+    if (!path.equals("/")) {
+      return null;
+    }
     List<String> mounts = new ArrayList<>();
-    if (path.equals("/")) {
-      // Mounts only supported under root level
-      for (String mount : this.locations.keySet()) {
-        if (mount.length() > 1) {
-          // Remove leading slash, this is the behavior of the mount tree,
-          // return only names.
-          mounts.add(mount.replace("/", ""));
-        }
+    for (String mount : this.locations.keySet()) {
+      if (mount.length() > 1) {
+        // Remove leading slash, this is the behavior of the mount tree,
+        // return only names.
+        mounts.add(mount.replace("/", ""));
       }
     }
     return mounts;
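MockResolver now follows the updated getMountPoints() contract: it returns null for any path that is not a mount point (everything except the root in this mock), and a list of child mount names otherwise.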
@@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -43,8 +44,12 @@ import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.util.Time;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -59,9 +64,11 @@ public class TestRouterMountTable {
   private static RouterContext routerContext;
   private static MountTableResolver mountTable;
   private static ClientProtocol routerProtocol;
+  private static long startTime;
 
   @BeforeClass
   public static void globalSetUp() throws Exception {
+    startTime = Time.now();
 
     // Build and start a federated cluster
     cluster = new StateStoreDFSCluster(false, 1);
@@ -92,6 +99,21 @@
     }
   }
 
+  @After
+  public void clearMountTable() throws IOException {
+    RouterClient client = routerContext.getAdminClient();
+    MountTableManager mountTableManager = client.getMountTableManager();
+    GetMountTableEntriesRequest req1 =
+        GetMountTableEntriesRequest.newInstance("/");
+    GetMountTableEntriesResponse response =
+        mountTableManager.getMountTableEntries(req1);
+    for (MountTable entry : response.getEntries()) {
+      RemoveMountTableEntryRequest req2 =
+          RemoveMountTableEntryRequest.newInstance(entry.getSourcePath());
+      mountTableManager.removeMountTableEntry(req2);
+    }
+  }
+
   @Test
   public void testReadOnly() throws Exception {
 
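The new @After hook clears the mount table between tests, so entries added by one case (such as the permissions test introduced below) do not leak into the others.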
@@ -157,7 +179,6 @@
    */
   @Test
   public void testListFilesTime() throws Exception {
-    Long beforeCreatingTime = Time.now();
     // Add mount table entry
     MountTable addEntry = MountTable.newInstance(
         "/testdir", Collections.singletonMap("ns0", "/testdir"));
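The per-test timestamp is dropped in favour of the startTime captured once in globalSetUp(), which the assertion in the next hunk uses instead.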
@@ -211,10 +232,40 @@
       Long expectedTime = pathModTime.get(currentFile);
 
       assertEquals(currentFile, fileName);
-      assertTrue(currentTime > beforeCreatingTime);
+      assertTrue(currentTime > startTime);
       assertEquals(currentTime, expectedTime);
     }
     // Verify the total number of results found/matched
     assertEquals(pathModTime.size(), listing.getPartialListing().length);
   }
+
+  /**
+   * Verify that the file listing contains correct permission.
+   */
+  @Test
+  public void testMountTablePermissions() throws Exception {
+    // Add mount table entries
+    MountTable addEntry = MountTable.newInstance(
+        "/testdir1", Collections.singletonMap("ns0", "/testdir1"));
+    addEntry.setGroupName("group1");
+    addEntry.setOwnerName("owner1");
+    addEntry.setMode(FsPermission.createImmutable((short)0775));
+    assertTrue(addMountTable(addEntry));
+    addEntry = MountTable.newInstance(
+        "/testdir2", Collections.singletonMap("ns0", "/testdir2"));
+    addEntry.setGroupName("group2");
+    addEntry.setOwnerName("owner2");
+    addEntry.setMode(FsPermission.createImmutable((short)0755));
+    assertTrue(addMountTable(addEntry));
+
+    HdfsFileStatus fs = routerProtocol.getFileInfo("/testdir1");
+    assertEquals("group1", fs.getGroup());
+    assertEquals("owner1", fs.getOwner());
+    assertEquals((short) 0775, fs.getPermission().toShort());
+
+    fs = routerProtocol.getFileInfo("/testdir2");
+    assertEquals("group2", fs.getGroup());
+    assertEquals("owner2", fs.getOwner());
+    assertEquals((short) 0755, fs.getPermission().toShort());
+  }
 }
@@ -123,8 +123,9 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc {
     RouterContext rc = getRouterContext();
     Router router = rc.getRouter();
     FileSubclusterResolver subclusterResolver = router.getSubclusterResolver();
-    for (String mount : subclusterResolver.getMountPoints(path)) {
-      requiredPaths.add(mount);
+    List<String> mountList = subclusterResolver.getMountPoints(path);
+    if (mountList != null) {
+      requiredPaths.addAll(mountList);
     }
 
     // Get files/dirs from the Namenodes