HADOOP-8349. ViewFS doesn't work when the root of a file system is mounted. Contributed by Aaron T. Myers.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1334233 13f79535-47bb-0310-9956-ffa450edef68
commit 1409441e45 (parent 2c4faa4b9c)
CHANGES.txt
@@ -294,6 +294,8 @@ Release 2.0.0 - UNRELEASED
 
     HADOOP-8355. SPNEGO filter throws/logs exception when authentication fails (tucu)
 
+    HADOOP-8349. ViewFS doesn't work when the root of a file system is mounted. (atm)
+
     BREAKDOWN OF HADOOP-7454 SUBTASKS
 
     HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh)
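What the fix enables, in configuration terms: a ViewFS mount point can now link to the root of a target file system, not only to a subdirectory of it. A minimal sketch of such a setup using the ConfigUtil.addLink API that the tests below also use; the namenode address and the class name are illustrative assumptions, not taken from this commit.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ConfigUtil;

public class ViewFsRootMountSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Link the ROOT of an HDFS namespace (hypothetical namenode address)
    // under /targetRoot in the default mount table, alongside an ordinary link.
    ConfigUtil.addLink(conf, "/targetRoot", new URI("hdfs://nn.example.com:8020/"));
    ConfigUtil.addLink(conf, "/user", new URI("hdfs://nn.example.com:8020/user"));

    // Before this fix, operations under /targetRoot failed because the chroot
    // prefix "/" was concatenated directly onto the target path.
    FileSystem fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
    fsView.mkdirs(new Path("/targetRoot/dirFoo"));
  }
}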
AbstractFileSystem.java
@@ -346,7 +346,7 @@ public abstract class AbstractFileSystem {
             path);
       } else {
         throw new InvalidPathException(
-            "Path without scheme with non-null autorhrity:" + path);
+            "Path without scheme with non-null authority:" + path);
       }
     }
     String thisScheme = this.getUri().getScheme();
Path.java
@@ -223,6 +223,13 @@ public class Path implements Comparable {
     return isUriPathAbsolute();
   }
   
+  /**
+   * @return true if and only if this path represents the root of a file system
+   */
+  public boolean isRoot() {
+    return getParent() == null;
+  }
+
   /** Returns the final component of this path.*/
   public String getName() {
     String path = uri.getPath();
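The new Path#isRoot() relies on getParent() returning null only for a root path, so a bare "/" and a fully qualified root both report true. A small illustration (the class name is hypothetical, not part of the commit):

import org.apache.hadoop.fs.Path;

public class PathIsRootExample {
  public static void main(String[] args) {
    System.out.println(new Path("/").isRoot());                // true
    System.out.println(new Path("hdfs://nn:8020/").isRoot());  // true: the path component is "/"
    System.out.println(new Path("/user").isRoot());            // false: its parent is "/"
  }
}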
ChRootedFileSystem.java
@@ -75,7 +75,8 @@ class ChRootedFileSystem extends FilterFileSystem {
   protected Path fullPath(final Path path) {
     super.checkPath(path);
     return path.isAbsolute() ?
-      new Path(chRootPathPartString + path.toUri().getPath()) :
+      new Path((chRootPathPart.isRoot() ? "" : chRootPathPartString)
+          + path.toUri().getPath()) :
       new Path(chRootPathPartString + workingDir.toUri().getPath(), path);
   }
   
@@ -127,7 +128,7 @@ class ChRootedFileSystem extends FilterFileSystem {
     }
     String pathPart = p.toUri().getPath();
     return (pathPart.length() == chRootPathPartString.length()) ? "" : pathPart
-      .substring(chRootPathPartString.length() + 1);
+      .substring(chRootPathPartString.length() + (chRootPathPart.isRoot() ? 0 : 1));
   }
   
   @Override
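The isRoot() guards above (and the matching changes in ChRootedFs that follow) matter because a chroot at the file system root is the one case where the chroot string already ends in the path separator. A standalone trace of the two ternaries, with hypothetical names and no dependency on the real classes:

// Not the actual ChRootedFileSystem; just the prefix arithmetic it performs.
public class ChRootPrefixSketch {

  static String fullPath(String chRoot, boolean chRootIsRoot, String path) {
    // New behavior: prepend nothing when the chroot is the FS root "/";
    // the old code always prepended chRoot, yielding "//..." for root mounts.
    return (chRootIsRoot ? "" : chRoot) + path;
  }

  static String stripOutRoot(String chRoot, boolean chRootIsRoot, String pathPart) {
    // New behavior: only skip a separator character when the chroot is not "/";
    // the old "+ 1" skipped the first character of the name for root mounts.
    return pathPart.length() == chRoot.length()
        ? ""
        : pathPart.substring(chRoot.length() + (chRootIsRoot ? 0 : 1));
  }

  public static void main(String[] args) {
    // chroot at /base: unchanged behavior
    System.out.println(fullPath("/base", false, "/foo"));          // /base/foo
    System.out.println(stripOutRoot("/base", false, "/base/foo")); // foo

    // chroot at the FS root "/"
    System.out.println(fullPath("/", true, "/foo"));               // /foo (old code: //foo)
    System.out.println(stripOutRoot("/", true, "/foo"));           // foo  (old code: oo)
  }
}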
ChRootedFs.java
@@ -79,7 +79,8 @@ class ChRootedFs extends AbstractFileSystem {
    */
   protected Path fullPath(final Path path) {
     super.checkPath(path);
-    return new Path(chRootPathPartString + path.toUri().getPath());
+    return new Path((chRootPathPart.isRoot() ? "" : chRootPathPartString)
+        + path.toUri().getPath());
   }
   
   public ChRootedFs(final AbstractFileSystem fs, final Path theRoot)
@@ -127,7 +128,8 @@ class ChRootedFs extends AbstractFileSystem {
     }
     String pathPart = p.toUri().getPath();
     return (pathPart.length() == chRootPathPartString.length()) ?
-        "" : pathPart.substring(chRootPathPartString.length() + 1);
+        "" : pathPart.substring(chRootPathPartString.length() +
+            (chRootPathPart.isRoot() ? 0 : 1));
   }
   
   
ViewFileSystemBaseTest.java
@@ -71,11 +71,8 @@ public class ViewFileSystemBaseTest {
 
   @Before
   public void setUp() throws Exception {
-    targetTestRoot = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
-    // In case previous test was killed before cleanup
-    fsTarget.delete(targetTestRoot, true);
+    initializeTargetTestRoot();
     
-    fsTarget.mkdirs(targetTestRoot);
     // Make user and data dirs - we creates links to them in the mount table
     fsTarget.mkdirs(new Path(targetTestRoot,"user"));
     fsTarget.mkdirs(new Path(targetTestRoot,"data"));
@@ -99,7 +96,16 @@ public class ViewFileSystemBaseTest {
     fsTarget.delete(FileSystemTestHelper.getTestRootPath(fsTarget), true);
   }
   
+  void initializeTargetTestRoot() throws IOException {
+    targetTestRoot = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
+    // In case previous test was killed before cleanup
+    fsTarget.delete(targetTestRoot, true);
+    
+    fsTarget.mkdirs(targetTestRoot);
+  }
+  
   void setupMountPoints() {
+    ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
     ConfigUtil.addLink(conf, "/user", new Path(targetTestRoot,"user").toUri());
     ConfigUtil.addLink(conf, "/user2", new Path(targetTestRoot,"user").toUri());
     ConfigUtil.addLink(conf, "/data", new Path(targetTestRoot,"data").toUri());
@@ -121,7 +127,7 @@ public class ViewFileSystemBaseTest {
   }
   
   int getExpectedMountPoints() {
-    return 7;
+    return 8;
   }
   
   /**
@@ -166,7 +172,7 @@ public class ViewFileSystemBaseTest {
         }
       }
     }
-    Assert.assertEquals(expectedTokenCount / 2, delTokens.size());
+    Assert.assertEquals((expectedTokenCount + 1) / 2, delTokens.size());
   }
   
   int getExpectedDelegationTokenCountWithCredentials() {
@@ -309,6 +315,16 @@ public class ViewFileSystemBaseTest {
     Assert.assertTrue("Renamed dest should exist as dir in target",
         fsTarget.isDirectory(new Path(targetTestRoot,"user/dirFooBar")));
     
+    // Make a directory under a directory that's mounted from the root of another FS
+    fsView.mkdirs(new Path("/targetRoot/dirFoo"));
+    Assert.assertTrue(fsView.exists(new Path("/targetRoot/dirFoo")));
+    boolean dirFooPresent = false;
+    for (FileStatus fileStatus : fsView.listStatus(new Path("/targetRoot/"))) {
+      if (fileStatus.getPath().getName().equals("dirFoo")) {
+        dirFooPresent = true;
+      }
+    }
+    Assert.assertTrue(dirFooPresent);
   }
   
   // rename across mount points that point to same target also fail
@@ -418,7 +434,7 @@ public class ViewFileSystemBaseTest {
   }
   
   int getExpectedDirPaths() {
-    return 6;
+    return 7;
   }
   
   @Test
ViewFsBaseTest.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.AbstractFileSystem;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileContextTestHelper;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.FileContextTestHelper.fileType;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsConstants;
@@ -77,12 +78,8 @@ public class ViewFsBaseTest {
 
   @Before
   public void setUp() throws Exception {
-    
-    targetTestRoot = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
-    // In case previous test was killed before cleanup
-    fcTarget.delete(targetTestRoot, true);
+    initializeTargetTestRoot();
     
-    fcTarget.mkdir(targetTestRoot, FileContext.DEFAULT_PERM, true);
     // Make user and data dirs - we creates links to them in the mount table
     fcTarget.mkdir(new Path(targetTestRoot,"user"),
         FileContext.DEFAULT_PERM, true);
@@ -100,6 +97,7 @@ public class ViewFsBaseTest {
     
     // Set up the defaultMT in the config with our mount point links
     conf = new Configuration();
+    ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
     ConfigUtil.addLink(conf, "/user",
         new Path(targetTestRoot,"user").toUri());
     ConfigUtil.addLink(conf, "/user2",
@@ -118,6 +116,14 @@ public class ViewFsBaseTest {
     fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
     // Also try viewfs://default/ - note authority is name of mount table
   }
   
+  void initializeTargetTestRoot() throws IOException {
+    targetTestRoot = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
+    // In case previous test was killed before cleanup
+    fcTarget.delete(targetTestRoot, true);
+    
+    fcTarget.mkdir(targetTestRoot, FileContext.DEFAULT_PERM, true);
+  }
+  
   @After
   public void tearDown() throws Exception {
@@ -128,7 +134,11 @@ public class ViewFsBaseTest {
   public void testGetMountPoints() {
     ViewFs viewfs = (ViewFs) fcView.getDefaultFileSystem();
     MountPoint[] mountPoints = viewfs.getMountPoints();
-    Assert.assertEquals(7, mountPoints.length);
+    Assert.assertEquals(8, mountPoints.length);
   }
   
+  int getExpectedDelegationTokenCount() {
+    return 0;
+  }
+  
   /**
@@ -140,7 +150,7 @@ public class ViewFsBaseTest {
   public void testGetDelegationTokens() throws IOException {
     List<Token<?>> delTokens =
         fcView.getDelegationTokens(new Path("/"), "sanjay");
-    Assert.assertEquals(0, delTokens.size());
+    Assert.assertEquals(getExpectedDelegationTokenCount(), delTokens.size());
   }
   
   
@@ -281,6 +291,19 @@ public class ViewFsBaseTest {
     Assert.assertTrue("Renamed dest should exist as dir in target",
         isDir(fcTarget,new Path(targetTestRoot,"user/dirFooBar")));
     
+    // Make a directory under a directory that's mounted from the root of another FS
+    fcView.mkdir(new Path("/targetRoot/dirFoo"), FileContext.DEFAULT_PERM, false);
+    Assert.assertTrue(exists(fcView, new Path("/targetRoot/dirFoo")));
+    boolean dirFooPresent = false;
+    RemoteIterator<FileStatus> dirContents = fcView.listStatus(new Path(
+        "/targetRoot/"));
+    while (dirContents.hasNext()) {
+      FileStatus fileStatus = dirContents.next();
+      if (fileStatus.getPath().getName().equals("dirFoo")) {
+        dirFooPresent = true;
+      }
+    }
+    Assert.assertTrue(dirFooPresent);
   }
   
   // rename across mount points that point to same target also fail
@@ -358,7 +381,7 @@ public class ViewFsBaseTest {
     
     FileStatus[] dirPaths = fcView.util().listStatus(new Path("/"));
     FileStatus fs;
-    Assert.assertEquals(6, dirPaths.length);
+    Assert.assertEquals(7, dirPaths.length);
     fs = FileContextTestHelper.containsPath(fcView, "/user", dirPaths);
     Assert.assertNotNull(fs);
     Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
FSNamesystem.java
@@ -4556,7 +4556,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     if (destinationExisted && dinfo.isDir()) {
       Path spath = new Path(src);
       Path parent = spath.getParent();
-      if (isRoot(parent)) {
+      if (parent.isRoot()) {
         overwrite = parent.toString();
       } else {
         overwrite = parent.toString() + Path.SEPARATOR;
@@ -4569,10 +4569,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
     leaseManager.changeLease(src, dst, overwrite, replaceBy);
   }
-  
-  private boolean isRoot(Path path) {
-    return path.getParent() == null;
-  }
 
   /**
    * Serializes leases.
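The FSNamesystem change is a pure substitution: the removed private helper has the same body as the new Path#isRoot(), so the two always agree. A minimal check of that equivalence (sketch code with a hypothetical class name, not part of the commit):

import org.apache.hadoop.fs.Path;

public class IsRootEquivalence {

  // Same body as the helper deleted from FSNamesystem above.
  private static boolean isRoot(Path path) {
    return path.getParent() == null;
  }

  public static void main(String[] args) {
    for (String s : new String[] {"/", "/user", "/user/foo"}) {
      Path p = new Path(s);
      System.out.println(s + ": " + (isRoot(p) == p.isRoot())); // true for every path
    }
  }
}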
TestViewFileSystemAtHdfsRoot.java (new file)
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+/**
+ * Make sure that ViewFileSystem works when the root of an FS is mounted to a
+ * ViewFileSystem mount point.
+ */
+public class TestViewFileSystemAtHdfsRoot extends ViewFileSystemBaseTest {
+
+  private static MiniDFSCluster cluster;
+  private static Configuration CONF = new Configuration();
+  private static FileSystem fHdfs;
+
+  @BeforeClass
+  public static void clusterSetupAtBegining() throws IOException,
+      LoginException, URISyntaxException {
+    SupportsBlocks = true;
+    CONF.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+
+    cluster = new MiniDFSCluster.Builder(CONF)
+        .numDataNodes(2)
+        .build();
+    cluster.waitClusterUp();
+
+    fHdfs = cluster.getFileSystem();
+  }
+
+  @AfterClass
+  public static void clusterShutdownAtEnd() throws Exception {
+    cluster.shutdown();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    fsTarget = fHdfs;
+    super.setUp();
+  }
+
+  /**
+   * Override this so that we don't set the targetTestRoot to any path under the
+   * root of the FS, and so that we don't try to delete the test dir, but rather
+   * only its contents.
+   */
+  @Override
+  void initializeTargetTestRoot() throws IOException {
+    targetTestRoot = fHdfs.makeQualified(new Path("/"));
+    for (FileStatus status : fHdfs.listStatus(targetTestRoot)) {
+      fHdfs.delete(status.getPath(), true);
+    }
+  }
+
+  @Override
+  int getExpectedDelegationTokenCount() {
+    return 8;
+  }
+
+  @Override
+  int getExpectedDelegationTokenCountWithCredentials() {
+    return 1;
+  }
+}
TestViewFileSystemHdfs.java
@@ -105,17 +105,17 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
   // additional mount.
   @Override
   int getExpectedDirPaths() {
-    return 7;
+    return 8;
   }
   
   @Override
   int getExpectedMountPoints() {
-    return 8;
+    return 9;
   }
   
   @Override
   int getExpectedDelegationTokenCount() {
-    return 8;
+    return 9;
   }
   
   @Override
TestViewFsAtHdfsRoot.java (new file)
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+/**
+ * Make sure that ViewFs works when the root of an FS is mounted to a ViewFs
+ * mount point.
+ */
+public class TestViewFsAtHdfsRoot extends ViewFsBaseTest {
+
+  private static MiniDFSCluster cluster;
+  private static HdfsConfiguration CONF = new HdfsConfiguration();
+  private static FileContext fc;
+
+  @BeforeClass
+  public static void clusterSetupAtBegining() throws IOException,
+      LoginException, URISyntaxException {
+    SupportsBlocks = true;
+    CONF.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+
+    cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
+    cluster.waitClusterUp();
+    fc = FileContext.getFileContext(cluster.getURI(0), CONF);
+  }
+
+
+  @AfterClass
+  public static void ClusterShutdownAtEnd() throws Exception {
+    cluster.shutdown();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    // create the test root on local_fs
+    fcTarget = fc;
+    super.setUp();
+  }
+
+  /**
+   * Override this so that we don't set the targetTestRoot to any path under the
+   * root of the FS, and so that we don't try to delete the test dir, but rather
+   * only its contents.
+   */
+  @Override
+  void initializeTargetTestRoot() throws IOException {
+    targetTestRoot = fc.makeQualified(new Path("/"));
+    RemoteIterator<FileStatus> dirContents = fc.listStatus(targetTestRoot);
+    while (dirContents.hasNext()) {
+      fc.delete(dirContents.next().getPath(), true);
+    }
+  }
+
+  /**
+   * This overrides the default implementation since hdfs does have delegation
+   * tokens.
+   */
+  @Override
+  int getExpectedDelegationTokenCount() {
+    return 8;
+  }
+}
TestViewFsHdfs.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.fs.viewfs;
 
 import java.io.IOException;
 import java.net.URISyntaxException;
-import java.util.List;
 
 import javax.security.auth.login.LoginException;
 
@@ -30,20 +29,13 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
 
-import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Test;
-
 
 public class TestViewFsHdfs extends ViewFsBaseTest {
 
   private static MiniDFSCluster cluster;
-  private static Path defaultWorkingDirectory;
   private static HdfsConfiguration CONF = new HdfsConfiguration();
   private static FileContext fc;
 
@@ -57,7 +49,7 @@ public class TestViewFsHdfs extends ViewFsBaseTest {
     cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
     cluster.waitClusterUp();
     fc = FileContext.getFileContext(cluster.getURI(0), CONF);
-    defaultWorkingDirectory = fc.makeQualified( new Path("/user/" +
+    Path defaultWorkingDirectory = fc.makeQualified( new Path("/user/" +
         UserGroupInformation.getCurrentUser().getShortUserName()));
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
   }
@@ -73,25 +65,15 @@ public class TestViewFsHdfs extends ViewFsBaseTest {
     // create the test root on local_fs
     fcTarget = fc;
     super.setUp();
-    
   }
   
-  @After
-  public void tearDown() throws Exception {
-    super.tearDown();
-  }
-  
-  
-  /*
-   * This overides the default implementation since hdfs does have delegation
+  /**
+   * This overrides the default implementation since hdfs does have delegation
    * tokens.
    */
   @Override
-  @Test
-  public void testGetDelegationTokens() throws IOException {
-    List<Token<?>> delTokens =
-        fcView.getDelegationTokens(new Path("/"), "sanjay");
-    Assert.assertEquals(7, delTokens.size());
+  int getExpectedDelegationTokenCount() {
+    return 8;
   }
 
 }