HDFS-6330. Move mkdirs() to FSNamesystem. Contributed by Haohui Mai.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1602484 13f79535-47bb-0310-9956-ffa450edef68
parent cd894ca890
commit 02f3ff3387

@@ -435,6 +435,8 @@ Release 2.5.0 - UNRELEASED
    HDFS-3493. Invalidate excess corrupted blocks as long as minimum
    replication is satisfied. (Juan Yu and Vinayakumar B via wang)

    HDFS-6330. Move mkdirs() to FSNamesystem. (wheat9)

  OPTIMIZATIONS

    HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)

@@ -315,19 +315,7 @@ public class FSDirectory implements Closeable {
      UnresolvedLinkException, SnapshotAccessControlException, AclException {
    waitForReady();

    // Always do an implicit mkdirs for parent directory tree.
    long modTime = now();

    Path parent = new Path(path).getParent();
    if (parent == null) {
      // Trying to add "/" as a file - this path has no
      // parent -- avoids an NPE below.
      return null;
    }

    if (!mkdirs(parent.toString(), permissions, true, modTime)) {
      return null;
    }
    INodeFile newNode = new INodeFile(namesystem.allocateNewInodeId(), null,
        permissions, modTime, modTime, BlockInfo.EMPTY_ARRAY, replication,
        preferredBlockSize);

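The block removed above existed largely for one edge case: creating "/" as a file leaves nothing to mkdirs, because the root path has no parent. A minimal standalone sketch of that guard, using java.nio.file instead of Hadoop's Path (the class and behaviour are analogous; this is not the HDFS code):

// Sketch of the null-parent guard: the root has no parent, so an unchecked
// getParent() call would lead to an NPE further down the create path.
import java.nio.file.Path;
import java.nio.file.Paths;

public class ParentGuardSketch {
  // Returns the parent to pre-create, or null if the path is the root itself.
  static Path parentOrNull(String path) {
    return Paths.get(path).getParent();   // null for "/", mirroring the guarded case
  }

  public static void main(String[] args) {
    System.out.println(parentOrNull("/a/b/c")); // /a/b
    System.out.println(parentOrNull("/"));      // null -> caller must bail out
  }
}
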
@@ -1806,112 +1794,6 @@ public class FSDirectory implements Closeable {
    // inodes can be null only when its called without holding lock
    return inodes == null ? "" : getFullPathName(inodes, inodes.length - 1);
  }

  /**
   * Create a directory
   * If ancestor directories do not exist, automatically create them.
   *
   * @param src string representation of the path to the directory
   * @param permissions the permission of the directory
   * @param inheritPermission if the permission of the directory should inherit
   *          from its parent or not. u+wx is implicitly added to
   *          the automatically created directories, and to the
   *          given directory if inheritPermission is true
   * @param now creation time
   * @return true if the operation succeeds false otherwise
   * @throws QuotaExceededException if directory creation violates
   *          any quota limit
   * @throws UnresolvedLinkException if a symlink is encountered in src.
   * @throws SnapshotAccessControlException if path is in RO snapshot
   */
  boolean mkdirs(String src, PermissionStatus permissions,
      boolean inheritPermission, long now)
      throws FileAlreadyExistsException, QuotaExceededException,
      UnresolvedLinkException, SnapshotAccessControlException,
      AclException {
    src = normalizePath(src);
    String[] names = INode.getPathNames(src);
    byte[][] components = INode.getPathComponents(names);
    final int lastInodeIndex = components.length - 1;

    writeLock();
    try {
      INodesInPath iip = getExistingPathINodes(components);
      if (iip.isSnapshot()) {
        throw new SnapshotAccessControlException(
            "Modification on RO snapshot is disallowed");
      }
      INode[] inodes = iip.getINodes();

      // find the index of the first null in inodes[]
      StringBuilder pathbuilder = new StringBuilder();
      int i = 1;
      for(; i < inodes.length && inodes[i] != null; i++) {
        pathbuilder.append(Path.SEPARATOR).append(names[i]);
        if (!inodes[i].isDirectory()) {
          throw new FileAlreadyExistsException("Parent path is not a directory: "
              + pathbuilder+ " "+inodes[i].getLocalName());
        }
      }

      // default to creating parent dirs with the given perms
      PermissionStatus parentPermissions = permissions;

      // if not inheriting and it's the last inode, there's no use in
      // computing perms that won't be used
      if (inheritPermission || (i < lastInodeIndex)) {
        // if inheriting (ie. creating a file or symlink), use the parent dir,
        // else the supplied permissions
        // NOTE: the permissions of the auto-created directories violate posix
        FsPermission parentFsPerm = inheritPermission
            ? inodes[i-1].getFsPermission() : permissions.getPermission();

        // ensure that the permissions allow user write+execute
        if (!parentFsPerm.getUserAction().implies(FsAction.WRITE_EXECUTE)) {
          parentFsPerm = new FsPermission(
              parentFsPerm.getUserAction().or(FsAction.WRITE_EXECUTE),
              parentFsPerm.getGroupAction(),
              parentFsPerm.getOtherAction()
          );
        }

        if (!parentPermissions.getPermission().equals(parentFsPerm)) {
          parentPermissions = new PermissionStatus(
              parentPermissions.getUserName(),
              parentPermissions.getGroupName(),
              parentFsPerm
          );
          // when inheriting, use same perms for entire path
          if (inheritPermission) permissions = parentPermissions;
        }
      }

      // create directories beginning from the first null index
      for(; i < inodes.length; i++) {
        pathbuilder.append(Path.SEPARATOR).append(names[i]);
        unprotectedMkdir(namesystem.allocateNewInodeId(), iip, i,
            components[i], (i < lastInodeIndex) ? parentPermissions
                : permissions, null, now);
        if (inodes[i] == null) {
          return false;
        }
        // Directory creation also count towards FilesCreated
        // to match count of FilesDeleted metric.
        if (getFSNamesystem() != null)
          NameNode.getNameNodeMetrics().incrFilesCreated();

        final String cur = pathbuilder.toString();
        fsImage.getEditLog().logMkDir(cur, inodes[i]);
        if(NameNode.stateChangeLog.isDebugEnabled()) {
          NameNode.stateChangeLog.debug(
              "DIR* FSDirectory.mkdirs: created directory " + cur);
        }
      }
    } finally {
      writeUnlock();
    }
    return true;
  }

  INode unprotectedMkdir(long inodeId, String src, PermissionStatus permissions,
      List<AclEntry> aclEntries, long timestamp)

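The mkdirs() removed above (it reappears as FSNamesystem#mkdirsRecursively later in this commit) follows a two-phase walk: consume the path components that already exist, then create every directory after the first gap. A stripped-down sketch of that walk over an in-memory map, with permission handling, quota checks, and edit logging omitted; all names below are illustrative, not Hadoop APIs:

// Resolve the existing prefix of the path, then create everything missing.
import java.util.HashMap;
import java.util.Map;

public class MkdirsWalkSketch {
  static final class Dir {
    final Map<String, Dir> children = new HashMap<>();
  }

  private final Dir root = new Dir();

  // Create all missing directories along the path; returns true on success.
  boolean mkdirs(String src) {
    String[] names = src.split("/");
    Dir cur = root;
    int i = 1;                                   // names[0] is the empty root component
    // walk the part of the path that already exists
    for (; i < names.length && cur.children.containsKey(names[i]); i++) {
      cur = cur.children.get(names[i]);
    }
    // create directories beginning from the first missing component
    for (; i < names.length; i++) {
      Dir child = new Dir();
      cur.children.put(names[i], child);
      cur = child;
    }
    return true;
  }

  public static void main(String[] args) {
    MkdirsWalkSketch fs = new MkdirsWalkSketch();
    fs.mkdirs("/a/b/c");
    fs.mkdirs("/a/b/d");                         // reuses the existing /a/b prefix
    System.out.println(fs.root.children.keySet()); // [a]
  }
}
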
@@ -1930,7 +1812,7 @@ public class FSDirectory implements Closeable {
   * The parent path to the directory is at [0, pos-1].
   * All ancestors exist. Newly created one stored at index pos.
   */
  private void unprotectedMkdir(long inodeId, INodesInPath inodesInPath,
  void unprotectedMkdir(long inodeId, INodesInPath inodesInPath,
      int pos, byte[] name, PermissionStatus permission,
      List<AclEntry> aclEntries, long timestamp)
      throws QuotaExceededException, AclException {

@@ -2242,10 +2124,8 @@ public class FSDirectory implements Closeable {
    }
    return 1;
  }

  /**
   */
  String normalizePath(String src) {
  static String normalizePath(String src) {
    if (src.length() > 1 && src.endsWith("/")) {
      src = src.substring(0, src.length() - 1);
    }

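The hunk above makes normalizePath() static so that FSNamesystem can call it without an FSDirectory instance. A self-contained version of the same behaviour; the trailing return is assumed from context, since the diff cuts off before the end of the method:

// Drop a single trailing "/" unless the path is the root itself.
public class NormalizePathSketch {
  static String normalizePath(String src) {
    if (src.length() > 1 && src.endsWith("/")) {
      src = src.substring(0, src.length() - 1);
    }
    return src;
  }

  public static void main(String[] args) {
    System.out.println(normalizePath("/foo/bar/")); // /foo/bar
    System.out.println(normalizePath("/"));         // "/" is left untouched
  }
}
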
@@ -2581,49 +2461,21 @@ public class FSDirectory implements Closeable {
    }
    return perm;
  }

  /**
   * Add the given symbolic link to the fs. Record it in the edits log.
   */
  INodeSymlink addSymlink(String path, String target,
      PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
      throws UnresolvedLinkException, FileAlreadyExistsException,
      QuotaExceededException, SnapshotAccessControlException, AclException {
    waitForReady();

    final long modTime = now();
    if (createParent) {
      final String parent = new Path(path).getParent().toString();
      if (!mkdirs(parent, dirPerms, true, modTime)) {
        return null;
      }
    }
    final String userName = dirPerms.getUserName();
    INodeSymlink newNode = null;
    long id = namesystem.allocateNewInodeId();
  /**
   * Add the specified path into the namespace.
   */
  INodeSymlink addSymlink(long id, String path, String target,
      long mtime, long atime, PermissionStatus perm)
      throws UnresolvedLinkException, QuotaExceededException {
    writeLock();
    try {
      newNode = unprotectedAddSymlink(id, path, target, modTime, modTime,
          new PermissionStatus(userName, null, FsPermission.getDefault()));
      return unprotectedAddSymlink(id, path, target, mtime, atime, perm);
    } finally {
      writeUnlock();
    }
    if (newNode == null) {
      NameNode.stateChangeLog.info("DIR* addSymlink: failed to add " + path);
      return null;
    }
    fsImage.getEditLog().logSymlink(path, target, modTime, modTime, newNode,
        logRetryCache);

    if(NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* addSymlink: " + path + " is added");
    }
    return newNode;
  }

  /**
   * Add the specified path into the namespace. Invoked from edit log processing.
   */
  INodeSymlink unprotectedAddSymlink(long id, String path, String target,
      long mtime, long atime, PermissionStatus perm)
      throws UnresolvedLinkException, QuotaExceededException {

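The surviving addSymlink(long id, ...) above is reduced to a lock-and-delegate shell around unprotectedAddSymlink(), which callers that already hold the lock (for example edit-log replay) can invoke directly. A generic sketch of that locked/unprotected split using a plain ReentrantReadWriteLock; none of this is the Hadoop implementation:

// Public method only acquires the write lock and delegates; the unprotected
// variant assumes the caller already holds the lock.
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockedDelegateSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private String lastSymlink;

  String addSymlink(String path, String target) {
    lock.writeLock().lock();
    try {
      return unprotectedAddSymlink(path, target);
    } finally {
      lock.writeLock().unlock();
    }
  }

  // Caller must hold the write lock (mirrors unprotectedAddSymlink above).
  String unprotectedAddSymlink(String path, String target) {
    lastSymlink = path + " -> " + target;
    return lastSymlink;
  }

  public static void main(String[] args) {
    System.out.println(new LockedDelegateSketch().addSymlink("/a/link", "/a/target"));
  }
}
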
@@ -145,6 +145,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;

@@ -166,6 +167,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeException;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;

@@ -2074,7 +2076,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      checkFsObjectLimit();

      // add symbolic link to namespace
      dir.addSymlink(link, target, dirPerms, createParent, logRetryCache);
      addSymlink(link, target, dirPerms, createParent, logRetryCache);
      resultingStat = getAuditFileInfo(link, false);
    } finally {
      writeUnlock();

@@ -2327,8 +2329,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    final DatanodeDescriptor clientNode =
        blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);

    INodeFile newNode = dir.addFile(src, permissions, replication, blockSize,
        holder, clientMachine, clientNode);
    INodeFile newNode = null;

    // Always do an implicit mkdirs for parent directory tree.
    Path parent = new Path(src).getParent();
    if (parent != null && mkdirsRecursively(parent.toString(),
        permissions, true, now())) {
      newNode = dir.addFile(src, permissions, replication, blockSize,
          holder, clientMachine, clientNode);
    }

    if (newNode == null) {
      throw new IOException("Unable to add " + src + " to namespace");
    }

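The lines added above move the implicit parent mkdirs from FSDirectory.addFile() into FSNamesystem: create the parent tree first, then add the file, and fail if either step produced nothing. The same flow sketched with java.nio.file as a stand-in for the HDFS namespace calls:

// Create the parent directories, then the file; fail if there is no parent.
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class CreateWithParentsSketch {
  static Path create(String src) throws IOException {
    Path file = Paths.get(src);
    Path parent = file.getParent();
    if (parent != null) {
      Files.createDirectories(parent);   // implicit mkdirs for the parent tree
    }
    if (parent == null || !Files.isDirectory(parent)) {
      throw new IOException("Unable to add " + src + " to namespace");
    }
    return Files.createFile(file);
  }

  public static void main(String[] args) throws IOException {
    Path tmp = Files.createTempDirectory("sketch");
    System.out.println(create(tmp.resolve("a/b/c.txt").toString()));
  }
}
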
@@ -3694,12 +3704,118 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    // create multiple inodes.
    checkFsObjectLimit();

    if (!dir.mkdirs(src, permissions, false, now())) {
    if (!mkdirsRecursively(src, permissions, false, now())) {
      throw new IOException("Failed to create directory: " + src);
    }
    return true;
  }

  /**
   * Create a directory
   * If ancestor directories do not exist, automatically create them.
   *
   * @param src string representation of the path to the directory
   * @param permissions the permission of the directory
   * @param inheritPermission if the permission of the directory should inherit
   *          from its parent or not. u+wx is implicitly added to
   *          the automatically created directories, and to the
   *          given directory if inheritPermission is true
   * @param now creation time
   * @return true if the operation succeeds false otherwise
   * @throws QuotaExceededException if directory creation violates
   *          any quota limit
   * @throws UnresolvedLinkException if a symlink is encountered in src.
   * @throws SnapshotAccessControlException if path is in RO snapshot
   */
  private boolean mkdirsRecursively(String src, PermissionStatus permissions,
      boolean inheritPermission, long now)
      throws FileAlreadyExistsException, QuotaExceededException,
      UnresolvedLinkException, SnapshotAccessControlException,
      AclException {
    src = FSDirectory.normalizePath(src);
    String[] names = INode.getPathNames(src);
    byte[][] components = INode.getPathComponents(names);
    final int lastInodeIndex = components.length - 1;

    dir.writeLock();
    try {
      INodesInPath iip = dir.getExistingPathINodes(components);
      if (iip.isSnapshot()) {
        throw new SnapshotAccessControlException(
            "Modification on RO snapshot is disallowed");
      }
      INode[] inodes = iip.getINodes();

      // find the index of the first null in inodes[]
      StringBuilder pathbuilder = new StringBuilder();
      int i = 1;
      for(; i < inodes.length && inodes[i] != null; i++) {
        pathbuilder.append(Path.SEPARATOR).append(names[i]);
        if (!inodes[i].isDirectory()) {
          throw new FileAlreadyExistsException(
              "Parent path is not a directory: "
              + pathbuilder + " "+inodes[i].getLocalName());
        }
      }

      // default to creating parent dirs with the given perms
      PermissionStatus parentPermissions = permissions;

      // if not inheriting and it's the last inode, there's no use in
      // computing perms that won't be used
      if (inheritPermission || (i < lastInodeIndex)) {
        // if inheriting (ie. creating a file or symlink), use the parent dir,
        // else the supplied permissions
        // NOTE: the permissions of the auto-created directories violate posix
        FsPermission parentFsPerm = inheritPermission
            ? inodes[i-1].getFsPermission() : permissions.getPermission();

        // ensure that the permissions allow user write+execute
        if (!parentFsPerm.getUserAction().implies(FsAction.WRITE_EXECUTE)) {
          parentFsPerm = new FsPermission(
              parentFsPerm.getUserAction().or(FsAction.WRITE_EXECUTE),
              parentFsPerm.getGroupAction(),
              parentFsPerm.getOtherAction()
          );
        }

        if (!parentPermissions.getPermission().equals(parentFsPerm)) {
          parentPermissions = new PermissionStatus(
              parentPermissions.getUserName(),
              parentPermissions.getGroupName(),
              parentFsPerm
          );
          // when inheriting, use same perms for entire path
          if (inheritPermission) permissions = parentPermissions;
        }
      }

      // create directories beginning from the first null index
      for(; i < inodes.length; i++) {
        pathbuilder.append(Path.SEPARATOR).append(names[i]);
        dir.unprotectedMkdir(allocateNewInodeId(), iip, i, components[i],
            (i < lastInodeIndex) ? parentPermissions : permissions, null,
            now);
        if (inodes[i] == null) {
          return false;
        }
        // Directory creation also count towards FilesCreated
        // to match count of FilesDeleted metric.
        NameNode.getNameNodeMetrics().incrFilesCreated();

        final String cur = pathbuilder.toString();
        getEditLog().logMkDir(cur, inodes[i]);
        if(NameNode.stateChangeLog.isDebugEnabled()) {
          NameNode.stateChangeLog.debug(
              "mkdirs: created directory " + cur);
        }
      }
    } finally {
      dir.writeUnlock();
    }
    return true;
  }

  /**
   * Get the content summary for a specific file/dir.
   *

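Within the permission block above, auto-created parent directories are forced to keep user write+execute so the caller can finish populating the tree (the in-code NOTE concedes this deviates from plain POSIX inheritance). A standalone sketch of that adjustment using raw POSIX mode bits instead of Hadoop's FsPermission/FsAction types:

// OR in u+wx when it is missing, leaving other permission bits untouched.
public class ParentPermissionSketch {
  private static final int USER_WRITE_EXECUTE = 0300; // u+wx

  static int ensureUserWriteExecute(int mode) {
    if ((mode & USER_WRITE_EXECUTE) != USER_WRITE_EXECUTE) {
      mode |= USER_WRITE_EXECUTE;
    }
    return mode;
  }

  public static void main(String[] args) {
    System.out.printf("%04o%n", ensureUserWriteExecute(0644)); // 0744
    System.out.printf("%04o%n", ensureUserWriteExecute(0755)); // unchanged
  }
}
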
@@ -4403,6 +4519,46 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    }
  }

  /**
   * Add the given symbolic link to the fs. Record it in the edits log.
   * @param path
   * @param target
   * @param dirPerms
   * @param createParent
   * @param logRetryCache
   * @param dir
   */
  private INodeSymlink addSymlink(String path, String target,
      PermissionStatus dirPerms,
      boolean createParent, boolean logRetryCache)
      throws UnresolvedLinkException, FileAlreadyExistsException,
      QuotaExceededException, SnapshotAccessControlException, AclException {
    dir.waitForReady();

    final long modTime = now();
    if (createParent) {
      final String parent = new Path(path).getParent().toString();
      if (!mkdirsRecursively(parent, dirPerms, true, modTime)) {
        return null;
      }
    }
    final String userName = dirPerms.getUserName();
    long id = allocateNewInodeId();
    INodeSymlink newNode = dir.addSymlink(id, path, target, modTime, modTime,
        new PermissionStatus(userName, null, FsPermission.getDefault()));
    if (newNode == null) {
      NameNode.stateChangeLog.info("addSymlink: failed to add " + path);
      return null;
    }
    getEditLog().logSymlink(path, target, modTime, modTime, newNode,
        logRetryCache);

    if(NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("addSymlink: " + path + " is added");
    }
    return newNode;
  }

  /**
   * Periodically calls hasAvailableResources of NameNodeResourceChecker, and if
   * there are found to be insufficient resources available, causes the NN to

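The method above follows the NameNode's usual ordering for a mutation: update the in-memory namespace first (dir.addSymlink), and only record the operation in the edit log once it succeeded. A compact sketch of that apply-then-journal ordering against a toy namespace; the list standing in for the edit log and every name here are illustrative:

// Mutate the in-memory state first; journal only successful operations.
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ApplyThenJournalSketch {
  private final Map<String, String> symlinks = new HashMap<>(); // path -> target
  private final List<String> editLog = new ArrayList<>();

  String addSymlink(String path, String target) {
    String prev = symlinks.putIfAbsent(path, target);
    if (prev != null) {
      return null;                              // failed: nothing is journaled
    }
    editLog.add("symlink " + path + " -> " + target);
    return target;
  }

  public static void main(String[] args) {
    ApplyThenJournalSketch ns = new ApplyThenJournalSketch();
    ns.addSymlink("/a/link", "/a/target");
    ns.addSymlink("/a/link", "/elsewhere");     // rejected, not logged
    System.out.println(ns.editLog);
  }
}
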
@@ -22,6 +22,7 @@ import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import static org.apache.hadoop.util.Time.now;
import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

@@ -45,37 +46,20 @@ import org.junit.Test;

public class TestFsLimits {
  static Configuration conf;
  static INode[] inodes;
  static FSDirectory fs;
  static FSNamesystem fs;
  static boolean fsIsReady;

  static final PermissionStatus perms
      = new PermissionStatus("admin", "admin", FsPermission.getDefault());

  static private FSImage getMockFSImage() {
    FSEditLog editLog = mock(FSEditLog.class);
  static private FSNamesystem getMockNamesystem() throws IOException {
    FSImage fsImage = mock(FSImage.class);
    when(fsImage.getEditLog()).thenReturn(editLog);
    return fsImage;
  }

  static private FSNamesystem getMockNamesystem() {
    FSNamesystem fsn = mock(FSNamesystem.class);
    when(
        fsn.createFsOwnerPermissions((FsPermission)anyObject())
    ).thenReturn(
        new PermissionStatus("root", "wheel", FsPermission.getDefault())
    );
    FSEditLog editLog = mock(FSEditLog.class);
    doReturn(editLog).when(fsImage).getEditLog();
    FSNamesystem fsn = new FSNamesystem(conf, fsImage);
    fsn.getFSDirectory().setReady(fsIsReady);
    return fsn;
  }

  private static class MockFSDirectory extends FSDirectory {
    public MockFSDirectory() throws IOException {
      super(getMockFSImage(), getMockNamesystem(), conf);
      setReady(fsIsReady);
      NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
    }
  }

  @Before
  public void setUp() throws IOException {

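The rewritten fixture above builds a real FSNamesystem on top of a mocked FSImage whose getEditLog() is stubbed with Mockito. A self-contained sketch of the same Mockito wiring (mock(), when().thenReturn(), doReturn().when()) against a trivial interface, so it runs without the HDFS code base:

// Stub a collaborator graph with Mockito, then assert through the top object.
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.junit.Test;

public class MockWiringSketch {
  interface EditLog { String name(); }
  interface Image   { EditLog getEditLog(); }

  @Test
  public void imageHandsOutMockedEditLog() {
    EditLog editLog = mock(EditLog.class);
    when(editLog.name()).thenReturn("mock-log");

    Image image = mock(Image.class);
    doReturn(editLog).when(image).getEditLog(); // same style as the test above

    assertEquals("mock-log", image.getEditLog().name());
  }
}
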
@@ -83,7 +67,7 @@ public class TestFsLimits {
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        fileAsURI(new File(MiniDFSCluster.getBaseDirectory(),
            "namenode")).toString());

    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
    fs = null;
    fsIsReady = true;
  }

@@ -197,9 +181,10 @@ public class TestFsLimits {
    lazyInitFSDirectory();
    Class<?> generated = null;
    try {
      fs.mkdirs(name, perms, false, now());
      fs.mkdirs(name, perms, false);
    } catch (Throwable e) {
      generated = e.getClass();
      e.printStackTrace();
    }
    assertEquals(expected, generated);
  }

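The test body above verifies limit enforcement by catching whatever is thrown, recording its class, and comparing it to the expected exception class (null when no exception is expected). The same pattern in isolation, with a hypothetical operation standing in for fs.mkdirs():

// Record the thrown class (or null) and compare it with the expectation.
import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class ExpectedExceptionClassSketch {
  private void operationThatThrows() {
    throw new IllegalStateException("boom");
  }

  @Test
  public void recordsAndComparesThrownClass() {
    Class<?> expected = IllegalStateException.class;
    Class<?> generated = null;
    try {
      operationThatThrows();
    } catch (Throwable e) {
      generated = e.getClass();
    }
    assertEquals(expected, generated);
  }
}
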
@@ -209,7 +194,7 @@ public class TestFsLimits {
    lazyInitFSDirectory();
    Class<?> generated = null;
    try {
      fs.renameTo(src, dst, now(), new Rename[] { });
      fs.renameTo(src, dst, new Rename[] { });
    } catch (Throwable e) {
      generated = e.getClass();
    }

@@ -222,7 +207,7 @@ public class TestFsLimits {
    lazyInitFSDirectory();
    Class<?> generated = null;
    try {
      fs.renameTo(src, dst, now());
      fs.renameTo(src, dst);
    } catch (Throwable e) {
      generated = e.getClass();
    }

@@ -232,7 +217,7 @@ public class TestFsLimits {
  private static void lazyInitFSDirectory() throws IOException {
    // have to create after the caller has had a chance to set conf values
    if (fs == null) {
      fs = new MockFSDirectory();
      fs = getMockNamesystem();
    }
  }
}