HDFS-4230. Support listing of all the snapshottable directories. Contributed by Jing Zhao

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1429643 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-01-07 01:43:21 +00:00
parent f96d0a3585
commit 93872b58e5
16 changed files with 537 additions and 13 deletions

View File: CHANGES.HDFS-2802.txt

@@ -92,3 +92,6 @@ Branch-2802 Snapshot (Unreleased)
HDFS-4357. Fix a bug that if an inode is replaced, further INode operations
should apply to the new inode. (Jing Zhao via szetszwo)
HDFS-4230. Support listing of all the snapshottable directories. (Jing Zhao
via szetszwo)

View File: DFSClient.java

@@ -118,6 +118,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
@@ -1929,6 +1930,18 @@ public class DFSClient implements java.io.Closeable {
checkOpen();
namenode.renameSnapshot(snapshotDir, snapshotOldName, snapshotNewName);
}
/**
* Get all the current snapshottable directories.
* @return All the current snapshottable directories
* @throws IOException
* @see ClientProtocol#getSnapshottableDirListing()
*/
public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws IOException {
checkOpen();
return namenode.getSnapshottableDirListing();
}
/**
* Allow snapshot on a directory.

View File: DistributedFileSystem.java

@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -942,4 +943,13 @@ public class DistributedFileSystem extends FileSystem {
String snapshotNewName) throws IOException {
dfs.renameSnapshot(path, snapshotOldName, snapshotNewName);
}
/**
* @return All the snapshottable directories
* @throws IOException
*/
public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws IOException {
return dfs.getSnapshottableDirListing();
}
}
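For reference, a minimal caller of the new client API might look like the sketch below (assumptions: hdfs is an already-initialized DistributedFileSystem; the printout format is made up). The null check matters because the NameNode reports null, not an empty array, when no directory is snapshottable:

SnapshottableDirectoryStatus[] dirs = hdfs.getSnapshottableDirListing();
if (dirs != null) { // null when nothing is snapshottable
  for (SnapshottableDirectoryStatus s : dirs) {
    System.out.println(s.getFullPath() + ": " + s.getSnapshotNumber()
        + " of " + s.getSnapshotQuota() + " snapshots used");
  }
}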

View File: ClientProtocol.java

@@ -525,6 +525,16 @@ public interface ClientProtocol {
boolean needLocation)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException;
/**
* Get listing of all the snapshottable directories.
*
* @return Information about all the current snapshottable directories
* @throws IOException If an I/O error occurred
*/
@Idempotent
public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws IOException;
///////////////////////////////////////
// System issues and management

View File: SnapshottableDirectoryStatus.java

@@ -0,0 +1,87 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
/**
* Metadata about a snapshottable directory
*/
public class SnapshottableDirectoryStatus {
/** Basic information of the snapshottable directory */
private HdfsFileStatus dirStatus;
/** Number of snapshots that have been taken. */
private int snapshotNumber;
/** Number of snapshots allowed. */
private int snapshotQuota;
/** Full path of the parent. */
private byte[] parentFullPath;
public SnapshottableDirectoryStatus(long modification_time, long access_time,
FsPermission permission, String owner, String group, byte[] localName,
int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
access_time, permission, owner, group, null, localName);
this.snapshotNumber = snapshotNumber;
this.snapshotQuota = snapshotQuota;
this.parentFullPath = parentFullPath;
}
/**
* @return Number of snapshots that have been taken for the directory
*/
public int getSnapshotNumber() {
return snapshotNumber;
}
/**
* @return Number of snapshots allowed for the directory
*/
public int getSnapshotQuota() {
return snapshotQuota;
}
/**
* @return Full path of the parent
*/
public byte[] getParentFullPath() {
return parentFullPath;
}
/**
* @return The basic information of the directory
*/
public HdfsFileStatus getDirStatus() {
return dirStatus;
}
/**
* @return Full path of the directory
*/
public Path getFullPath() {
String parentFullPathStr = (parentFullPath == null || parentFullPath.length == 0) ? null
: DFSUtil.bytes2String(parentFullPath);
return parentFullPathStr == null ? new Path(dirStatus.getLocalName())
: new Path(parentFullPathStr, dirStatus.getLocalName());
}
}
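To illustrate getFullPath() (a standalone sketch with made-up values, not part of the patch): a non-empty parentFullPath is joined with the local name, while a root-level entry with an empty parent falls back to the local name alone:

SnapshottableDirectoryStatus s = new SnapshottableDirectoryStatus(
    0L, 0L, FsPermission.getDefault(), "hdfs", "supergroup",
    DFSUtil.string2Bytes("sub1"),         // localName
    0,                                    // snapshotNumber
    1 << 16,                              // snapshotQuota (arbitrary here)
    DFSUtil.string2Bytes("/user/alice")); // parentFullPath
assert s.getFullPath().equals(new Path("/user/alice/sub1"));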

View File: ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
@@ -87,6 +88,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPre
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
@@ -906,4 +909,24 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
throw new ServiceException(e);
}
}
static final GetSnapshottableDirListingResponseProto NULL_GET_SNAPSHOTTABLE_DIR_LISTING_RESPONSE
= GetSnapshottableDirListingResponseProto.newBuilder().build();
@Override
public GetSnapshottableDirListingResponseProto getSnapshottableDirListing(
RpcController controller, GetSnapshottableDirListingRequestProto request)
throws ServiceException {
try {
SnapshottableDirectoryStatus[] result = server
.getSnapshottableDirListing();
if (result != null) {
return GetSnapshottableDirListingResponseProto.newBuilder().
setSnapshottableDirList(PBHelper.convert(result)).build();
} else {
return NULL_GET_SNAPSHOTTABLE_DIR_LISTING_RESPONSE;
}
} catch (IOException e) {
throw new ServiceException(e);
}
}
}

View File: ClientNamenodeProtocolTranslatorPB.java

@@ -42,11 +42,11 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
@@ -63,10 +63,10 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Disall
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
@@ -79,6 +79,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLis
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
@@ -111,6 +113,7 @@ import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.security.AccessControlException;
@@ -877,4 +880,22 @@ public class ClientNamenodeProtocolTranslatorPB implements
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws IOException {
GetSnapshottableDirListingRequestProto req =
GetSnapshottableDirListingRequestProto.newBuilder().build();
try {
GetSnapshottableDirListingResponseProto result = rpcProxy
.getSnapshottableDirListing(null, req);
if (result.hasSnapshottableDirList()) {
return PBHelper.convert(result.getSnapshottableDirList());
}
return null;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
}

View File: PBHelper.java

@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
@@ -93,6 +94,8 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestPro
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
@@ -1021,7 +1024,6 @@ public class PBHelper {
return new EnumSetWritable<CreateFlag>(result);
}
public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
if (fs == null)
return null;
@@ -1036,6 +1038,21 @@
fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null);
}
public static SnapshottableDirectoryStatus convert(
SnapshottableDirectoryStatusProto sdirStatusProto) {
if (sdirStatusProto == null) {
return null;
}
return new SnapshottableDirectoryStatus(sdirStatusProto.getDirStatus()
.getModificationTime(), sdirStatusProto.getDirStatus().getAccessTime(),
PBHelper.convert(sdirStatusProto.getDirStatus().getPermission()),
sdirStatusProto.getDirStatus().getOwner(), sdirStatusProto
.getDirStatus().getGroup(), sdirStatusProto.getDirStatus()
.getPath().toByteArray(), sdirStatusProto.getSnapshotNumber(),
sdirStatusProto.getSnapshotQuota(), sdirStatusProto.getParentFullpath()
.toByteArray());
}
public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
if (fs == null)
return null;
@@ -1070,6 +1087,25 @@
return builder.build();
}
public static SnapshottableDirectoryStatusProto convert(
SnapshottableDirectoryStatus status) {
if (status == null) {
return null;
}
int snapshotNumber = status.getSnapshotNumber();
int snapshotQuota = status.getSnapshotQuota();
byte[] parentFullPath = status.getParentFullPath();
ByteString parentFullPathBytes = ByteString
.copyFrom(parentFullPath == null ? new byte[0] : parentFullPath);
HdfsFileStatusProto fs = convert(status.getDirStatus());
SnapshottableDirectoryStatusProto.Builder builder =
SnapshottableDirectoryStatusProto
.newBuilder().setSnapshotNumber(snapshotNumber)
.setSnapshotQuota(snapshotQuota).setParentFullpath(parentFullPathBytes)
.setDirStatus(fs);
return builder.build();
}
public static HdfsFileStatusProto[] convert(HdfsFileStatus[] fs) {
if (fs == null) return null;
final int len = fs.length;
@@ -1311,5 +1347,38 @@
return JournalInfoProto.newBuilder().setClusterID(j.getClusterId())
.setLayoutVersion(j.getLayoutVersion())
.setNamespaceID(j.getNamespaceId()).build();
}
public static SnapshottableDirectoryStatus[] convert(
SnapshottableDirectoryListingProto sdlp) {
if (sdlp == null)
return null;
List<SnapshottableDirectoryStatusProto> list = sdlp
.getSnapshottableDirListingList();
if (list.isEmpty()) {
return new SnapshottableDirectoryStatus[0];
} else {
SnapshottableDirectoryStatus[] result =
new SnapshottableDirectoryStatus[list.size()];
for (int i = 0; i < list.size(); i++) {
result[i] = (SnapshottableDirectoryStatus) PBHelper
.convert(list.get(i));
}
return result;
}
}
public static SnapshottableDirectoryListingProto convert(
SnapshottableDirectoryStatus[] status) {
if (status == null)
return null;
SnapshottableDirectoryStatusProto[] protos =
new SnapshottableDirectoryStatusProto[status.length];
for (int i = 0; i < status.length; i++) {
protos[i] = PBHelper.convert(status[i]);
}
List<SnapshottableDirectoryStatusProto> protoList = Arrays.asList(protos);
return SnapshottableDirectoryListingProto.newBuilder()
.addAllSnapshottableDirListing(protoList).build();
}
}
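The two listing converters above are inverses of each other (modulo the null cases each one guards). A round-trip sketch, assuming statuses is a non-null SnapshottableDirectoryStatus[]:

SnapshottableDirectoryListingProto proto = PBHelper.convert(statuses);
SnapshottableDirectoryStatus[] decoded = PBHelper.convert(proto);
assert decoded.length == statuses.length; // entries survive the round trip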

View File: FSDirectory.java

@@ -23,6 +23,7 @@ import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -677,6 +678,8 @@ public class FSDirectory implements Closeable {
+ error);
throw new IOException(error);
}
List<INodeDirectorySnapshottable> snapshottableDirs =
new ArrayList<INodeDirectorySnapshottable>();
if (dstInode != null) { // Destination exists
// It's OK to rename a file to a symlink and vice versa
if (dstInode.isDirectory() != srcInode.isDirectory()) {
@@ -702,7 +705,7 @@
throw new IOException(error);
}
}
INode snapshotNode = hasSnapshot(dstInode, snapshottableDirs);
if (snapshotNode != null) {
error = "The directory " + dstInode.getFullPathName()
+ " cannot be deleted for renaming since "
@@ -770,6 +773,12 @@
filesDeleted = rmdst.collectSubtreeBlocksAndClear(collectedBlocks);
getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
}
if (snapshottableDirs.size() > 0) {
// There are snapshottable directories (without snapshots) to be
// deleted. Need to update the SnapshotManager.
namesystem.removeSnapshottableDirs(snapshottableDirs);
}
return filesDeleted >0;
}
} finally {
@@ -1034,13 +1043,20 @@
// snapshottable dir with snapshots, or its descendants have
// snapshottable dir with snapshots
INode targetNode = inodes[inodes.length-1];
List<INodeDirectorySnapshottable> snapshottableDirs =
new ArrayList<INodeDirectorySnapshottable>();
INode snapshotNode = hasSnapshot(targetNode, snapshottableDirs);
if (snapshotNode != null) {
throw new IOException("The directory " + targetNode.getFullPathName()
+ " cannot be deleted since " + snapshotNode.getFullPathName()
+ " is snapshottable and already has snapshots");
}
filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks, now);
if (snapshottableDirs.size() > 0) {
// There are some snapshottable directories without snapshots to be
// deleted. Need to update the SnapshotManager.
namesystem.removeSnapshottableDirs(snapshottableDirs);
}
}
} finally {
writeUnlock();
@@ -1160,18 +1176,28 @@
* Check if the given INode (or one of its descendants) is snapshottable and
* already has snapshots.
*
* @param target
* The given INode
* @param snapshottableDirs
* The list of directories that are snapshottable but do not have
* snapshots yet
* @return The INode which is snapshottable and already has snapshots.
*/
private static INode hasSnapshot(INode target,
List<INodeDirectorySnapshottable> snapshottableDirs) {
if (target instanceof INodeDirectory) {
INodeDirectory targetDir = (INodeDirectory) target;
if (targetDir.isSnapshottable()) {
INodeDirectorySnapshottable ssTargetDir =
(INodeDirectorySnapshottable) targetDir;
if (ssTargetDir.getNumSnapshots() > 0) {
return target;
} else {
snapshottableDirs.add(ssTargetDir);
}
}
for (INode child : targetDir.getChildrenList(null)) {
INode snapshotDir = hasSnapshot(child, snapshottableDirs);
if (snapshotDir != null) {
return snapshotDir;
}

View File: FSNamesystem.java

@@ -137,6 +137,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -175,6 +176,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer;
import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithLink;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
@@ -5750,6 +5752,39 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
newSnapshotRoot.toString(), null);
}
}
/**
* Get the list of all the current snapshottable directories
* @return The list of all the current snapshottable directories
* @throws IOException
*/
public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws IOException {
readLock();
try {
checkOperation(OperationCategory.READ);
SnapshottableDirectoryStatus[] status = snapshotManager
.getSnapshottableDirListing();
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
"listSnapshottableDirectory", null, null, null);
}
return status;
} finally {
readUnlock();
}
}
/**
* Remove a list of INodeDirectorySnapshottable from the SnapshotManager
* @param toRemove the list of INodeDirectorySnapshottable to be removed
*/
void removeSnapshottableDirs(List<INodeDirectorySnapshottable> toRemove) {
if (snapshotManager != null) {
snapshotManager.removeSnapshottableDirs(toRemove);
}
}
/**
* Default AuditLogger implementation; used when no access logger is

View File: NameNodeRpcServer.java

@@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
@@ -1115,4 +1116,13 @@ class NameNodeRpcServer implements NamenodeProtocols {
metrics.incrRenameSnapshotOps();
namesystem.renameSnapshot(snapshotRoot, snapshotOldName, snapshotNewName);
}
@Override // Client Protocol
public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws IOException {
SnapshottableDirectoryStatus[] status = namesystem
.getSnapshottableDirListing();
metrics.incrListSnapshottableDirOps();
return status;
}
}

View File: NameNodeMetrics.java

@@ -65,6 +65,8 @@ public class NameNodeMetrics {
MutableCounterLong createSnapshotOps;
@Metric("Number of renameSnapshot operations")
MutableCounterLong renameSnapshotOps;
@Metric("Number of listSnapshottableDirectory operations")
MutableCounterLong listSnapshottableDirOps;
@Metric("Journal transactions") MutableRate transactions; @Metric("Journal transactions") MutableRate transactions;
@Metric("Journal syncs") MutableRate syncs; @Metric("Journal syncs") MutableRate syncs;
@ -183,6 +185,10 @@ public class NameNodeMetrics {
renameSnapshotOps.incr(); renameSnapshotOps.incr();
} }
public void incrListSnapshottableDirOps() {
listSnapshottableDirOps.incr();
}
public void addTransaction(long latency) {
transactions.add(latency);
}

View File: SnapshotManager.java

@@ -22,6 +22,8 @@ import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
@@ -156,4 +158,35 @@ public class SnapshotManager implements SnapshotStats {
return numSnapshots.get();
}
/**
* @return All the current snapshottable directories
*/
public SnapshottableDirectoryStatus[] getSnapshottableDirListing() {
if (snapshottables.isEmpty()) {
return null;
}
SnapshottableDirectoryStatus[] status =
new SnapshottableDirectoryStatus[snapshottables.size()];
for (int i = 0; i < snapshottables.size(); i++) {
INodeDirectorySnapshottable dir = snapshottables.get(i);
status[i] = new SnapshottableDirectoryStatus(dir.getModificationTime(),
dir.getAccessTime(), dir.getFsPermission(), dir.getUserName(),
dir.getGroupName(), dir.getLocalNameBytes(), dir.getNumSnapshots(),
dir.getSnapshotQuota(), dir.getParent() == null ? new byte[0]
: DFSUtil.string2Bytes(dir.getParent().getFullPathName()));
}
return status;
}
/**
* Remove snapshottable directories from {@link #snapshottables}
* @param toRemoveList A list of INodeDirectorySnapshottable to be removed
*/
public void removeSnapshottableDirs(
List<INodeDirectorySnapshottable> toRemoveList) {
if (toRemoveList != null) {
this.snapshottables.removeAll(toRemoveList);
}
}
}
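A usage sketch for the two methods above (snapshotManager is an assumed reference; in this patch FSNamesystem is the only real caller of either):

// The listing is null, never a zero-length array, when nothing is snapshottable.
SnapshottableDirectoryStatus[] all = snapshotManager.getSnapshottableDirListing();

// Removal of directories collected during a delete/rename; a null list is a no-op.
snapshotManager.removeSnapshottableDirs(
    new ArrayList<INodeDirectorySnapshottable>());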

View File: ClientNamenodeProtocol.proto

@@ -208,6 +208,12 @@ message GetListingResponseProto {
optional DirectoryListingProto dirList = 1;
}
message GetSnapshottableDirListingRequestProto { // no input parameters
}
message GetSnapshottableDirListingResponseProto {
optional SnapshottableDirectoryListingProto snapshottableDirList = 1;
}
message RenewLeaseRequestProto {
required string clientName = 1;
}
@@ -557,4 +563,6 @@ service ClientNamenodeProtocol {
returns(AllowSnapshotResponseProto);
rpc disallowSnapshot(DisallowSnapshotRequestProto)
returns(DisallowSnapshotResponseProto);
rpc getSnapshottableDirListing(GetSnapshottableDirListingRequestProto)
returns(GetSnapshottableDirListingResponseProto);
}

View File: hdfs.proto

@@ -152,7 +152,7 @@ message LocatedBlocksProto {
/**
* Status of a file, directory or symlink
* Optionally includes a file's block locations if requested by client on the rpc call.
*/
message HdfsFileStatusProto {
@@ -213,6 +213,27 @@ message DirectoryListingProto {
required uint32 remainingEntries = 2;
}
/**
* Status of a snapshottable directory: besides the normal information for
* a directory status, also include snapshot quota, number of snapshots, and
* the full path of the parent directory.
*/
message SnapshottableDirectoryStatusProto {
required HdfsFileStatusProto dirStatus = 1;
// Fields specific for snapshottable directory
required uint32 snapshot_quota = 2;
required uint32 snapshot_number = 3;
required bytes parent_fullpath = 4;
}
/**
* Snapshottable directory listing
*/
message SnapshottableDirectoryListingProto {
repeated SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
}
/**
* Common node information shared by all the nodes in the cluster
*/

View File: TestSnapshottableDirListing.java

@@ -0,0 +1,149 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestSnapshottableDirListing {
static final long seed = 0;
static final short REPLICATION = 3;
static final long BLOCKSIZE = 1024;
private final Path dir1 = new Path("/TestSnapshot1");
private final Path dir2 = new Path("/TestSnapshot2");
Configuration conf;
MiniDFSCluster cluster;
FSNamesystem fsn;
DistributedFileSystem hdfs;
@Before
public void setUp() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
hdfs.mkdirs(dir1);
hdfs.mkdirs(dir2);
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Test listing all the snapshottable directories
*/
@Test
public void testListSnapshottableDir() throws Exception {
// Initially there are no snapshottable directories in the system
SnapshottableDirectoryStatus[] dirs = hdfs.getSnapshottableDirListing();
assertNull(dirs);
// Make dir1 snapshottable
hdfs.allowSnapshot(dir1.toString());
dirs = hdfs.getSnapshottableDirListing();
assertEquals(1, dirs.length);
assertEquals(dir1.getName(), dirs[0].getDirStatus().getLocalName());
assertEquals(dir1, dirs[0].getFullPath());
// There is no snapshot for dir1 yet
assertEquals(0, dirs[0].getSnapshotNumber());
// Make dir2 snapshottable
hdfs.allowSnapshot(dir2.toString());
dirs = hdfs.getSnapshottableDirListing();
assertEquals(2, dirs.length);
assertEquals(dir1.getName(), dirs[0].getDirStatus().getLocalName());
assertEquals(dir1, dirs[0].getFullPath());
assertEquals(dir2.getName(), dirs[1].getDirStatus().getLocalName());
assertEquals(dir2, dirs[1].getFullPath());
// There is no snapshot for dir2 yet
assertEquals(0, dirs[1].getSnapshotNumber());
// Create dir3
final Path dir3 = new Path("/TestSnapshot3");
hdfs.mkdirs(dir3);
// Rename dir3 to dir2
hdfs.rename(dir3, dir2, Rename.OVERWRITE);
// Now we only have one snapshottable dir: dir1
dirs = hdfs.getSnapshottableDirListing();
assertEquals(1, dirs.length);
assertEquals(dir1, dirs[0].getFullPath());
// Make dir2 snapshottable again
hdfs.allowSnapshot(dir2.toString());
// Create two snapshots for dir2
hdfs.createSnapshot("s1", dir2.toString());
hdfs.createSnapshot("s2", dir2.toString());
dirs = hdfs.getSnapshottableDirListing();
// There are now 2 snapshots for dir2
assertEquals(dir2, dirs[1].getFullPath());
assertEquals(2, dirs[1].getSnapshotNumber());
// Create sub-dirs under dir1
Path sub1 = new Path(dir1, "sub1");
Path file1 = new Path(sub1, "file1");
Path sub2 = new Path(dir1, "sub2");
Path file2 = new Path(sub2, "file2");
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
// Make sub1 and sub2 snapshottable
hdfs.allowSnapshot(sub1.toString());
hdfs.allowSnapshot(sub2.toString());
dirs = hdfs.getSnapshottableDirListing();
assertEquals(4, dirs.length);
assertEquals(dir1, dirs[0].getFullPath());
assertEquals(dir2, dirs[1].getFullPath());
assertEquals(sub1, dirs[2].getFullPath());
assertEquals(sub2, dirs[3].getFullPath());
// reset sub1
hdfs.disallowSnapshot(sub1.toString());
dirs = hdfs.getSnapshottableDirListing();
assertEquals(3, dirs.length);
assertEquals(dir1, dirs[0].getFullPath());
assertEquals(dir2, dirs[1].getFullPath());
assertEquals(sub2, dirs[2].getFullPath());
// Remove dir1, both dir1 and sub2 will be removed
hdfs.delete(dir1, true);
dirs = hdfs.getSnapshottableDirListing();
assertEquals(1, dirs.length);
assertEquals(dir2.getName(), dirs[0].getDirStatus().getLocalName());
assertEquals(dir2, dirs[0].getFullPath());
}
}