HDFS-4087. Protocol changes for listSnapshots functionality. Contributed by Brandon Li.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1400698 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2012-10-21 19:05:49 +00:00
parent d9a9daeb2d
commit 625d7cf20b
7 changed files with 155 additions and 6 deletions

CHANGES.HDFS-2802.txt (View File)

@@ -11,3 +11,6 @@ Branch-2802 Snapshot (Unreleased)
HDFS-4083. Protocol changes for snapshots. (suresh)
HDFS-4077. Add support for Snapshottable Directory. (Nicholas via suresh)
HDFS-4087. Protocol changes for listSnapshots functionality.
(Brandon Li via suresh)

ClientProtocol.java (View File)

@@ -965,4 +965,12 @@ public interface ClientProtocol {
   */
  public void deleteSnapshot(String snapshotName, String snapshotRoot)
      throws IOException;

  /**
   * List the snapshots of a snapshottable directory.
   * @param snapshotRoot the snapshottable directory whose snapshots are to
   *          be listed
   * @return an array of SnapshotInfo, one entry per snapshot
   */
  public SnapshotInfo[] listSnapshots(String snapshotRoot)
      throws IOException;
}
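For illustration, a minimal sketch (not part of this commit) of how a caller holding a ClientProtocol proxy might use the new method; the proxy construction and the /user/data path are assumptions here:

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.SnapshotInfo;

public class ListSnapshotsExample {
  // Print the name and creation time of every snapshot under a
  // snapshottable directory. The ClientProtocol proxy is assumed to be
  // obtained elsewhere (e.g. via DFSClient's NameNode RPC setup).
  static void printSnapshots(ClientProtocol namenode) throws IOException {
    SnapshotInfo[] snapshots = namenode.listSnapshots("/user/data");
    for (SnapshotInfo si : snapshots) {
      System.out.println(si.getSnapshotName() + "\t" + si.getCreateTime());
    }
  }
}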

SnapshotInfo.java (View File)

@@ -0,0 +1,82 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;

/**
 * Class that represents the over-the-wire information for a snapshot.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class SnapshotInfo {
  private final String snapshotName;
  private final String snapshotRoot;
  private final String createTime;
  private final FsPermissionProto permission;
  private final String owner;
  private final String group;

  public SnapshotInfo(String sname, String sroot, String ctime,
      FsPermissionProto permission, String owner, String group) {
    this.snapshotName = sname;
    this.snapshotRoot = sroot;
    this.createTime = ctime;
    this.permission = permission;
    this.owner = owner;
    this.group = group;
  }

  public final String getSnapshotName() {
    return snapshotName;
  }

  public final String getSnapshotRoot() {
    return snapshotRoot;
  }

  public final String getCreateTime() {
    return createTime;
  }

  public final FsPermissionProto getPermission() {
    return permission;
  }

  public final String getOwner() {
    return owner;
  }

  public final String getGroup() {
    return group;
  }

  @Override
  public String toString() {
    return getClass().getSimpleName()
        + "{snapshotName=" + snapshotName
        + "; snapshotRoot=" + snapshotRoot
        + "; createTime=" + createTime
        + "; permission=" + permission
        + "; owner=" + owner
        + "; group=" + group
        + "}";
  }
}
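As a quick sanity check of the class above, a standalone snippet (all values made up) showing construction and the toString output format:

import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;

public class SnapshotInfoDemo {
  public static void main(String[] args) {
    // FsPermissionProto packs the POSIX mode bits into a uint32 field.
    FsPermissionProto perm =
        FsPermissionProto.newBuilder().setPerm(0755).build();
    SnapshotInfo si = new SnapshotInfo("s0", "/user/data",
        "2012-10-21 19:05", perm, "hdfs", "supergroup");
    // Prints: SnapshotInfo{snapshotName=s0; snapshotRoot=/user/data; ...}
    System.out.println(si);
  }
}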

ClientNamenodeProtocolServerSideTranslatorPB.java (View File)

@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
@@ -135,6 +136,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProt
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.io.Text;
@@ -881,7 +883,25 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
  @Override
  public ListSnapshotsResponseProto listSnapshots(RpcController controller,
      ListSnapshotsRequestProto request) throws ServiceException {
-    // TODO Auto-generated method stub
-    return null;
+    SnapshotInfo[] result;
+    try {
+      result = server.listSnapshots(request.getSnapshotRoot());
+      ListSnapshotsResponseProto.Builder builder = ListSnapshotsResponseProto
+          .newBuilder();
+      for (SnapshotInfo si : result) {
+        SnapshotInfoProto.Builder infobuilder = SnapshotInfoProto.newBuilder();
+        infobuilder.setSnapshotName(si.getSnapshotName());
+        infobuilder.setSnapshotRoot(si.getSnapshotRoot());
+        infobuilder.setCreateTime(si.getCreateTime());
+        infobuilder.setPermission(si.getPermission());
+        infobuilder.setOwner(si.getOwner());
+        infobuilder.setGroup(si.getGroup());
+        builder.addSnapshots(infobuilder);
+      }
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
  }
}
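The server-side translator above serializes SnapshotInfo into SnapshotInfoProto, and the client-side translator in the next file reverses the conversion. A hypothetical test-style sketch (not part of this commit) of that round trip, checking that every field survives:

import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto;

public class SnapshotInfoRoundTrip {
  public static void main(String[] args) {
    FsPermissionProto perm =
        FsPermissionProto.newBuilder().setPerm(0700).build();
    SnapshotInfo in = new SnapshotInfo("s0", "/user/data", "2012-10-21",
        perm, "hdfs", "supergroup");
    // Serialize the way the server-side translator does ...
    SnapshotInfoProto proto = SnapshotInfoProto.newBuilder()
        .setSnapshotName(in.getSnapshotName())
        .setSnapshotRoot(in.getSnapshotRoot())
        .setCreateTime(in.getCreateTime())
        .setPermission(in.getPermission())
        .setOwner(in.getOwner())
        .setGroup(in.getGroup())
        .build();
    // ... and deserialize the way the client-side translator does.
    SnapshotInfo out = new SnapshotInfo(proto.getSnapshotName(),
        proto.getSnapshotRoot(), proto.getCreateTime(),
        proto.getPermission(), proto.getOwner(), proto.getGroup());
    // Every field should survive the trip unchanged.
    if (!in.toString().equals(out.toString())) {
      throw new AssertionError("round trip lost a field");
    }
  }
}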

ClientNamenodeProtocolTranslatorPB.java (View File)

@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -79,6 +80,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLis
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListSnapshotsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListSnapshotsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
@@ -101,6 +104,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSaf
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
@@ -852,6 +856,27 @@ public class ClientNamenodeProtocolTranslatorPB implements
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }

  @Override
  public SnapshotInfo[] listSnapshots(String snapshotRoot) throws IOException {
    ListSnapshotsRequestProto req = ListSnapshotsRequestProto.newBuilder()
        .setSnapshotRoot(snapshotRoot).build();
    try {
      ListSnapshotsResponseProto resp = rpcProxy.listSnapshots(null, req);
      SnapshotInfo[] sinfo = new SnapshotInfo[resp.getSnapshotsCount()];
      for (int i = 0; i < resp.getSnapshotsCount(); i++) {
        SnapshotInfoProto siProto = resp.getSnapshots(i);
        sinfo[i] = new SnapshotInfo(siProto.getSnapshotName(),
            siProto.getSnapshotRoot(), siProto.getCreateTime(),
            siProto.getPermission(), siProto.getOwner(), siProto.getGroup());
      }
      return sinfo;
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }
}

NameNodeRpcServer.java (View File)

@@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
@@ -1084,4 +1085,12 @@ class NameNodeRpcServer implements NamenodeProtocols {
      throws IOException {
    // TODO Auto-generated method stub
  }

  @Override
  public SnapshotInfo[] listSnapshots(String snapshotRoot) throws IOException {
    // TODO: replace this placeholder with real snapshot enumeration; for
    // now return a single SnapshotInfo with every field left null.
    SnapshotInfo[] si = new SnapshotInfo[1];
    si[0] = new SnapshotInfo(null, null, null, null, null, null);
    return si;
  }
}

hdfs.proto (View File)

@@ -381,9 +381,11 @@ message VersionResponseProto {
 */
message SnapshotInfoProto {
  required string snapshotName = 1;
-  required FsPermissionProto permission = 2;
-  required string owner = 3;
-  required string group = 4;
+  required string snapshotRoot = 2;
+  required FsPermissionProto permission = 3;
+  required string owner = 4;
+  required string group = 5;
+  required string createTime = 6;
  // TODO: do we need access time?
}
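Two things are worth noting about this hunk: inserting snapshotRoot at tag 2 renumbers the existing required fields, which changes the wire encoding and is safe only while this branch's protocol is unreleased; and because every field is required, a message cannot be built until all six are set. A small sketch of the latter against the generated HdfsProtos bindings:

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto;

public class RequiredFieldsDemo {
  public static void main(String[] args) {
    // Only one of the six required fields is set, so the message is not
    // yet initialized; calling build() here would throw
    // UninitializedMessageException.
    SnapshotInfoProto.Builder b =
        SnapshotInfoProto.newBuilder().setSnapshotName("s0");
    System.out.println(b.isInitialized()); // prints: false
  }
}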