HDFS-4083. Protocol changes for snapshots. Contributed by Suresh Srinivas.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1400316 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Suresh Srinivas 2012-10-19 22:15:27 +00:00
parent ecffab63af
commit f5c4defcb3
7 changed files with 140 additions and 2 deletions

View File

@ -8,4 +8,4 @@ Branch-2802 Snapshot (Unreleased)
HDFS-4086. Add editlog opcodes to allow and disallow snapshots on a
directory. (Brandon Li via suresh)
HDFS-4083. Protocol changes for snapshots. (suresh)

View File

@ -949,4 +949,20 @@ public interface ClientProtocol {
* @throws IOException
*/
public DataEncryptionKey getDataEncryptionKey() throws IOException;
/**
 * Create a snapshot.
 * @param snapshotName name of the snapshot created
 * @param snapshotRoot the path that is being snapshotted
 * @throws IOException if the snapshot cannot be created
 */
public void createSnapshot(String snapshotName, String snapshotRoot)
throws IOException;
/**
 * Delete a snapshot.
 * @param snapshotName name of the snapshot to be deleted
 * @param snapshotRoot the path where the snapshot exists
 * @throws IOException if the snapshot cannot be deleted
 */
public void deleteSnapshot(String snapshotName, String snapshotRoot)
throws IOException;
}

View File

@ -46,10 +46,14 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Concat
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
@ -83,6 +87,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSer
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListSnapshotsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListSnapshotsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
@ -147,6 +153,11 @@ import com.google.protobuf.ServiceException;
public class ClientNamenodeProtocolServerSideTranslatorPB implements
ClientNamenodeProtocolPB {
final private ClientProtocol server;
// Empty "void" response messages are immutable, so a single shared
// instance is built once and reused for every RPC instead of allocating
// a fresh proto per call.
static final DeleteSnapshotResponseProto VOID_DELETE_SNAPSHOT_RESPONSE =
DeleteSnapshotResponseProto.newBuilder().build();
static final CreateSnapshotResponseProto VOID_CREATE_SNAPSHOT_RESPONSE =
CreateSnapshotResponseProto.newBuilder().build();
/**
 * Constructor
@ -842,4 +853,35 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
throw new ServiceException(e);
}
}
/**
 * Translates a createSnapshot RPC into a call on the underlying
 * {@link ClientProtocol} server, converting any IOException into the
 * ServiceException expected by the protobuf RPC layer.
 */
@Override
public CreateSnapshotResponseProto createSnapshot(RpcController controller,
    CreateSnapshotRequestProto request) throws ServiceException {
  final String name = request.getSnapshotName();
  final String root = request.getSnapshotRoot();
  try {
    server.createSnapshot(name, root);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  // Void RPC: reuse the shared empty response instance.
  return VOID_CREATE_SNAPSHOT_RESPONSE;
}
/**
 * Translates a deleteSnapshot RPC into a call on the underlying
 * {@link ClientProtocol} server, converting any IOException into the
 * ServiceException expected by the protobuf RPC layer.
 */
@Override
public DeleteSnapshotResponseProto deleteSnapshot(RpcController controller,
    DeleteSnapshotRequestProto request) throws ServiceException {
  final String name = request.getSnapshotName();
  final String root = request.getSnapshotRoot();
  try {
    server.deleteSnapshot(name, root);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  // Void RPC: reuse the shared empty response instance.
  return VOID_DELETE_SNAPSHOT_RESPONSE;
}
/**
 * Lists the snapshots under a snapshottable directory.
 * <p>
 * Not yet implemented on this branch (HDFS-2802). Returning {@code null}
 * here, as the original stub did, would surface as an NPE deep inside the
 * protobuf RPC response-serialization path; instead fail fast with a clear
 * error so the client sees an explicit "not implemented" failure.
 *
 * @throws ServiceException always, wrapping UnsupportedOperationException
 */
@Override
public ListSnapshotsResponseProto listSnapshots(RpcController controller,
    ListSnapshotsRequestProto request) throws ServiceException {
  throw new ServiceException(
      new UnsupportedOperationException("listSnapshots is not implemented"));
}
}

View File

@ -55,8 +55,10 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Cancel
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto;
@ -828,4 +830,28 @@ public class ClientNamenodeProtocolTranslatorPB implements
return rpcProxy;
}
/**
 * Client-side translation of {@link ClientProtocol#createSnapshot}:
 * packs the arguments into a protobuf request, invokes the RPC proxy,
 * and unwraps any ServiceException back into the remote IOException.
 */
@Override
public void createSnapshot(String snapshotName, String snapshotRoot)
    throws IOException {
  final CreateSnapshotRequestProto.Builder builder =
      CreateSnapshotRequestProto.newBuilder();
  builder.setSnapshotName(snapshotName);
  builder.setSnapshotRoot(snapshotRoot);
  try {
    rpcProxy.createSnapshot(null, builder.build());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
/**
 * Client-side translation of {@link ClientProtocol#deleteSnapshot}:
 * packs the arguments into a protobuf request, invokes the RPC proxy,
 * and unwraps any ServiceException back into the remote IOException.
 */
@Override
public void deleteSnapshot(String snapshotName, String snapshotRoot)
    throws IOException {
  final DeleteSnapshotRequestProto.Builder builder =
      DeleteSnapshotRequestProto.newBuilder();
  builder.setSnapshotName(snapshotName);
  builder.setSnapshotRoot(snapshotRoot);
  try {
    rpcProxy.deleteSnapshot(null, builder.build());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
}

View File

@ -1072,4 +1072,16 @@ class NameNodeRpcServer implements NamenodeProtocols {
public DataEncryptionKey getDataEncryptionKey() throws IOException {
return namesystem.getBlockManager().generateDataEncryptionKey();
}
/**
 * Creates a snapshot of the given directory.
 * <p>
 * Not yet implemented on this branch (HDFS-2802). The original stub
 * silently returned, which would make clients believe a snapshot was
 * created when none exists; fail fast instead until the namesystem
 * support lands.
 *
 * @param snapshotName name of the snapshot to create
 * @param snapshotRoot path of the directory to snapshot
 * @throws IOException declared by the ClientProtocol contract
 */
@Override
public void createSnapshot(String snapshotName, String snapshotRoot)
throws IOException {
  throw new UnsupportedOperationException(
      "createSnapshot is not yet implemented");
}
/**
 * Deletes the named snapshot under the given directory.
 * <p>
 * Not yet implemented on this branch (HDFS-2802). The original stub
 * silently returned, which would make clients believe the snapshot was
 * deleted when nothing happened; fail fast instead until the namesystem
 * support lands.
 *
 * @param snapshotName name of the snapshot to delete
 * @param snapshotRoot path where the snapshot exists
 * @throws IOException declared by the ClientProtocol contract
 */
@Override
public void deleteSnapshot(String snapshotName, String snapshotRoot)
throws IOException {
  throw new UnsupportedOperationException(
      "deleteSnapshot is not yet implemented");
}
}

View File

@ -442,6 +442,30 @@ message GetDataEncryptionKeyResponseProto {
required DataEncryptionKeyProto dataEncryptionKey = 1;
}
/**
 * Request to create a snapshot named snapshotName of the directory
 * tree rooted at snapshotRoot.
 */
message CreateSnapshotRequestProto {
required string snapshotName = 1;
required string snapshotRoot = 2;
}
message CreateSnapshotResponseProto { // void response
}
/**
 * Request to delete the snapshot named snapshotName that exists under
 * the directory snapshotRoot.
 */
message DeleteSnapshotRequestProto {
required string snapshotName = 1;
required string snapshotRoot = 2;
}
message DeleteSnapshotResponseProto { // void response
}
/**
 * Request to list the snapshots taken under the directory snapshotRoot.
 */
message ListSnapshotsRequestProto {
required string snapshotRoot = 1;
}
message ListSnapshotsResponseProto {
repeated SnapshotInfoProto snapshots = 1; // one entry per snapshot found
}
service ClientNamenodeProtocol {
rpc getBlockLocations(GetBlockLocationsRequestProto)
returns(GetBlockLocationsResponseProto);
@ -515,4 +539,10 @@ service ClientNamenodeProtocol {
returns(SetBalancerBandwidthResponseProto);
rpc getDataEncryptionKey(GetDataEncryptionKeyRequestProto)
returns(GetDataEncryptionKeyResponseProto);
// Creates a snapshot of the directory named in the request.
rpc createSnapshot(CreateSnapshotRequestProto)
returns(CreateSnapshotResponseProto);
// Deletes an existing snapshot of the directory named in the request.
rpc deleteSnapshot(DeleteSnapshotRequestProto)
returns(DeleteSnapshotResponseProto);
// Lists the snapshots taken under the directory named in the request.
rpc listSnapshots(ListSnapshotsRequestProto)
returns(ListSnapshotsResponseProto);
}

View File

@ -375,3 +375,15 @@ message VersionResponseProto {
required NamespaceInfoProto info = 1;
}
/**
 * Information related to a snapshot.
 * TODO: add more information
 */
message SnapshotInfoProto {
required string snapshotName = 1; // name the snapshot was created with
required FsPermissionProto permission = 2; // permission of the snapshot root
required string owner = 3; // owner of the snapshot root
required string group = 4; // group of the snapshot root
// TODO: do we need access time?
}