HDFS-8733. Keep server related definition in hdfs.proto on server side. Contributed by Mingliang Liu.

Haohui Mai 2015-09-22 20:57:05 -07:00
parent 63d9f1596c
commit 7c5c099324
17 changed files with 247 additions and 202 deletions

View File

@@ -199,12 +199,6 @@ message BlockStoragePolicyProto {
optional StorageTypesProto replicationFallbackPolicy = 5;
}
/**
* A list of storage IDs.
*/
message StorageUuidsProto {
repeated string storageUuids = 1;
}
/**
* A LocatedBlock gives information about a block and its location.
@@ -414,68 +408,6 @@ message SnapshotDiffReportProto {
repeated SnapshotDiffReportEntryProto diffReportEntries = 4;
}
/**
* Common node information shared by all the nodes in the cluster
*/
message StorageInfoProto {
required uint32 layoutVersion = 1; // Layout version of the file system
required uint32 namespceID = 2; // File system namespace ID
required string clusterID = 3; // ID of the cluster
required uint64 cTime = 4; // File system creation time
}
/**
* Information sent by a namenode to identify itself to the primary namenode.
*/
message NamenodeRegistrationProto {
required string rpcAddress = 1; // host:port of the namenode RPC address
required string httpAddress = 2; // host:port of the namenode http server
enum NamenodeRoleProto {
NAMENODE = 1;
BACKUP = 2;
CHECKPOINT = 3;
}
required StorageInfoProto storageInfo = 3; // Node information
optional NamenodeRoleProto role = 4 [default = NAMENODE]; // Namenode role
}
/**
* Unique signature to identify checkpoint transactions.
*/
message CheckpointSignatureProto {
required string blockPoolId = 1;
required uint64 mostRecentCheckpointTxId = 2;
required uint64 curSegmentTxId = 3;
required StorageInfoProto storageInfo = 4;
}
/**
* Command sent from one namenode to another namenode.
*/
message NamenodeCommandProto {
enum Type {
NamenodeCommand = 0; // Base command
CheckPointCommand = 1; // Check point command
}
required uint32 action = 1;
required Type type = 2;
optional CheckpointCommandProto checkpointCmd = 3;
}
/**
* Command returned from primary to checkpointing namenode.
* This command has a checkpoint signature that identifies
* the checkpoint transaction and is needed for further
* communication related to checkpointing.
*/
message CheckpointCommandProto {
// Unique signature to identify the checkpoint transaction
required CheckpointSignatureProto signature = 1;
// If true, transfer the image back to the primary upon completion of the checkpoint
required bool needToReturnImage = 2;
}
/**
* Block information
*
@@ -491,104 +423,6 @@ message BlockProto {
optional uint64 numBytes = 3 [default = 0];
}
/**
* Block and datanodes where it is located
*/
message BlockWithLocationsProto {
required BlockProto block = 1; // Block
repeated string datanodeUuids = 2; // Datanodes with replicas of the block
repeated string storageUuids = 3; // Storages with replicas of the block
repeated StorageTypeProto storageTypes = 4;
}
/**
* List of blocks with locations
*/
message BlocksWithLocationsProto {
repeated BlockWithLocationsProto blocks = 1;
}
/**
* Editlog information with available transactions
*/
message RemoteEditLogProto {
required uint64 startTxId = 1; // Starting available edit log transaction
required uint64 endTxId = 2; // Ending available edit log transaction
optional bool isInProgress = 3 [default = false];
}
/**
* Enumeration of editlogs available on a remote namenode
*/
message RemoteEditLogManifestProto {
repeated RemoteEditLogProto logs = 1;
}
/**
* Namespace information that describes the namespace on a namenode
*/
message NamespaceInfoProto {
required string buildVersion = 1; // Software revision version (e.g. an svn or git revision)
required uint32 unused = 2; // Retained for backward compatibility
required string blockPoolID = 3; // block pool used by the namespace
required StorageInfoProto storageInfo = 4;// Node information
required string softwareVersion = 5; // Software version number (e.g. 2.0.0)
optional uint64 capabilities = 6 [default = 0]; // feature flags
}
/**
* Block access token information
*/
message BlockKeyProto {
required uint32 keyId = 1; // Key identifier
required uint64 expiryDate = 2; // Expiry time in milliseconds
optional bytes keyBytes = 3; // Key secret
}
/**
* Current key and set of block keys at the namenode.
*/
message ExportedBlockKeysProto {
required bool isBlockTokenEnabled = 1;
required uint64 keyUpdateInterval = 2;
required uint64 tokenLifeTime = 3;
required BlockKeyProto currentKey = 4;
repeated BlockKeyProto allKeys = 5;
}
/**
* State of a block replica at a datanode
*/
enum ReplicaStateProto {
FINALIZED = 0; // State of a replica when it is not modified
RBW = 1; // State of replica that is being written to
RWR = 2; // State of replica that is waiting to be recovered
RUR = 3; // State of replica that is under recovery
TEMPORARY = 4; // State of replica that is created for replication
}
/**
* Block that needs to be recovered at a given location
*/
message RecoveringBlockProto {
required uint64 newGenStamp = 1; // New genstamp post recovery
required LocatedBlockProto block = 2; // Block to be recovered
optional BlockProto truncateBlock = 3; // New block for recovery (truncate)
}
/**
* void request
*/
message VersionRequestProto {
}
/**
* Version response from namenode.
*/
message VersionResponseProto {
required NamespaceInfoProto info = 1;
}
/**
* Information related to a snapshot
* TODO: add more information

View File

@@ -947,6 +947,9 @@ Release 2.8.0 - UNRELEASED
HDFS-9039. Separate client and server side methods of o.a.h.hdfs.
NameNodeProxies. (Mingliang Liu via wheat9)
HDFS-8733. Keep server related definition in hdfs.proto on server side.
(Mingliang Liu via wheat9)
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

View File

@@ -340,6 +340,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<source>
<directory>${basedir}/src/main/proto</directory>
<includes>
<include>HdfsServer.proto</include>
<include>DatanodeProtocol.proto</include>
<include>HAZKInfo.proto</include>
<include>InterDatanodeProtocol.proto</include>

View File

@@ -113,6 +113,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<imports>
<param>${basedir}/../../../../../hadoop-common-project/hadoop-common/src/main/proto</param>
<param>${basedir}/../../../../../hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto</param>
<param>${basedir}/../../../../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto</param>
<param>${basedir}/src/main/proto</param>
</imports>
<source>

View File

@@ -25,6 +25,7 @@ option java_generate_equals_and_hash = true;
package hadoop.hdfs;
import "hdfs.proto";
import "HdfsServer.proto";
message VersionProto {
required int32 layoutVersion = 1;

View File

@@ -46,7 +46,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterData
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

View File

@@ -46,8 +46,8 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlock
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

View File

@@ -20,8 +20,8 @@ package org.apache.hadoop.hdfs.protocolPB;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;

View File

@@ -24,9 +24,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto;

View File

@@ -45,27 +45,27 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDele
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.NamenodeRoleProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ReplicaStateProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
import org.apache.hadoop.hdfs.security.token.block.BlockKey;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;

View File

@@ -33,6 +33,7 @@ package hadoop.hdfs.datanode;
import "HAServiceProtocol.proto";
import "hdfs.proto";
import "HdfsServer.proto";
/**
* Information to identify a datanode to a namenode

View File

@@ -0,0 +1,201 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* These .proto interfaces are private and stable.
* Please see http://wiki.apache.org/hadoop/Compatibility
* for what changes are allowed for a *stable* .proto interface.
*/
// This file contains protocol buffers that are used on the HDFS server side
// -- i.e. by the namenode, datanode, and journal protocols.
option java_package = "org.apache.hadoop.hdfs.protocol.proto";
option java_outer_classname = "HdfsServerProtos";
option java_generate_equals_and_hash = true;
package hadoop.hdfs;
import "hdfs.proto";
/**
* A list of storage IDs.
*/
message StorageUuidsProto {
repeated string storageUuids = 1;
}
/**
* Block access token information
*/
message BlockKeyProto {
required uint32 keyId = 1; // Key identifier
required uint64 expiryDate = 2; // Expiry time in milliseconds
optional bytes keyBytes = 3; // Key secret
}
/**
* Current key and set of block keys at the namenode.
*/
message ExportedBlockKeysProto {
required bool isBlockTokenEnabled = 1;
required uint64 keyUpdateInterval = 2;
required uint64 tokenLifeTime = 3;
required BlockKeyProto currentKey = 4;
repeated BlockKeyProto allKeys = 5;
}
/**
* Block and datanodes where it is located
*/
message BlockWithLocationsProto {
required BlockProto block = 1; // Block
repeated string datanodeUuids = 2; // Datanodes with replicas of the block
repeated string storageUuids = 3; // Storages with replicas of the block
repeated StorageTypeProto storageTypes = 4;
}
/**
* List of blocks with locations
*/
message BlocksWithLocationsProto {
repeated BlockWithLocationsProto blocks = 1;
}
/**
* Editlog information with available transactions
*/
message RemoteEditLogProto {
required uint64 startTxId = 1; // Starting available edit log transaction
required uint64 endTxId = 2; // Ending available edit log transaction
optional bool isInProgress = 3 [default = false];
}
/**
* Enumeration of editlogs available on a remote namenode
*/
message RemoteEditLogManifestProto {
repeated RemoteEditLogProto logs = 1;
}
/**
* Namespace information that describes the namespace on a namenode
*/
message NamespaceInfoProto {
required string buildVersion = 1; // Software revision version (e.g. an svn or git revision)
required uint32 unused = 2; // Retained for backward compatibility
required string blockPoolID = 3; // block pool used by the namespace
required StorageInfoProto storageInfo = 4;// Node information
required string softwareVersion = 5; // Software version number (e.g. 2.0.0)
optional uint64 capabilities = 6 [default = 0]; // feature flags
}
/**
* State of a block replica at a datanode
*/
enum ReplicaStateProto {
FINALIZED = 0; // State of a replica when it is not modified
RBW = 1; // State of replica that is being written to
RWR = 2; // State of replica that is waiting to be recovered
RUR = 3; // State of replica that is under recovery
TEMPORARY = 4; // State of replica that is created for replication
}
/**
* Block that needs to be recovered at a given location
*/
message RecoveringBlockProto {
required uint64 newGenStamp = 1; // New genstamp post recovery
required LocatedBlockProto block = 2; // Block to be recovered
optional BlockProto truncateBlock = 3; // New block for recovery (truncate)
}
/**
* Unique signature to identify checkpoint transactions.
*/
message CheckpointSignatureProto {
required string blockPoolId = 1;
required uint64 mostRecentCheckpointTxId = 2;
required uint64 curSegmentTxId = 3;
required StorageInfoProto storageInfo = 4;
}
/**
* Command returned from primary to checkpointing namenode.
* This command has a checkpoint signature that identifies
* the checkpoint transaction and is needed for further
* communication related to checkpointing.
*/
message CheckpointCommandProto {
// Unique signature to identify the checkpoint transaction
required CheckpointSignatureProto signature = 1;
// If true, transfer the image back to the primary upon completion of the checkpoint
required bool needToReturnImage = 2;
}
/**
* Command sent from one namenode to another namenode.
*/
message NamenodeCommandProto {
enum Type {
NamenodeCommand = 0; // Base command
CheckPointCommand = 1; // Check point command
}
required uint32 action = 1;
required Type type = 2;
optional CheckpointCommandProto checkpointCmd = 3;
}
/**
* void request
*/
message VersionRequestProto {
}
/**
* Version response from namenode.
*/
message VersionResponseProto {
required NamespaceInfoProto info = 1;
}
/**
* Common node information shared by all the nodes in the cluster
*/
message StorageInfoProto {
required uint32 layoutVersion = 1; // Layout version of the file system
required uint32 namespceID = 2; // File system namespace ID
required string clusterID = 3; // ID of the cluster
required uint64 cTime = 4; // File system creation time
}
/**
* Information sent by a namenode to identify itself to the primary namenode.
*/
message NamenodeRegistrationProto {
required string rpcAddress = 1; // host:port of the namenode RPC address
required string httpAddress = 2; // host:port of the namenode http server
enum NamenodeRoleProto {
NAMENODE = 1;
BACKUP = 2;
CHECKPOINT = 3;
}
required StorageInfoProto storageInfo = 3; // Node information
optional NamenodeRoleProto role = 4 [default = NAMENODE]; // Namenode role
}
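
As a usage illustration (not part of this patch): the sketch below shows how server-side code could assemble the checkpoint handshake messages defined above through the generated HdfsServerProtos builders. All field values are made up, and the action code is a placeholder; real values come from the NamenodeProtocol constants in the server code.

import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto;

public class CheckpointCommandExample {
  public static NamenodeCommandProto buildCheckpointCommand() {
    // Storage info identifying the file system instance (made-up values).
    StorageInfoProto storage = StorageInfoProto.newBuilder()
        .setLayoutVersion(60)
        .setNamespceID(42)            // field keeps the historical spelling
        .setClusterID("CID-example")
        .setCTime(0L)
        .build();

    // Signature pinning the checkpoint to a block pool and transaction range.
    CheckpointSignatureProto signature = CheckpointSignatureProto.newBuilder()
        .setBlockPoolId("BP-example")
        .setMostRecentCheckpointTxId(1000L)
        .setCurSegmentTxId(1024L)
        .setStorageInfo(storage)
        .build();

    // Wrap the signature in a checkpoint command. The action code below is a
    // placeholder; the server defines the real values in NamenodeProtocol.
    return NamenodeCommandProto.newBuilder()
        .setAction(1)
        .setType(NamenodeCommandProto.Type.CheckPointCommand)
        .setCheckpointCmd(CheckpointCommandProto.newBuilder()
            .setSignature(signature)
            .setNeedToReturnImage(false)
            .build())
        .build();
  }
}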

View File

@@ -32,6 +32,7 @@ option java_generate_equals_and_hash = true;
package hadoop.hdfs;
import "hdfs.proto";
import "HdfsServer.proto";
/**
* Block with location information and new generation stamp

View File

@@ -32,6 +32,7 @@ option java_generate_equals_and_hash = true;
package hadoop.hdfs;
import "hdfs.proto";
import "HdfsServer.proto";
/**
* Journal information used by the journal receiver to identify a journal.

View File

@@ -32,6 +32,7 @@ option java_generate_equals_and_hash = true;
package hadoop.hdfs.namenode;
import "hdfs.proto";
import "HdfsServer.proto";
/**
* Get list of blocks for a given datanode with the total length

View File

@@ -29,6 +29,7 @@ option java_generate_equals_and_hash = true;
package hadoop.hdfs.qjournal;
import "hdfs.proto";
import "HdfsServer.proto";
message JournalIdProto {
required string identifier = 1;

View File

@@ -43,23 +43,23 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommand
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.NamenodeRoleProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto;
import org.apache.hadoop.hdfs.security.token.block.BlockKey;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
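
Because only the hosting outer class changes (HdfsProtos to HdfsServerProtos) while field numbers and types stay the same, the wire format of these messages is unaffected. A self-contained way to sanity-check that, in the spirit of the PBHelper tests above, is a byte-level round trip; this is an illustrative sketch, not test code from the patch, and all values are made up.

import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto;

public class RoundTripExample {
  public static void main(String[] args) throws Exception {
    ExportedBlockKeysProto keys = ExportedBlockKeysProto.newBuilder()
        .setIsBlockTokenEnabled(true)
        .setKeyUpdateInterval(600000L)   // milliseconds; made-up value
        .setTokenLifeTime(600000L)
        .setCurrentKey(BlockKeyProto.newBuilder()
            .setKeyId(1)
            .setExpiryDate(System.currentTimeMillis() + 600000L)
            .build())
        .build();

    // Serialize and parse back. The bytes are identical to what the old
    // HdfsProtos-hosted message would have produced, since field numbers
    // and types are unchanged by the move.
    byte[] wire = keys.toByteArray();
    ExportedBlockKeysProto parsed = ExportedBlockKeysProto.parseFrom(wire);

    // equals() is generated because of java_generate_equals_and_hash = true;
    // run with -ea to enable the assertion.
    assert keys.equals(parsed);
  }
}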