HDFS-13688. [SBN read] Introduce msync API call. Contributed by Chen Liang.

This commit is contained in:
Erik Krogen 2018-08-01 09:58:04 -07:00 committed by Chen Liang
parent ac2e79330c
commit 849a6c6f0d
9 changed files with 74 additions and 1 deletion

View File

@ -3153,4 +3153,18 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
checkOpen(); checkOpen();
return new OpenFilesIterator(namenode, tracer, openFilesTypes, path); return new OpenFilesIterator(namenode, tracer, openFilesTypes, path);
} }
/**
 * A blocking call to wait for the Observer NameNode state ID to reach the
 * current client state ID. The current client state ID is given by the
 * client alignment context.
 * An assumption is that the client alignment context has the state ID set
 * at this point. This is because ObserverReadProxyProvider sets up the
 * initial state ID when it is being created.
 *
 * @throws IOException if the RPC to the NameNode fails.
 */
public void msync() throws IOException {
namenode.msync();
}
} }

View File

@ -1788,4 +1788,15 @@ public interface ClientProtocol {
@ReadOnly @ReadOnly
BatchedEntries<OpenFileEntry> listOpenFiles(long prevId, BatchedEntries<OpenFileEntry> listOpenFiles(long prevId,
EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException; EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException;
/**
 * Called by the client to wait until the server has reached the state id of
 * the client. The client and server state ids are given by the client-side
 * and server-side alignment contexts respectively. This can be a blocking
 * call.
 *
 * @throws IOException if the server fails to reach the client's state id,
 *         or the RPC itself fails.
 */
@Idempotent
@ReadOnly
void msync() throws IOException;
} }

View File

@ -158,6 +158,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSa
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MsyncRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MsyncResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
@ -1944,4 +1946,13 @@ public class ClientNamenodeProtocolTranslatorPB implements
} }
} }
@Override
public void msync() throws IOException {
  // msync carries no arguments, so the request message is empty; build it
  // up front and unwrap any ServiceException into the remote IOException.
  final MsyncRequestProto request = MsyncRequestProto.newBuilder().build();
  try {
    rpcProxy.msync(null, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
} }

View File

@ -831,6 +831,12 @@ message ListOpenFilesResponseProto {
repeated OpenFilesTypeProto types = 3; repeated OpenFilesTypeProto types = 3;
} }
// Request for ClientProtocol.msync. Intentionally empty: msync takes no
// arguments.
message MsyncRequestProto {
}
// Response for ClientProtocol.msync. Intentionally empty: msync returns no
// data; success is signaled by the RPC completing without error.
message MsyncResponseProto {
}
service ClientNamenodeProtocol { service ClientNamenodeProtocol {
rpc getBlockLocations(GetBlockLocationsRequestProto) rpc getBlockLocations(GetBlockLocationsRequestProto)
returns(GetBlockLocationsResponseProto); returns(GetBlockLocationsResponseProto);
@ -1017,4 +1023,6 @@ service ClientNamenodeProtocol {
returns(GetQuotaUsageResponseProto); returns(GetQuotaUsageResponseProto);
rpc listOpenFiles(ListOpenFilesRequestProto) rpc listOpenFiles(ListOpenFilesRequestProto)
returns(ListOpenFilesResponseProto); returns(ListOpenFilesResponseProto);
rpc msync(MsyncRequestProto)
returns(MsyncResponseProto);
} }

View File

@ -71,7 +71,8 @@ public class TestReadOnly {
"getDataEncryptionKey", "getDataEncryptionKey",
"getCurrentEditLogTxid", "getCurrentEditLogTxid",
"getEditsFromTxid", "getEditsFromTxid",
"getQuotaUsage" "getQuotaUsage",
"msync"
) )
); );

View File

@ -1535,6 +1535,11 @@ public class RouterClientProtocol implements ClientProtocol {
return null; return null;
} }
@Override
public void msync() throws IOException {
// NOTE(review): the 'false' flag passed to checkOperation appears to mark
// msync as not (yet) supported by the Router — confirm against
// RouterRpcServer.checkOperation's contract.
rpcServer.checkOperation(NameNode.OperationCategory.READ, false);
}
/** /**
* Determines combinations of eligible src/dst locations for a rename. A * Determines combinations of eligible src/dst locations for a rename. A
* rename cannot change the namespace. Renames are only allowed if there is an * rename cannot change the namespace. Renames are only allowed if there is an

View File

@ -1191,6 +1191,11 @@ public class RouterRpcServer extends AbstractService
return clientProto.listOpenFiles(prevId, openFilesTypes, path); return clientProto.listOpenFiles(prevId, openFilesTypes, path);
} }
@Override // ClientProtocol
public void msync() throws IOException {
// Delegate straight to the ClientProtocol module of the Router.
clientProto.msync();
}
@Override // NamenodeProtocol @Override // NamenodeProtocol
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size, public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size,
long minBlockSize) throws IOException { long minBlockSize) throws IOException {

View File

@ -175,6 +175,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Modify
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MsyncRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MsyncResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
@ -1886,4 +1888,15 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
throw new ServiceException(e); throw new ServiceException(e);
} }
} }
@Override
public MsyncResponseProto msync(RpcController controller,
    MsyncRequestProto req) throws ServiceException {
  // Translate the protobuf call onto the server side; the protobuf RPC
  // contract requires IOException to be wrapped in a ServiceException.
  try {
    server.msync();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  // The response carries no payload; an empty message signals success.
  return MsyncResponseProto.newBuilder().build();
}
} }

View File

@ -1366,6 +1366,11 @@ public class NameNodeRpcServer implements NamenodeProtocols {
return namesystem.listOpenFiles(prevId, openFilesTypes, path); return namesystem.listOpenFiles(prevId, openFilesTypes, path);
} }
@Override // ClientProtocol
public void msync() throws IOException {
// TODO : need to be filled up if needed. May be a no-op here.
// NOTE(review): currently a deliberate no-op — presumably reaching the
// server at all is treated as sufficient synchronization for now; confirm
// once the Observer read path is completed.
}
@Override // ClientProtocol @Override // ClientProtocol
public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie) public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
throws IOException { throws IOException {