HDDS-925. Rename ChunkGroupOutputStream to KeyOutputStream and ChunkOutputStream to BlockOutputStream. Contributed by Shashikant Banerjee.
parent ee10ba26dd
commit 4ff1c46d95
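For callers the rename is mechanical: the key-level stream that OzoneOutputStream (and OzoneFSOutputStream) wraps is now KeyOutputStream, formerly ChunkGroupOutputStream, and the per-block stream it manages internally is BlockOutputStream, formerly ChunkOutputStream. Below is a minimal usage sketch, not part of this commit: the class and method names KeyOutputStreamUsageSketch and openForWrite are invented for illustration, and the builder chain omits the remaining setters (OM client, chunk size, replication type and factor, buffer sizes) that the real RpcClient code supplies before calling build().

import java.io.IOException;

import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.ozone.client.io.KeyOutputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.helpers.OpenKeySession;

/** Illustrative sketch only; not a complete Ozone client. */
public class KeyOutputStreamUsageSketch {

  public OzoneOutputStream openForWrite(OpenKeySession openKey,
      XceiverClientManager xceiverClientManager,
      StorageContainerLocationProtocolClientSideTranslatorPB scmClient)
      throws IOException {
    // The key-level stream is now KeyOutputStream (was ChunkGroupOutputStream);
    // it manages one BlockOutputStream (was ChunkOutputStream) per block.
    KeyOutputStream keyOutputStream = new KeyOutputStream.Builder()
        .setHandler(openKey)
        .setXceiverClientManager(xceiverClientManager)
        .setScmClient(scmClient)
        // remaining setters (OM client, chunk size, type, factor, ...) omitted here
        .build();
    // OzoneOutputStream keeps wrapping the key-level stream; only casts change.
    return new OzoneOutputStream(keyOutputStream);
  }
}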
@@ -65,9 +65,9 @@ import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
  * This class encapsulates all state management for buffering and writing
  * through to the container.
  */
-public class ChunkOutputStream extends OutputStream {
+public class BlockOutputStream extends OutputStream {
   public static final Logger LOG =
-      LoggerFactory.getLogger(ChunkOutputStream.class);
+      LoggerFactory.getLogger(BlockOutputStream.class);

   private BlockID blockID;
   private final String key;
@@ -108,7 +108,7 @@ public class ChunkOutputStream extends OutputStream {
   private int currentBufferIndex;

   /**
-   * Creates a new ChunkOutputStream.
+   * Creates a new BlockOutputStream.
    *
    * @param blockID block ID
    * @param key chunk key
@@ -122,7 +122,7 @@ public class ChunkOutputStream extends OutputStream {
    * @param watchTimeout watch timeout
    * @param checksum checksum
    */
-  public ChunkOutputStream(BlockID blockID, String key,
+  public BlockOutputStream(BlockID blockID, String key,
       XceiverClientManager xceiverClientManager, XceiverClientSpi xceiverClient,
       String traceID, int chunkSize, long streamBufferFlushSize,
       long streamBufferMaxSize, long watchTimeout,
@@ -565,7 +565,7 @@ public class ChunkOutputStream extends OutputStream {
    */
   private void checkOpen() throws IOException {
     if (xceiverClient == null) {
-      throw new IOException("ChunkOutputStream has been closed.");
+      throw new IOException("BlockOutputStream has been closed.");
     } else if (ioException != null) {
       adjustBuffersOnException();
       throw ioException;
@@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
 import org.apache.hadoop.ozone.common.Checksum;
 import org.apache.hadoop.ozone.om.helpers.*;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
@@ -35,7 +36,6 @@ import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
 import org.apache.hadoop.hdds.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.storage.ChunkOutputStream;
 import org.apache.ratis.protocol.RaftRetryFailureException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -58,13 +58,13 @@ import java.util.concurrent.TimeoutException;
  *
  * TODO : currently not support multi-thread access.
  */
-public class ChunkGroupOutputStream extends OutputStream {
+public class KeyOutputStream extends OutputStream {

   public static final Logger LOG =
-      LoggerFactory.getLogger(ChunkGroupOutputStream.class);
+      LoggerFactory.getLogger(KeyOutputStream.class);

   // array list's get(index) is O(1)
-  private final ArrayList<ChunkOutputStreamEntry> streamEntries;
+  private final ArrayList<BlockOutputStreamEntry> streamEntries;
   private int currentStreamIndex;
   private final OzoneManagerProtocolClientSideTranslatorPB omClient;
   private final
@@ -86,7 +86,7 @@ public class ChunkGroupOutputStream extends OutputStream {
    * A constructor for testing purpose only.
    */
   @VisibleForTesting
-  public ChunkGroupOutputStream() {
+  public KeyOutputStream() {
     streamEntries = new ArrayList<>();
     omClient = null;
     scmClient = null;
@@ -116,11 +116,11 @@ public class ChunkGroupOutputStream extends OutputStream {
   @VisibleForTesting
   public void addStream(OutputStream outputStream, long length) {
     streamEntries.add(
-        new ChunkOutputStreamEntry(outputStream, length, checksum));
+        new BlockOutputStreamEntry(outputStream, length, checksum));
   }

   @VisibleForTesting
-  public List<ChunkOutputStreamEntry> getStreamEntries() {
+  public List<BlockOutputStreamEntry> getStreamEntries() {
     return streamEntries;
   }
   @VisibleForTesting
@@ -130,7 +130,7 @@ public class ChunkGroupOutputStream extends OutputStream {

   public List<OmKeyLocationInfo> getLocationInfoList() throws IOException {
     List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
-    for (ChunkOutputStreamEntry streamEntry : streamEntries) {
+    for (BlockOutputStreamEntry streamEntry : streamEntries) {
       OmKeyLocationInfo info =
           new OmKeyLocationInfo.Builder().setBlockID(streamEntry.blockID)
               .setLength(streamEntry.currentPosition).setOffset(0)
@@ -143,7 +143,7 @@ public class ChunkGroupOutputStream extends OutputStream {
     return locationInfoList;
   }

-  public ChunkGroupOutputStream(OpenKeySession handler,
+  public KeyOutputStream(OpenKeySession handler,
       XceiverClientManager xceiverClientManager,
       StorageContainerLocationProtocolClientSideTranslatorPB scmClient,
       OzoneManagerProtocolClientSideTranslatorPB omClient, int chunkSize,
@@ -212,7 +212,7 @@ public class ChunkGroupOutputStream extends OutputStream {
         .getContainerWithPipeline(subKeyInfo.getContainerID());
     XceiverClientSpi xceiverClient =
         xceiverClientManager.acquireClient(containerWithPipeline.getPipeline());
-    streamEntries.add(new ChunkOutputStreamEntry(subKeyInfo.getBlockID(),
+    streamEntries.add(new BlockOutputStreamEntry(subKeyInfo.getBlockID(),
         keyArgs.getKeyName(), xceiverClientManager, xceiverClient, requestID,
         chunkSize, subKeyInfo.getLength(), streamBufferFlushSize,
         streamBufferMaxSize, watchTimeout, bufferList, checksum));
@@ -280,7 +280,7 @@ public class ChunkGroupOutputStream extends OutputStream {
     // in theory, this condition should never violate due the check above
     // still do a sanity check.
     Preconditions.checkArgument(currentStreamIndex < streamEntries.size());
-    ChunkOutputStreamEntry current = streamEntries.get(currentStreamIndex);
+    BlockOutputStreamEntry current = streamEntries.get(currentStreamIndex);

     // length(len) will be in int range if the call is happening through
     // write API of chunkOutputStream. Length can be in long range if it comes
@@ -323,7 +323,7 @@ public class ChunkGroupOutputStream extends OutputStream {
     // currentStreamIndex < streamEntries.size() signifies that, there are still
     // pre allocated blocks available.
     if (currentStreamIndex < streamEntries.size()) {
-      ListIterator<ChunkOutputStreamEntry> streamEntryIterator =
+      ListIterator<BlockOutputStreamEntry> streamEntryIterator =
          streamEntries.listIterator(currentStreamIndex);
       while (streamEntryIterator.hasNext()) {
         if (streamEntryIterator.next().blockID.getContainerID()
@@ -342,7 +342,7 @@ public class ChunkGroupOutputStream extends OutputStream {
    */
   private void removeEmptyBlocks() {
     if (currentStreamIndex < streamEntries.size()) {
-      ListIterator<ChunkOutputStreamEntry> streamEntryIterator =
+      ListIterator<BlockOutputStreamEntry> streamEntryIterator =
          streamEntries.listIterator(currentStreamIndex);
       while (streamEntryIterator.hasNext()) {
         if (streamEntryIterator.next().currentPosition == 0) {
@@ -361,7 +361,7 @@ public class ChunkGroupOutputStream extends OutputStream {
    * @param streamIndex Index of the entry
    * @throws IOException Throws IOException if Write fails
    */
-  private void handleException(ChunkOutputStreamEntry streamEntry,
+  private void handleException(BlockOutputStreamEntry streamEntry,
       int streamIndex) throws IOException {
     long totalSuccessfulFlushedData =
         streamEntry.getTotalSuccessfulFlushedData();
@@ -428,7 +428,7 @@ public class ChunkGroupOutputStream extends OutputStream {
    * Contact OM to get a new block. Set the new block with the index (e.g.
    * first block has index = 0, second has index = 1 etc.)
    *
-   * The returned block is made to new ChunkOutputStreamEntry to write.
+   * The returned block is made to new BlockOutputStreamEntry to write.
    *
    * @param index the index of the block.
    * @throws IOException
@@ -457,7 +457,7 @@ public class ChunkGroupOutputStream extends OutputStream {
     int size = streamEntries.size();
     int streamIndex =
         currentStreamIndex >= size ? size - 1 : currentStreamIndex;
-    ChunkOutputStreamEntry entry = streamEntries.get(streamIndex);
+    BlockOutputStreamEntry entry = streamEntries.get(streamIndex);
     if (entry != null) {
       try {
         if (close) {
@@ -507,7 +507,7 @@ public class ChunkGroupOutputStream extends OutputStream {
           omClient.commitKey(keyArgs, openID);
         }
       } else {
-        LOG.warn("Closing ChunkGroupOutputStream, but key args is null");
+        LOG.warn("Closing KeyOutputStream, but key args is null");
       }
     } catch (IOException ioe) {
       throw ioe;
@@ -524,7 +524,7 @@ public class ChunkGroupOutputStream extends OutputStream {
   }

   /**
-   * Builder class of ChunkGroupOutputStream.
+   * Builder class of KeyOutputStream.
    */
   public static class Builder {
     private OpenKeySession openHandler;
@@ -627,15 +627,15 @@ public class ChunkGroupOutputStream extends OutputStream {
       return this;
     }

-    public ChunkGroupOutputStream build() throws IOException {
-      return new ChunkGroupOutputStream(openHandler, xceiverManager, scmClient,
+    public KeyOutputStream build() throws IOException {
+      return new KeyOutputStream(openHandler, xceiverManager, scmClient,
           omClient, chunkSize, requestID, factor, type, streamBufferFlushSize,
           streamBufferMaxSize, blockSize, watchTimeout, checksum,
           multipartUploadID, multipartNumber, isMultipartKey);
     }
   }

-  private static class ChunkOutputStreamEntry extends OutputStream {
+  private static class BlockOutputStreamEntry extends OutputStream {
     private OutputStream outputStream;
     private BlockID blockID;
     private final String key;
@@ -654,7 +654,7 @@ public class ChunkGroupOutputStream extends OutputStream {
     private final long watchTimeout;
     private List<ByteBuffer> bufferList;

-    ChunkOutputStreamEntry(BlockID blockID, String key,
+    BlockOutputStreamEntry(BlockID blockID, String key,
         XceiverClientManager xceiverClientManager,
         XceiverClientSpi xceiverClient, String requestId, int chunkSize,
         long length, long streamBufferFlushSize, long streamBufferMaxSize,
@@ -681,7 +681,7 @@ public class ChunkGroupOutputStream extends OutputStream {
      * @param outputStream a existing writable output stream
      * @param length the length of data to write to the stream
      */
-    ChunkOutputStreamEntry(OutputStream outputStream, long length,
+    BlockOutputStreamEntry(OutputStream outputStream, long length,
         Checksum checksum) {
       this.outputStream = outputStream;
       this.blockID = null;
@@ -711,7 +711,7 @@ public class ChunkGroupOutputStream extends OutputStream {
     private void checkStream() {
       if (this.outputStream == null) {
         this.outputStream =
-            new ChunkOutputStream(blockID, key, xceiverClientManager,
+            new BlockOutputStream(blockID, key, xceiverClientManager,
                 xceiverClient, requestId, chunkSize, streamBufferFlushSize,
                 streamBufferMaxSize, watchTimeout, bufferList, checksum);
       }
@@ -744,15 +744,15 @@ public class ChunkGroupOutputStream extends OutputStream {
         this.outputStream.close();
         // after closing the chunkOutPutStream, blockId would have been
         // reconstructed with updated bcsId
-        if (this.outputStream instanceof ChunkOutputStream) {
-          this.blockID = ((ChunkOutputStream) outputStream).getBlockID();
+        if (this.outputStream instanceof BlockOutputStream) {
+          this.blockID = ((BlockOutputStream) outputStream).getBlockID();
         }
       }
     }

     long getTotalSuccessfulFlushedData() throws IOException {
-      if (this.outputStream instanceof ChunkOutputStream) {
-        ChunkOutputStream out = (ChunkOutputStream) this.outputStream;
+      if (this.outputStream instanceof BlockOutputStream) {
+        BlockOutputStream out = (BlockOutputStream) this.outputStream;
         blockID = out.getBlockID();
         return out.getTotalSuccessfulFlushedData();
       } else if (outputStream == null) {
@@ -765,8 +765,8 @@ public class ChunkGroupOutputStream extends OutputStream {
     }

     long getWrittenDataLength() throws IOException {
-      if (this.outputStream instanceof ChunkOutputStream) {
-        ChunkOutputStream out = (ChunkOutputStream) this.outputStream;
+      if (this.outputStream instanceof BlockOutputStream) {
+        BlockOutputStream out = (BlockOutputStream) this.outputStream;
         return out.getWrittenDataLength();
       } else if (outputStream == null) {
         // For a pre allocated block for which no write has been initiated,
@@ -779,16 +779,16 @@ public class ChunkGroupOutputStream extends OutputStream {

     void cleanup() {
       checkStream();
-      if (this.outputStream instanceof ChunkOutputStream) {
-        ChunkOutputStream out = (ChunkOutputStream) this.outputStream;
+      if (this.outputStream instanceof BlockOutputStream) {
+        BlockOutputStream out = (BlockOutputStream) this.outputStream;
         out.cleanup();
       }
     }

     void writeOnRetry(long len) throws IOException {
       checkStream();
-      if (this.outputStream instanceof ChunkOutputStream) {
-        ChunkOutputStream out = (ChunkOutputStream) this.outputStream;
+      if (this.outputStream instanceof BlockOutputStream) {
+        BlockOutputStream out = (BlockOutputStream) this.outputStream;
         out.writeOnRetry(len);
         this.currentPosition += len;
       } else {
@@ -24,14 +24,14 @@ import java.io.OutputStream;

 /**
  * OzoneOutputStream is used to write data into Ozone.
- * It uses SCM's {@link ChunkGroupOutputStream} for writing the data.
+ * It uses SCM's {@link KeyOutputStream} for writing the data.
  */
 public class OzoneOutputStream extends OutputStream {

   private final OutputStream outputStream;

   /**
-   * Constructs OzoneOutputStream with ChunkGroupOutputStream.
+   * Constructs OzoneOutputStream with KeyOutputStream.
    *
    * @param outputStream
    */
@@ -61,8 +61,8 @@ public class OzoneOutputStream extends OutputStream {
   }

   public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() {
-    if (outputStream instanceof ChunkGroupOutputStream) {
-      return ((ChunkGroupOutputStream) outputStream).getCommitUploadPartInfo();
+    if (outputStream instanceof KeyOutputStream) {
+      return ((KeyOutputStream) outputStream).getCommitUploadPartInfo();
     }
     // Otherwise return null.
     return null;
@@ -39,7 +39,7 @@ import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.client.VolumeArgs;
 import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream;
-import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
+import org.apache.hadoop.ozone.client.io.KeyOutputStream;
 import org.apache.hadoop.ozone.client.io.LengthInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
@@ -501,8 +501,8 @@ public class RpcClient implements ClientProtocol {
         .build();

     OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs);
-    ChunkGroupOutputStream groupOutputStream =
-        new ChunkGroupOutputStream.Builder()
+    KeyOutputStream groupOutputStream =
+        new KeyOutputStream.Builder()
         .setHandler(openKey)
         .setXceiverClientManager(xceiverClientManager)
         .setScmClient(storageContainerLocationClient)
@@ -726,8 +726,8 @@ public class RpcClient implements ClientProtocol {
         .build();

     OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs);
-    ChunkGroupOutputStream groupOutputStream =
-        new ChunkGroupOutputStream.Builder()
+    KeyOutputStream groupOutputStream =
+        new KeyOutputStream.Builder()
         .setHandler(openKey)
         .setXceiverClientManager(xceiverClientManager)
         .setScmClient(storageContainerLocationClient)
@@ -36,7 +36,7 @@ import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
+import org.apache.hadoop.ozone.client.io.KeyOutputStream;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
@@ -133,7 +133,7 @@ public class TestCloseContainerHandlingByClient {
         .getBytes(UTF_8);
     key.write(data);

-    Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
+    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
     //get the name of a valid container
     OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
         .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
@@ -165,7 +165,7 @@ public class TestCloseContainerHandlingByClient {
         .getBytes(UTF_8);
     key.write(data);

-    Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
+    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
     //get the name of a valid container
     OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
         .setBucketName(bucketName)
@@ -188,10 +188,10 @@ public class TestCloseContainerHandlingByClient {
     String keyName = getKeyName();
     OzoneOutputStream key =
         createKey(keyName, ReplicationType.RATIS, (4 * blockSize));
-    ChunkGroupOutputStream groupOutputStream =
-        (ChunkGroupOutputStream) key.getOutputStream();
+    KeyOutputStream keyOutputStream =
+        (KeyOutputStream) key.getOutputStream();
     // With the initial size provided, it should have preallocated 4 blocks
-    Assert.assertEquals(4, groupOutputStream.getStreamEntries().size());
+    Assert.assertEquals(4, keyOutputStream.getStreamEntries().size());
     // write data more than 1 chunk
     byte[] data =
         ContainerTestHelper.getFixedLengthString(keyString, (3 * blockSize))
@@ -199,7 +199,7 @@ public class TestCloseContainerHandlingByClient {
     Assert.assertEquals(data.length, 3 * blockSize);
     key.write(data);

-    Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
+    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
     //get the name of a valid container
     OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
         .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
@@ -234,12 +234,12 @@ public class TestCloseContainerHandlingByClient {
     String keyName = getKeyName();
     OzoneOutputStream key =
         createKey(keyName, ReplicationType.RATIS, 4 * blockSize);
-    ChunkGroupOutputStream groupOutputStream =
-        (ChunkGroupOutputStream) key.getOutputStream();
+    KeyOutputStream keyOutputStream =
+        (KeyOutputStream) key.getOutputStream();

-    Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
+    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
     // With the initial size provided, it should have pre allocated 4 blocks
-    Assert.assertEquals(4, groupOutputStream.getStreamEntries().size());
+    Assert.assertEquals(4, keyOutputStream.getStreamEntries().size());
     String dataString =
         ContainerTestHelper.getFixedLengthString(keyString, (2 * blockSize));
     byte[] data = dataString.getBytes(UTF_8);
@@ -278,10 +278,10 @@ public class TestCloseContainerHandlingByClient {
     String keyName = getKeyName();
     int keyLen = 4 * blockSize;
     OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, keyLen);
-    ChunkGroupOutputStream groupOutputStream =
-        (ChunkGroupOutputStream) key.getOutputStream();
+    KeyOutputStream keyOutputStream =
+        (KeyOutputStream) key.getOutputStream();
     // With the initial size provided, it should have preallocated 4 blocks
-    Assert.assertEquals(4, groupOutputStream.getStreamEntries().size());
+    Assert.assertEquals(4, keyOutputStream.getStreamEntries().size());
     // write data 3 blocks and one more chunk
     byte[] writtenData =
         ContainerTestHelper.getFixedLengthString(keyString, keyLen)
@@ -290,7 +290,7 @@ public class TestCloseContainerHandlingByClient {
     Assert.assertEquals(data.length, 3 * blockSize + chunkSize);
     key.write(data);

-    Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
+    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
     //get the name of a valid container
     OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
         .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
@@ -329,10 +329,10 @@ public class TestCloseContainerHandlingByClient {
   private void waitForContainerClose(String keyName,
       OzoneOutputStream outputStream)
       throws Exception {
-    ChunkGroupOutputStream groupOutputStream =
-        (ChunkGroupOutputStream) outputStream.getOutputStream();
+    KeyOutputStream keyOutputStream =
+        (KeyOutputStream) outputStream.getOutputStream();
     List<OmKeyLocationInfo> locationInfoList =
-        groupOutputStream.getLocationInfoList();
+        keyOutputStream.getLocationInfoList();
     List<Long> containerIdList = new ArrayList<>();
     for (OmKeyLocationInfo info : locationInfoList) {
       containerIdList.add(info.getContainerID());
@@ -397,18 +397,18 @@ public class TestCloseContainerHandlingByClient {
     String keyName = getKeyName();
     OzoneOutputStream key =
         createKey(keyName, ReplicationType.RATIS, 2 * blockSize);
-    ChunkGroupOutputStream groupOutputStream =
-        (ChunkGroupOutputStream) key.getOutputStream();
+    KeyOutputStream keyOutputStream =
+        (KeyOutputStream) key.getOutputStream();

-    Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
+    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
     // With the initial size provided, it should have pre allocated 4 blocks
-    Assert.assertEquals(2, groupOutputStream.getStreamEntries().size());
+    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
     String dataString =
         ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
     byte[] data = dataString.getBytes(UTF_8);
     key.write(data);
     List<OmKeyLocationInfo> locationInfos =
-        new ArrayList<>(groupOutputStream.getLocationInfoList());
+        new ArrayList<>(keyOutputStream.getLocationInfoList());
     long containerID = locationInfos.get(0).getContainerID();
     ContainerInfo container =
         cluster.getStorageContainerManager().getContainerManager()
@@ -423,16 +423,16 @@ public class TestCloseContainerHandlingByClient {
         ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
     data = dataString.getBytes(UTF_8);
     key.write(data);
-    Assert.assertEquals(2, groupOutputStream.getStreamEntries().size());
+    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());

     // the 1st block got written. Now all the containers are closed, so the 2nd
     // pre allocated block will be removed from the list and new block should
     // have been allocated
     Assert.assertTrue(
-        groupOutputStream.getLocationInfoList().get(0).getBlockID()
+        keyOutputStream.getLocationInfoList().get(0).getBlockID()
             .equals(locationInfos.get(0).getBlockID()));
     Assert.assertFalse(
-        groupOutputStream.getLocationInfoList().get(1).getBlockID()
+        keyOutputStream.getLocationInfoList().get(1).getBlockID()
             .equals(locationInfos.get(1).getBlockID()));
     key.close();
   }
@@ -463,7 +463,7 @@ public class TestCloseContainerHandlingByClient {
         .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName)
         .build();

-    Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
+    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
     waitForContainerClose(keyName, key);
     // Again Write the Data. This will throw an exception which will be handled
     // and new blocks will be allocated
@@ -27,7 +27,7 @@ import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
+import org.apache.hadoop.ozone.client.io.KeyOutputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -126,8 +126,8 @@ public class TestContainerStateMachineFailures {
         setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
         .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName("ratis")
         .build();
-    ChunkGroupOutputStream groupOutputStream =
-        (ChunkGroupOutputStream) key.getOutputStream();
+    KeyOutputStream groupOutputStream =
+        (KeyOutputStream) key.getOutputStream();
     List<OmKeyLocationInfo> locationInfoList =
         groupOutputStream.getLocationInfoList();
     Assert.assertEquals(1, locationInfoList.size());
@@ -30,7 +30,7 @@ import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
+import org.apache.hadoop.ozone.client.io.KeyOutputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
@@ -121,9 +121,9 @@ public class TestFailureHandlingByClient {
     key.write(data);

     // get the name of a valid container
-    Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
-    ChunkGroupOutputStream groupOutputStream =
-        (ChunkGroupOutputStream) key.getOutputStream();
+    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+    KeyOutputStream groupOutputStream =
+        (KeyOutputStream) key.getOutputStream();
     List<OmKeyLocationInfo> locationInfoList =
         groupOutputStream.getLocationInfoList();
     Assert.assertTrue(locationInfoList.size() == 1);
@@ -160,9 +160,9 @@ public class TestFailureHandlingByClient {
     key.write(data.getBytes());

     // get the name of a valid container
-    Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
-    ChunkGroupOutputStream groupOutputStream =
-        (ChunkGroupOutputStream) key.getOutputStream();
+    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+    KeyOutputStream groupOutputStream =
+        (KeyOutputStream) key.getOutputStream();
     List<OmKeyLocationInfo> locationInfoList =
         groupOutputStream.getLocationInfoList();
     Assert.assertTrue(locationInfoList.size() == 2);
@@ -201,9 +201,9 @@ public class TestFailureHandlingByClient {
     key.write(data.getBytes());

     // get the name of a valid container
-    Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
-    ChunkGroupOutputStream groupOutputStream =
-        (ChunkGroupOutputStream) key.getOutputStream();
+    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+    KeyOutputStream groupOutputStream =
+        (KeyOutputStream) key.getOutputStream();
     List<OmKeyLocationInfo> locationInfoList =
         groupOutputStream.getLocationInfoList();
     Assert.assertTrue(locationInfoList.size() == 6);
@@ -39,7 +39,7 @@ import org.apache.hadoop.hdds.client.OzoneQuota;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
+import org.apache.hadoop.ozone.client.io.KeyOutputStream;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.common.OzoneChecksumException;
@@ -666,8 +666,8 @@ public class TestOzoneRpcClient {
     OzoneOutputStream out = bucket
         .createKey(keyName, value.getBytes().length, ReplicationType.RATIS,
             ReplicationFactor.THREE);
-    ChunkGroupOutputStream groupOutputStream =
-        (ChunkGroupOutputStream) out.getOutputStream();
+    KeyOutputStream groupOutputStream =
+        (KeyOutputStream) out.getOutputStream();
     XceiverClientManager manager = groupOutputStream.getXceiverClientManager();
     out.write(value.getBytes());
     out.close();
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.ozone.client.io.KeyOutputStream;
 import org.apache.hadoop.ozone.client.io.LengthInputStream;
 import org.apache.hadoop.ozone.common.Checksum;
 import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
@@ -38,7 +39,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.OzoneConsts.Versioning;
 import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream;
-import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
@@ -437,8 +437,8 @@ public final class DistributedStorageHandler implements StorageHandler {
         .build();
     // contact OM to allocate a block for key.
     OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs);
-    ChunkGroupOutputStream groupOutputStream =
-        new ChunkGroupOutputStream.Builder()
+    KeyOutputStream groupOutputStream =
+        new KeyOutputStream.Builder()
         .setHandler(openKey)
         .setXceiverClientManager(xceiverClientManager)
         .setScmClient(storageContainerLocationClient)
@@ -31,7 +31,7 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.junit.Assert.assertEquals;

 /**
- * This class tests ChunkGroupInputStream and ChunkGroupOutStream.
+ * This class tests ChunkGroupInputStream and KeyOutputStream.
  */
 public class TestChunkStreams {

@@ -20,7 +20,7 @@ package org.apache.hadoop.fs.ozone;

 import java.io.IOException;
 import java.io.OutputStream;
-import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
+import org.apache.hadoop.ozone.client.io.KeyOutputStream;


 /**
@@ -31,10 +31,10 @@ import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
  */
 public class OzoneFSOutputStream extends OutputStream {

-  private final ChunkGroupOutputStream outputStream;
+  private final KeyOutputStream outputStream;

   public OzoneFSOutputStream(OutputStream outputStream) {
-    this.outputStream = (ChunkGroupOutputStream)outputStream;
+    this.outputStream = (KeyOutputStream)outputStream;
   }

   @Override