HDFS-8605. Merge Refactor of DFSOutputStream from HDFS-7285 branch. (vinayakumarb)
commit 1c13519e1e
parent 2ad668748e
@@ -656,6 +656,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-6249. Output AclEntry in PBImageXmlWriter.
     (surendra singh lilhore via aajisaka)
 
+    HDFS-8605. Merge Refactor of DFSOutputStream from HDFS-7285 branch.
+    (vinayakumarb via wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
@@ -64,6 +64,8 @@
 import org.apache.htrace.Sampler;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -86,6 +88,7 @@
 @InterfaceAudience.Private
 public class DFSOutputStream extends FSOutputSummer
     implements Syncable, CanSetDropBehind {
+  static final Logger LOG = LoggerFactory.getLogger(DFSOutputStream.class);
   /**
    * Number of times to retry creating a file when there are transient
    * errors (typically related to encryption zones and KeyProvider operations).
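The new SLF4J Logger is what lets the isDebugEnabled() guard in the writeChunk hunk below disappear: with parameterized messages, the arguments are only formatted when DEBUG is actually enabled. A minimal sketch of the two logging styles (class and method names here are illustrative only, not part of the patch):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class LoggingStyleSketch {
      private static final Logger LOG = LoggerFactory.getLogger(LoggingStyleSketch.class);

      void oldStyle(long seqno, String src) {
        // Guarded concatenation style: the guard avoids building the string eagerly.
        if (LOG.isDebugEnabled()) {
          LOG.debug("packet full seqno=" + seqno + ", src=" + src);
        }
      }

      void newStyle(long seqno, String src) {
        // SLF4J parameterized style: placeholders are filled in only if DEBUG is on.
        LOG.debug("packet full seqno={}, src={}", seqno, src);
      }
    }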
@@ -413,23 +416,32 @@ protected synchronized void writeChunk(byte[] b, int offset, int len,
     //
     if (currentPacket.getNumChunks() == currentPacket.getMaxChunks() ||
         getStreamer().getBytesCurBlock() == blockSize) {
-      if (DFSClient.LOG.isDebugEnabled()) {
-        DFSClient.LOG.debug("DFSClient writeChunk packet full seqno=" +
-            currentPacket.getSeqno() +
-            ", src=" + src +
-            ", bytesCurBlock=" + getStreamer().getBytesCurBlock() +
-            ", blockSize=" + blockSize +
-            ", appendChunk=" + getStreamer().getAppendChunk());
-      }
-      getStreamer().waitAndQueuePacket(currentPacket);
-      currentPacket = null;
-
-      adjustChunkBoundary();
-
-      endBlock();
+      enqueueCurrentPacketFull();
     }
   }
 
+  void enqueueCurrentPacket() throws IOException {
+    getStreamer().waitAndQueuePacket(currentPacket);
+    currentPacket = null;
+  }
+
+  void enqueueCurrentPacketFull() throws IOException {
+    LOG.debug("enqueue full {}, src={}, bytesCurBlock={}, blockSize={},"
+        + " appendChunk={}, {}", currentPacket, src, getStreamer()
+        .getBytesCurBlock(), blockSize, getStreamer().getAppendChunk(),
+        getStreamer());
+    enqueueCurrentPacket();
+    adjustChunkBoundary();
+    endBlock();
+  }
+
+  /** create an empty packet to mark the end of the block. */
+  void setCurrentPacketToEmpty() throws InterruptedIOException {
+    currentPacket = createPacket(0, 0, getStreamer().getBytesCurBlock(),
+        getStreamer().getAndIncCurrentSeqno(), true);
+    currentPacket.setSyncBlock(shouldSyncBlock);
+  }
+
   /**
    * If the reopened file did not end at chunk boundary and the above
    * write filled up its partial chunk. Tell the summer to generate full
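The inlined queue-and-reset sequences that the old code repeated are now funneled through these helpers, so endBlock(), flushOrSync() and closeImpl() in the hunks below shrink to one-line calls. A simplified, self-contained sketch of how the helpers compose on the packet-full path (Packet and Streamer are stand-ins, not the real packet and streamer types from the patch):

    class PacketFlowSketch {
      static class Packet {}
      static class Streamer {
        void waitAndQueuePacket(Packet p) { /* hand the packet to the streamer thread */ }
      }

      private Packet currentPacket = new Packet();
      private final Streamer streamer = new Streamer();

      // Queue whatever packet is pending and forget it.
      void enqueueCurrentPacket() {
        streamer.waitAndQueuePacket(currentPacket);
        currentPacket = null;
      }

      // Called when the current packet is full or the block boundary is reached:
      // queue the packet, then fix up chunk and block bookkeeping.
      void enqueueCurrentPacketFull() {
        enqueueCurrentPacket();
        adjustChunkBoundary(); // restore the normal chunk size after a partial append chunk
        endBlock();            // emit an empty end-of-block packet if the block is full
      }

      void adjustChunkBoundary() { /* omitted in this sketch */ }
      void endBlock() { /* omitted in this sketch */ }
    }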
@@ -457,11 +469,8 @@ protected void adjustChunkBoundary() {
    */
   protected void endBlock() throws IOException {
     if (getStreamer().getBytesCurBlock() == blockSize) {
-      currentPacket = createPacket(0, 0, getStreamer().getBytesCurBlock(),
-          getStreamer().getAndIncCurrentSeqno(), true);
-      currentPacket.setSyncBlock(shouldSyncBlock);
-      getStreamer().waitAndQueuePacket(currentPacket);
-      currentPacket = null;
+      setCurrentPacketToEmpty();
+      enqueueCurrentPacket();
       getStreamer().setBytesCurBlock(0);
       lastFlushOffset = 0;
     }
@@ -586,8 +595,7 @@ private void flushOrSync(boolean isSync, EnumSet<SyncFlag> syncFlags)
       }
       if (currentPacket != null) {
         currentPacket.setSyncBlock(isSync);
-        getStreamer().waitAndQueuePacket(currentPacket);
-        currentPacket = null;
+        enqueueCurrentPacket();
       }
       if (endBlock && getStreamer().getBytesCurBlock() > 0) {
         // Need to end the current block, thus send an empty packet to
@@ -595,8 +603,7 @@ private void flushOrSync(boolean isSync, EnumSet<SyncFlag> syncFlags)
         currentPacket = createPacket(0, 0, getStreamer().getBytesCurBlock(),
             getStreamer().getAndIncCurrentSeqno(), true);
         currentPacket.setSyncBlock(shouldSyncBlock || isSync);
-        getStreamer().waitAndQueuePacket(currentPacket);
-        currentPacket = null;
+        enqueueCurrentPacket();
         getStreamer().setBytesCurBlock(0);
         lastFlushOffset = 0;
       } else {
@@ -775,15 +782,11 @@ protected synchronized void closeImpl() throws IOException {
       flushBuffer(); // flush from all upper layers
 
       if (currentPacket != null) {
-        getStreamer().waitAndQueuePacket(currentPacket);
-        currentPacket = null;
+        enqueueCurrentPacket();
       }
 
       if (getStreamer().getBytesCurBlock() != 0) {
-        // send an empty packet to mark the end of the block
-        currentPacket = createPacket(0, 0, getStreamer().getBytesCurBlock(),
-            getStreamer().getAndIncCurrentSeqno(), true);
-        currentPacket.setSyncBlock(shouldSyncBlock);
+        setCurrentPacketToEmpty();
       }
 
       flushInternal(); // flush all data to Datanodes
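The close path keeps its original ordering after the refactor: flush buffered user bytes into a packet, queue any pending packet, mark the end of the block with an empty packet, then wait for the pipeline to acknowledge everything. A simplified sketch of that ordering (stand-in fields and method bodies, not the real implementation):

    class CloseOrderSketch {
      private Object currentPacket;   // stand-in for the pending packet, if any
      private long bytesCurBlock;     // bytes written into the current block so far

      void closeImpl() {
        flushBuffer();                // push remaining user bytes into currentPacket
        if (currentPacket != null) {
          enqueueCurrentPacket();     // hand the last data packet to the streamer
        }
        if (bytesCurBlock != 0) {
          setCurrentPacketToEmpty();  // empty packet marks the end of the block
        }
        flushInternal();              // queue that packet too and wait for acks
      }

      void flushBuffer() {}
      void enqueueCurrentPacket() { currentPacket = null; }
      void setCurrentPacketToEmpty() { currentPacket = new Object(); }
      void flushInternal() { currentPacket = null; bytesCurBlock = 0; }
    }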
@@ -44,7 +44,6 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
@@ -1901,4 +1900,10 @@ void closeSocket() throws IOException {
       s.close();
     }
   }
+
+  @Override
+  public String toString() {
+    return (block == null? null: block.getLocalBlock())
+        + "@" + Arrays.toString(getNodes());
+  }
 }
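The toString() added in this last hunk (which sits in a different file, apparently the streamer class returned by getStreamer()) is what makes the trailing {} placeholder in the new enqueueCurrentPacketFull() debug message render usefully: the streamer prints as its current block followed by the pipeline nodes. A stand-alone sketch of the resulting string shape; Node and the sample values are hypothetical, the real array holds datanode descriptors:

    import java.util.Arrays;

    class ToStringSketch {
      // Hypothetical stand-in for a pipeline node.
      static class Node {
        private final String addr;
        Node(String addr) { this.addr = addr; }
        @Override public String toString() { return addr; }
      }

      private final String localBlock = "blk_1073741825_1001";          // example id only
      private final Node[] nodes =
          { new Node("127.0.0.1:50010"), new Node("127.0.0.2:50010") }; // example pipeline

      @Override
      public String toString() {
        // Same "<block>@<nodes>" pattern as the toString() added above.
        return localBlock + "@" + Arrays.toString(nodes);
      }

      public static void main(String[] args) {
        // Prints: blk_1073741825_1001@[127.0.0.1:50010, 127.0.0.2:50010]
        System.out.println(new ToStringSketch());
      }
    }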