HDFS-8751. Remove setBlocks API from INodeFile and misc code cleanup. Contributed by Zhe Zhang
(cherry picked from commit 47f4c54106)

parent b169889f01
commit 0e8c335e7c
CHANGES.txt
@@ -374,6 +374,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8726. Move protobuf files that define the client-sever protocols to
     hdfs-client. (wheat9)
 
+    HDFS-8751. Remove setBlocks API from INodeFile and misc code cleanup. (Zhe
+    Zhang via jing9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
FSDirConcatOp.java
@@ -228,7 +228,7 @@ class FSDirConcatOp {
     int count = 0;
     for (INodeFile nodeToRemove : srcList) {
       if(nodeToRemove != null) {
-        nodeToRemove.setBlocks(null);
+        nodeToRemove.clearBlocks();
         nodeToRemove.getParent().removeChild(nodeToRemove);
         fsd.getINodeMap().remove(nodeToRemove);
         count++;
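The call-site change above is the core of the patch: FSDirConcatOp no longer reaches into the inode with setBlocks(null) but states its intent through the new clearBlocks() API. A minimal sketch of the pattern, using illustrative stand-in types rather than the real HDFS classes:

    // Stand-in types for illustration; not the real HDFS classes.
    class Block {}

    class FileNode {
      private Block[] blocks;

      // Narrowed to private by this patch: outside code can no longer
      // install an arbitrary block array.
      private void setBlocks(Block[] blocks) {
        this.blocks = blocks;
      }

      // The one supported external mutation: drop every block, which is
      // exactly what the concat cleanup above needs for removed inodes.
      public void clearBlocks() {
        setBlocks(null);
      }

      public Block[] getBlocks() {
        return blocks;
      }
    }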
INodeFile.java
@@ -369,7 +369,7 @@ public class INodeFile extends INodeWithAdditionalFields
   }
 
   /** Set the replication factor of this file. */
-  public final void setFileReplication(short replication) {
+  private void setFileReplication(short replication) {
     header = HeaderFormat.REPLICATION.BITS.combine(replication, header);
   }
 
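setFileReplication writes the replication factor into the packed header long via HeaderFormat.REPLICATION.BITS.combine. A rough sketch of that style of bit-field update; the 11-bit width and bit-48 offset below are assumptions for illustration, not INodeFile's actual header layout:

    public class HeaderBitsSketch {
      // Hypothetical layout for illustration: replication in an 11-bit
      // window at bit 48. The real layout is defined by INodeFile.HeaderFormat.
      static final int REPL_OFFSET = 48;
      static final long REPL_MASK = (1L << 11) - 1;

      // combine(value, record): clear the field's window, then OR in the
      // new value, mirroring what a LongBitFormat-style combine does.
      static long combineReplication(short replication, long header) {
        header &= ~(REPL_MASK << REPL_OFFSET);
        return header | (((long) replication & REPL_MASK) << REPL_OFFSET);
      }

      public static void main(String[] args) {
        long header = combineReplication((short) 3, 0L);
        System.out.println((header >>> REPL_OFFSET) & REPL_MASK); // prints 3
      }
    }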
INodeFile.java
@@ -413,33 +413,35 @@ public class INodeFile extends INodeWithAdditionalFields
     setStoragePolicyID(storagePolicyId);
   }
 
-  @Override
+  @Override // INodeFileAttributes
   public long getHeaderLong() {
     return header;
   }
 
   /** @return the blocks of the file. */
-  @Override
+  @Override // BlockCollection
   public BlockInfo[] getBlocks() {
     return this.blocks;
   }
 
   /** @return blocks of the file corresponding to the snapshot. */
   public BlockInfo[] getBlocks(int snapshot) {
-    if(snapshot == CURRENT_STATE_ID || getDiffs() == null)
+    if(snapshot == CURRENT_STATE_ID || getDiffs() == null) {
       return getBlocks();
+    }
     FileDiff diff = getDiffs().getDiffById(snapshot);
-    BlockInfo[] snapshotBlocks =
-        diff == null ? getBlocks() : diff.getBlocks();
-    if(snapshotBlocks != null)
+    BlockInfo[] snapshotBlocks = diff == null ? getBlocks() : diff.getBlocks();
+    if (snapshotBlocks != null) {
       return snapshotBlocks;
+    }
     // Blocks are not in the current snapshot
     // Find next snapshot with blocks present or return current file blocks
     snapshotBlocks = getDiffs().findLaterSnapshotBlocks(snapshot);
     return (snapshotBlocks == null) ? getBlocks() : snapshotBlocks;
   }
 
-  void updateBlockCollection() {
+  /** Used during concat to update the BlockCollection for each block. */
+  private void updateBlockCollection() {
     if (blocks != null) {
       for(BlockInfo b : blocks) {
         b.setBlockCollection(this);
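The brace cleanup in getBlocks(int snapshot) leaves the lookup order intact: the snapshot's own diff first, then the nearest later snapshot that recorded blocks, then the live block list. A simplified, self-contained model of that fall-through (FileDiffs and Block are stand-ins, not the real FileDiffList/BlockInfo):

    public class SnapshotBlocksSketch {
      static class Block {}

      // Stand-in for a file diff that may or may not have captured blocks.
      static class FileDiff {
        Block[] blocks; // null if this diff did not record a block list
      }

      interface FileDiffs {
        FileDiff diffById(int snapshotId);           // null if no diff for this id
        Block[] laterSnapshotBlocks(int snapshotId); // nearest later diff's blocks, or null
      }

      static final int CURRENT_STATE_ID = Integer.MAX_VALUE; // assumed sentinel

      static Block[] blocksFor(int snapshot, FileDiffs diffs, Block[] current) {
        if (snapshot == CURRENT_STATE_ID || diffs == null) {
          return current; // live view requested, or the file was never snapshotted
        }
        FileDiff diff = diffs.diffById(snapshot);
        Block[] snapshotBlocks = diff == null ? current : diff.blocks;
        if (snapshotBlocks != null) {
          return snapshotBlocks;
        }
        // The diff recorded no blocks: use the next later snapshot that did,
        // otherwise the current blocks still describe this snapshot.
        snapshotBlocks = diffs.laterSnapshotBlocks(snapshot);
        return snapshotBlocks == null ? current : snapshotBlocks;
      }
    }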
INodeFile.java
@@ -486,10 +488,15 @@ public class INodeFile extends INodeWithAdditionalFields
   }
 
   /** Set the blocks. */
-  public void setBlocks(BlockInfo[] blocks) {
+  private void setBlocks(BlockInfo[] blocks) {
     this.blocks = blocks;
   }
 
+  /** Clear all blocks of the file. */
+  public void clearBlocks() {
+    setBlocks(null);
+  }
+
   @Override
   public void cleanSubtree(ReclaimContext reclaimContext,
       final int snapshot, int priorSnapshotId) {
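The new pairing, private setBlocks plus public clearBlocks, is easy to exercise in isolation. A minimal JUnit 4 sketch against an inline stub; the real coverage added in TestINodeFile#testClearBlocks below goes through actual inodes instead:

    import static org.junit.Assert.assertNotNull;
    import static org.junit.Assert.assertNull;

    import org.junit.Test;

    public class ClearBlocksSketchTest {
      // Inline stand-in for INodeFile's block handling.
      static class FileStub {
        private Object[] blocks = new Object[] { new Object() };
        void clearBlocks() { blocks = null; }
        Object[] getBlocks() { return blocks; }
      }

      @Test
      public void clearBlocksDropsTheArray() {
        FileStub f = new FileStub();
        assertNotNull(f.getBlocks());
        f.clearBlocks();
        assertNull(f.getBlocks());
      }
    }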
INodeFile.java
@@ -543,7 +550,7 @@ public class INodeFile extends INodeWithAdditionalFields
         blk.setBlockCollection(null);
       }
     }
-    setBlocks(null);
+    clearBlocks();
     if (getAclFeature() != null) {
       AclStorage.removeAclFeature(getAclFeature());
     }
INodeFile.java
@@ -783,16 +790,18 @@ public class INodeFile extends INodeWithAdditionalFields
   public long collectBlocksBeyondMax(final long max,
       final BlocksMapUpdateInfo collectedBlocks) {
     final BlockInfo[] oldBlocks = getBlocks();
-    if (oldBlocks == null)
+    if (oldBlocks == null) {
       return 0;
+    }
     // find the minimum n such that the size of the first n blocks > max
     int n = 0;
     long size = 0;
     for(; n < oldBlocks.length && max > size; n++) {
       size += oldBlocks[n].getNumBytes();
     }
-    if (n >= oldBlocks.length)
+    if (n >= oldBlocks.length) {
       return size;
+    }
 
     // starting from block n, the data is beyond max.
     // resize the array.
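collectBlocksBeyondMax scans for the smallest prefix of blocks whose total length exceeds max; everything past that prefix is collected for deletion. A self-contained sketch of just that size scan, with the same loop shape as the patched code (block sizes passed as plain longs for illustration):

    public class BeyondMaxSketch {
      // Returns the minimum n such that the first n blocks' total size
      // exceeds max, or blockSizes.length if the scan consumes every block.
      static int firstIndexBeyondMax(long[] blockSizes, long max) {
        int n = 0;
        long size = 0;
        // Same loop shape as the patch: stop once the accumulated size
        // reaches max or we run out of blocks.
        for (; n < blockSizes.length && max > size; n++) {
          size += blockSizes[n];
        }
        return n; // blocks[n..] (if any) lie beyond max
      }

      public static void main(String[] args) {
        long[] sizes = {128, 128, 128};
        // First two blocks total 256 > 200, so blocks[2..] are beyond max.
        System.out.println(firstIndexBeyondMax(sizes, 200)); // prints 2
      }
    }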
TestINodeFile.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
 
 import java.io.FileNotFoundException;
TestINodeFile.java
@@ -1102,4 +1103,12 @@ public class TestINodeFile {
     f1 = inf.getXAttrFeature();
     assertEquals(f1, null);
   }
+
+  @Test
+  public void testClearBlocks() {
+    INodeFile toBeCleared = createINodeFiles(1, "toBeCleared")[0];
+    assertEquals(1, toBeCleared.getBlocks().length);
+    toBeCleared.clearBlocks();
+    assertNull(toBeCleared.getBlocks());
+  }
 }