HDFS-8751. Remove setBlocks API from INodeFile and misc code cleanup. Contributed by Zhe Zhang

(cherry picked from commit 47f4c54106)

parent b169889f01
commit 0e8c335e7c
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -374,6 +374,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8726. Move protobuf files that define the client-server protocols to
     hdfs-client. (wheat9)
 
+    HDFS-8751. Remove setBlocks API from INodeFile and misc code cleanup. (Zhe
+    Zhang via jing9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -228,7 +228,7 @@ class FSDirConcatOp {
     int count = 0;
     for (INodeFile nodeToRemove : srcList) {
       if(nodeToRemove != null) {
-        nodeToRemove.setBlocks(null);
+        nodeToRemove.clearBlocks();
         nodeToRemove.getParent().removeChild(nodeToRemove);
         fsd.getINodeMap().remove(nodeToRemove);
         count++;
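A note on this call-site change: concat moves every source file's blocks into the target file, so by this point the source inode's block array is stale; clearBlocks() states that directly instead of funneling null through a general-purpose setter, which the hunks below narrow to private. A minimal sketch of the migration, with FileNode as a hypothetical stand-in for INodeFile:

// FileNode is a hypothetical stand-in for INodeFile; only the surface
// touched by this commit is modeled.
class FileNode {
  private String[] blocks = {"blk_1"};

  /** Intention-revealing mutator, mirroring the new INodeFile#clearBlocks. */
  public void clearBlocks() {
    this.blocks = null;
  }

  public String[] getBlocks() {
    return blocks;
  }

  public static void main(String[] args) {
    FileNode nodeToRemove = new FileNode();
    // Before this commit, callers wrote: nodeToRemove.setBlocks(null);
    nodeToRemove.clearBlocks(); // after: the caller's intent is explicit
    assert nodeToRemove.getBlocks() == null;
  }
}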
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -369,7 +369,7 @@ public class INodeFile extends INodeWithAdditionalFields
   }
 
   /** Set the replication factor of this file. */
-  public final void setFileReplication(short replication) {
+  private void setFileReplication(short replication) {
     header = HeaderFormat.REPLICATION.BITS.combine(replication, header);
   }
 
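For context on the body of that method: INodeFile packs the replication factor, along with other per-file fields, into a single long header, and HeaderFormat.REPLICATION.BITS.combine splices a new value into those bits. A simplified sketch of that style of bit-field update follows; the offset and mask are illustrative only, not the real HeaderFormat layout:

// Illustrative bit-field update; OFFSET and MASK are made up, not the
// real INodeFile.HeaderFormat layout.
class HeaderBits {
  static final int REPLICATION_OFFSET = 48;                  // illustrative
  static final long REPLICATION_MASK = 0xFFFL << REPLICATION_OFFSET;

  /** Splice a new replication value into the packed header. */
  static long combineReplication(short replication, long header) {
    return (header & ~REPLICATION_MASK)
        | (((long) replication << REPLICATION_OFFSET) & REPLICATION_MASK);
  }

  /** Read the replication value back out of the packed header. */
  static short getReplication(long header) {
    return (short) ((header & REPLICATION_MASK) >>> REPLICATION_OFFSET);
  }
}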
@@ -413,33 +413,35 @@ public class INodeFile extends INodeWithAdditionalFields
     setStoragePolicyID(storagePolicyId);
   }
 
-  @Override
+  @Override // INodeFileAttributes
   public long getHeaderLong() {
     return header;
   }
 
   /** @return the blocks of the file. */
-  @Override
+  @Override // BlockCollection
   public BlockInfo[] getBlocks() {
     return this.blocks;
   }
 
   /** @return blocks of the file corresponding to the snapshot. */
   public BlockInfo[] getBlocks(int snapshot) {
-    if(snapshot == CURRENT_STATE_ID || getDiffs() == null)
+    if(snapshot == CURRENT_STATE_ID || getDiffs() == null) {
       return getBlocks();
+    }
     FileDiff diff = getDiffs().getDiffById(snapshot);
-    BlockInfo[] snapshotBlocks =
-        diff == null ? getBlocks() : diff.getBlocks();
-    if(snapshotBlocks != null)
+    BlockInfo[] snapshotBlocks = diff == null ? getBlocks() : diff.getBlocks();
+    if (snapshotBlocks != null) {
       return snapshotBlocks;
+    }
     // Blocks are not in the current snapshot
     // Find next snapshot with blocks present or return current file blocks
     snapshotBlocks = getDiffs().findLaterSnapshotBlocks(snapshot);
     return (snapshotBlocks == null) ? getBlocks() : snapshotBlocks;
   }
 
-  void updateBlockCollection() {
+  /** Used during concat to update the BlockCollection for each block. */
+  private void updateBlockCollection() {
     if (blocks != null) {
       for(BlockInfo b : blocks) {
         b.setBlockCollection(this);
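The getBlocks(int snapshot) logic above resolves a snapshot's view of the file in three steps: the current-state id short-circuits to the live block list; otherwise the diff recorded for that snapshot is consulted; and if that diff holds no blocks, the nearest later snapshot that does (or, failing that, the live list) wins. A simplified, self-contained model of that lookup order, using a TreeMap in place of the real FileDiffList; every name here is an illustrative stand-in, not an HDFS type:

import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

// Simplified model of the snapshot-block lookup order; not the real HDFS types.
class SnapshotBlockLookup {
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE;

  String[] currentBlocks;                                    // live file blocks
  NavigableMap<Integer, String[]> diffs = new TreeMap<>();   // snapshotId -> blocks

  String[] getBlocks(int snapshot) {
    if (snapshot == CURRENT_STATE_ID || diffs.isEmpty()) {
      return currentBlocks;                  // no snapshot state to consult
    }
    String[] snapshotBlocks = diffs.get(snapshot);
    if (snapshotBlocks != null) {
      return snapshotBlocks;                 // this snapshot recorded blocks
    }
    // Blocks are not in this snapshot's diff: fall forward to the next
    // later snapshot that recorded blocks, else use the live blocks.
    Map.Entry<Integer, String[]> later = diffs.higherEntry(snapshot);
    return later == null ? currentBlocks : later.getValue();
  }
}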
@@ -486,10 +488,15 @@ public class INodeFile extends INodeWithAdditionalFields
   }
 
   /** Set the blocks. */
-  public void setBlocks(BlockInfo[] blocks) {
+  private void setBlocks(BlockInfo[] blocks) {
     this.blocks = blocks;
   }
 
+  /** Clear all blocks of the file. */
+  public void clearBlocks() {
+    setBlocks(null);
+  }
+
   @Override
   public void cleanSubtree(ReclaimContext reclaimContext,
       final int snapshot, int priorSnapshotId) {
@@ -543,7 +550,7 @@ public class INodeFile extends INodeWithAdditionalFields
         blk.setBlockCollection(null);
       }
     }
-    setBlocks(null);
+    clearBlocks();
     if (getAclFeature() != null) {
       AclStorage.removeAclFeature(getAclFeature());
     }
@@ -783,16 +790,18 @@ public class INodeFile extends INodeWithAdditionalFields
   public long collectBlocksBeyondMax(final long max,
       final BlocksMapUpdateInfo collectedBlocks) {
     final BlockInfo[] oldBlocks = getBlocks();
-    if (oldBlocks == null)
+    if (oldBlocks == null) {
       return 0;
+    }
     // find the minimum n such that the size of the first n blocks > max
     int n = 0;
     long size = 0;
     for(; n < oldBlocks.length && max > size; n++) {
       size += oldBlocks[n].getNumBytes();
     }
-    if (n >= oldBlocks.length)
+    if (n >= oldBlocks.length) {
       return size;
+    }
 
     // starting from block n, the data is beyond max.
     // resize the array.
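The scan in that hunk is a plain prefix sum: it advances n until the first n blocks already hold at least max bytes, after which every remaining block lies beyond the limit. A self-contained sketch of the same scan over raw sizes; the real code walks BlockInfo[] via getNumBytes(), and firstBlockBeyondMax is a name invented here:

// Invented helper name; mirrors the loop in collectBlocksBeyondMax.
static int firstBlockBeyondMax(long[] blockSizes, long max) {
  int n = 0;
  long size = 0;
  for (; n < blockSizes.length && max > size; n++) {
    size += blockSizes[n];                    // prefix sum of block sizes
  }
  return n; // if n < blockSizes.length, blocks [n, length) are beyond max
}

For example, with block sizes {128, 128, 128} and max = 200 the scan stops at n = 2: the first two blocks already hold 256 >= 200 bytes, so only the third block is collectible.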
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
 
 import java.io.FileNotFoundException;
@@ -1102,4 +1103,12 @@ public class TestINodeFile {
     f1 = inf.getXAttrFeature();
     assertEquals(f1, null);
   }
+
+  @Test
+  public void testClearBlocks() {
+    INodeFile toBeCleared = createINodeFiles(1, "toBeCleared")[0];
+    assertEquals(1, toBeCleared.getBlocks().length);
+    toBeCleared.clearBlocks();
+    assertNull(toBeCleared.getBlocks());
+  }
 }