HDFS-7936. Erasure coding: resolving conflicts in the branch when merging trunk changes (this commit is for HDFS-8327 and HDFS-8357). Contributed by Zhe Zhang.

Zhe Zhang 2015-05-11 12:22:12 -07:00 committed by Zhe Zhang
parent 51ea117f88
commit 6bacaa9a52
7 changed files with 23 additions and 55 deletions

BlockInfo.java

@@ -88,13 +88,21 @@ public abstract class BlockInfo extends Block
   BlockInfo getPrevious(int index) {
     assert this.triplets != null : "BlockInfo is not initialized";
     assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
-    return (BlockInfo) triplets[index*3+1];
+    BlockInfo info = (BlockInfo)triplets[index*3+1];
+    assert info == null ||
+        info.getClass().getName().startsWith(BlockInfo.class.getName()) :
+        "BlockInfo is expected at " + index*3;
+    return info;
   }
 
   BlockInfo getNext(int index) {
     assert this.triplets != null : "BlockInfo is not initialized";
     assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound";
-    return (BlockInfo) triplets[index*3+2];
+    BlockInfo info = (BlockInfo)triplets[index*3+2];
+    assert info == null || info.getClass().getName().startsWith(
+        BlockInfo.class.getName()) :
+        "BlockInfo is expected at " + index*3;
+    return info;
   }
 
   void setStorageInfo(int index, DatanodeStorageInfo storage) {
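
Note on the hunk above: the startsWith comparison on the fully qualified class
name accepts subclasses such as BlockInfoContiguous and BlockInfoStriped, whose
names begin with the base class's name, so the shared accessors serve both
replicated and erasure-coded blocks. For orientation, a minimal self-contained
sketch of the triplets layout the accessors assume (an illustrative stand-in,
not the real BlockInfo):

    // Each replica/storage i of a block occupies three consecutive slots:
    //   triplets[3*i]   : the DatanodeStorageInfo holding replica i
    //   triplets[3*i+1] : previous BlockInfo in that storage's block list
    //   triplets[3*i+2] : next BlockInfo in that storage's block list
    public class TripletsSketch {
      private final Object[] triplets;

      TripletsSketch(int replication) {
        this.triplets = new Object[3 * replication];
      }

      Object getPrevious(int index) {
        assert index >= 0 && index * 3 + 1 < triplets.length;
        return triplets[index * 3 + 1];
      }

      Object getNext(int index) {
        assert index >= 0 && index * 3 + 2 < triplets.length;
        return triplets[index * 3 + 2];
      }
    }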

BlockInfoContiguous.java

@@ -47,18 +47,6 @@ public class BlockInfoContiguous extends BlockInfo {
     this.setBlockCollection(from.getBlockCollection());
   }
 
-  public BlockCollection getBlockCollection() {
-    return bc;
-  }
-
-  public void setBlockCollection(BlockCollection bc) {
-    this.bc = bc;
-  }
-
-  public boolean isDeleted() {
-    return (bc == null);
-  }
-
   public DatanodeDescriptor getDatanode(int index) {
     DatanodeStorageInfo storage = getStorageInfo(index);
     return storage == null ? null : storage.getDatanodeDescriptor();
@@ -70,32 +58,6 @@ public class BlockInfoContiguous extends BlockInfo {
     return (DatanodeStorageInfo)triplets[index*3];
   }
 
-  private BlockInfoContiguous getPrevious(int index) {
-    assert this.triplets != null : "BlockInfo is not initialized";
-    assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
-    BlockInfoContiguous info = (BlockInfoContiguous)triplets[index*3+1];
-    assert info == null ||
-        info.getClass().getName().startsWith(BlockInfoContiguous.class.getName()) :
-        "BlockInfo is expected at " + index*3;
-    return info;
-  }
-
-  BlockInfoContiguous getNext(int index) {
-    assert this.triplets != null : "BlockInfo is not initialized";
-    assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound";
-    BlockInfoContiguous info = (BlockInfoContiguous)triplets[index*3+2];
-    assert info == null || info.getClass().getName().startsWith(
-        BlockInfoContiguous.class.getName()) :
-        "BlockInfo is expected at " + index*3;
-    return info;
-  }
-
-  private void setStorageInfo(int index, DatanodeStorageInfo storage) {
-    assert this.triplets != null : "BlockInfo is not initialized";
-    assert index >= 0 && index*3 < triplets.length : "Index is out of bound";
-    triplets[index*3] = storage;
-  }
-
   /**
    * Return the previous block on the block list for the datanode at
    * position index. Set the previous block on the list to "to".
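
These removals are the other half of the BlockInfo hunk above: the accessors
now live once in the abstract base class instead of being duplicated in each
subclass. A rough, self-contained sketch of the resulting shape (simplified,
not verbatim; BlockInfoStriped is the striped subclass this erasure-coding
branch adds):

    abstract class BlockInfo {
      protected Object[] triplets;  // shared storage/prev/next slots

      BlockInfo getPrevious(int index) {
        return (BlockInfo) triplets[index * 3 + 1];
      }

      BlockInfo getNext(int index) {
        return (BlockInfo) triplets[index * 3 + 2];
      }
    }

    class BlockInfoContiguous extends BlockInfo { /* replication-specific */ }
    class BlockInfoStriped extends BlockInfo { /* erasure-coding-specific */ }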

BlockManager.java

@@ -2477,7 +2477,7 @@ public class BlockManager {
       if (rbi.getReportedState() == null) {
         // This is a DELETE_BLOCK request
         DatanodeStorageInfo storageInfo = rbi.getStorageInfo();
-        removeStoredBlock(rbi.getBlock(),
+        removeStoredBlock(getStoredBlock(rbi.getBlock()),
             storageInfo.getDatanodeDescriptor());
       } else {
         processAndHandleReportedBlock(rbi.getStorageInfo(),
@@ -3222,7 +3222,7 @@ public class BlockManager {
           QUEUE_REASON_FUTURE_GENSTAMP);
       return;
     }
-    removeStoredBlock(block, node);
+    removeStoredBlock(getStoredBlock(block), node);
   }
 
   /**
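
Both call sites now resolve the reported Block to its stored BlockInfo before
removal. On this branch that plausibly matters because a datanode reports
individual member blocks of an erasure-coded group, and the lookup can map such
a report back to the stored entry (my reading of the change, not stated in the
diff). A hedged sketch of the pattern with stand-in signatures:

    // Stand-in sketch; not BlockManager's real signatures.
    class RemovalSketch {
      /** Consults a blocks map; may map a member block ID to its group. */
      static Object getStoredBlock(long reportedBlockId) {
        return null;  // unknown block in this sketch
      }

      static void removeStoredBlock(Object stored, String node) {
        if (stored == null) {
          return;  // nothing stored under that ID: nothing to remove
        }
        // ... unlink the stored block from the node's storage lists ...
      }

      static void onDeleteRequest(long reportedBlockId, String node) {
        removeStoredBlock(getStoredBlock(reportedBlockId), node);
      }
    }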

ErasureCodingWorker.java

@@ -49,6 +49,7 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSPacket;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.RemoteBlockReader2;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
@@ -872,7 +873,7 @@ public final class ErasureCodingWorker {
         unbufIn = saslStreams.in;
         out = new DataOutputStream(new BufferedOutputStream(unbufOut,
-            HdfsServerConstants.SMALL_BUFFER_SIZE));
+            DFSUtil.getSmallBufferSize(conf)));
         in = new DataInputStream(unbufIn);
         DatanodeInfo source = new DatanodeInfo(datanode.getDatanodeId());
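
The second hunk replaces the static HdfsServerConstants.SMALL_BUFFER_SIZE with
a value computed from the worker's Configuration, so the buffer tracks the
deployment's configured I/O buffer size instead of a compile-time constant. A
self-contained sketch of what the helper plausibly does (the formula is an
assumption; only the DFSUtil.getSmallBufferSize(conf) call comes from the
diff):

    import java.util.Properties;

    class BufferSizeSketch {
      // Stand-in for DFSUtil.getSmallBufferSize(conf). Assumed behavior:
      // halve the configured io.file.buffer.size and cap the result.
      static int getSmallBufferSize(Properties conf) {
        int ioFileBufferSize =
            Integer.parseInt(conf.getProperty("io.file.buffer.size", "4096"));
        return Math.min(ioFileBufferSize / 2, 512);
      }

      public static void main(String[] args) {
        System.out.println(getSmallBufferSize(new Properties()));  // 512
      }
    }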

INodeFile.java

@@ -706,11 +706,7 @@ public class INodeFile extends INodeWithAdditionalFields
    */
   public final QuotaCounts computeQuotaUsageWithStriped(
       BlockStoragePolicySuite bsps, QuotaCounts counts) {
-    long nsDelta = 1;
-    final long ssDelta = storagespaceConsumed();
-    counts.addNameSpace(nsDelta);
-    counts.addStorageSpace(ssDelta);
-    return counts;
+    return null;
   }
 
   @Override
@@ -979,11 +975,11 @@ public class INodeFile extends INodeWithAdditionalFields
     }
 
     long size = 0;
-    for (BlockInfoContiguous b : blocks) {
+    for (BlockInfo b : blocks) {
       size += b.getNumBytes();
     }
 
-    BlockInfoContiguous[] sblocks = null;
+    BlockInfo[] sblocks = null;
     FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
     if (sf != null) {
       FileDiff diff = sf.getDiffs().getLast();
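
Widening the loop and array types from BlockInfoContiguous to the BlockInfo
base class lets the same accounting code handle files whose block lists mix
replicated and striped blocks. A self-contained sketch of the idea, using
stand-in types rather than the real classes:

    // Stand-ins only: shows why the base-typed loop needs no layout checks.
    abstract class SketchBlockInfo {
      abstract long getNumBytes();
    }

    class SketchContiguous extends SketchBlockInfo {
      long getNumBytes() { return 1024; }  // bytes in a replicated block
    }

    class SketchStriped extends SketchBlockInfo {
      long getNumBytes() { return 4096; }  // bytes across a striped group
    }

    class FileSizeSketch {
      public static void main(String[] args) {
        SketchBlockInfo[] blocks = { new SketchContiguous(), new SketchStriped() };
        long size = 0;
        for (SketchBlockInfo b : blocks) {
          size += b.getNumBytes();  // same accessor regardless of layout
        }
        System.out.println(size);   // 5120
      }
    }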

TestStripedINodeFile.java

@@ -109,8 +109,8 @@ public class TestStripedINodeFile {
     //  a. <Cell Size> * (<Num Stripes> - 1) * <Total Block Num> = 0
     //  b. <Num Bytes> % <Num Bytes per Stripes> = 1
     //  c. <Last Stripe Length> * <Parity Block Num> = 1 * 3
-    assertEquals(4, inf.storagespaceConsumedWithStriped());
-    assertEquals(4, inf.storagespaceConsumed());
+    assertEquals(4, inf.storagespaceConsumedWithStriped(null));
+    assertEquals(4, inf.storagespaceConsumed(null));
   }
 
   @Test
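
For the expected value 4 above: with a one-byte file there are no full stripes
(a = 0), one byte of data sits in the last stripe (b = 1), and each of the
three parity blocks stores one byte for that partial stripe (c = 3), giving
0 + 1 + 3 = 4. A worked version under assumed parameters (an RS 6+3 schema is
assumed; the cell size does not affect the result for a one-byte file):

    class StripedSpaceSketch {
      public static void main(String[] args) {
        long cellSize = 64 * 1024;  // assumed; irrelevant when numStripes == 1
        long numBytes = 1;          // one-byte file under test
        long dataBlocks = 6, parityBlocks = 3;           // assumed RS 6+3
        long totalBlockNum = dataBlocks + parityBlocks;  // 9
        long numStripes = 1;        // a single byte fits in the first stripe

        long a = cellSize * (numStripes - 1) * totalBlockNum;  // 0
        long b = numBytes % (dataBlocks * cellSize);           // 1
        long c = b * parityBlocks;                             // 1 * 3
        System.out.println(a + b + c);                         // 4
      }
    }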
@@ -134,8 +134,8 @@ public class TestStripedINodeFile {
     inf.addBlock(blockInfoStriped1);
     inf.addBlock(blockInfoStriped2);
     // This is the double size of one block in above case.
-    assertEquals(4 * 2, inf.storagespaceConsumedWithStriped());
-    assertEquals(4 * 2, inf.storagespaceConsumed());
+    assertEquals(4 * 2, inf.storagespaceConsumedWithStriped(null));
+    assertEquals(4 * 2, inf.storagespaceConsumed(null));
   }
 
   @Test

TestTruncateQuotaUpdate.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
@@ -77,7 +78,7 @@ public class TestTruncateQuotaUpdate {
   @Test
   public void testTruncateWithSnapshotNoDivergence() {
     INodeFile file = createMockFile(BLOCKSIZE * 2 + BLOCKSIZE / 2, REPLICATION);
-    addSnapshotFeature(file, file.getBlocks());
+    addSnapshotFeature(file, file.getContiguousBlocks());
     // case 4: truncate to 1.5 blocks
     // all the blocks are in snapshot. truncate need to allocate a new block
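
After the merge, getBlocks() evidently returns the widened BlockInfo[] view
(which may include striped blocks), so this call site switches to
getContiguousBlocks() to keep handing the snapshot feature the contiguous
subtype. A self-contained sketch of that accessor split; the two method names
come from the diff, everything else here is a hypothetical stand-in:

    import java.util.Arrays;

    class AccessorSplitSketch {
      static class BI { }              // stands in for BlockInfo
      static class BIC extends BI { }  // stands in for BlockInfoContiguous

      private final BI[] blocks = { new BIC(), new BIC() };

      BI[] getBlocks() {               // widened, base-typed view
        return blocks;
      }

      BIC[] getContiguousBlocks() {    // contiguous-only view (body assumed)
        return Arrays.stream(blocks)
            .filter(b -> b instanceof BIC)
            .toArray(BIC[]::new);
      }
    }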