HDFS-6830. Revert accidental checkin
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1616884 13f79535-47bb-0310-9956-ffa450edef68
parent a70c9de3f1
commit 05d1bf4157
CHANGES.txt:

@@ -479,9 +479,6 @@ Release 2.6.0 - UNRELEASED
     HDFS-6791. A block could remain under replicated if all of its replicas are on
     decommissioned nodes. (Ming Ma via jing9)
 
-    HDFS-6830. BlockInfo.addStorage fails when DN changes the storage for a
-    block replica. (Arpit Agarwal)
-
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
BlockInfo.java:

@@ -194,12 +194,24 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
    * Add a {@link DatanodeStorageInfo} location for a block
    */
   boolean addStorage(DatanodeStorageInfo storage) {
+    boolean added = true;
+    int idx = findDatanode(storage.getDatanodeDescriptor());
+    if(idx >= 0) {
+      if (getStorageInfo(idx) == storage) { // the storage is already there
+        return false;
+      } else {
+        // The block is on the DN but belongs to a different storage.
+        // Update our state.
+        removeStorage(getStorageInfo(idx));
+        added = false; // Just updating storage. Return false.
+      }
+    }
     // find the last null node
     int lastNode = ensureCapacity(1);
     setStorageInfo(lastNode, storage);
     setNext(lastNode, null);
     setPrevious(lastNode, null);
-    return true;
+    return added;
   }
 
   /**
@@ -228,18 +240,16 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
    * Find specified DatanodeDescriptor.
    * @return index or -1 if not found.
    */
-  boolean findDatanode(DatanodeDescriptor dn) {
+  int findDatanode(DatanodeDescriptor dn) {
     int len = getCapacity();
     for(int idx = 0; idx < len; idx++) {
       DatanodeDescriptor cur = getDatanode(idx);
-      if(cur == dn) {
-        return true;
-      }
-      if(cur == null) {
+      if(cur == dn)
+        return idx;
+      if(cur == null)
         break;
-      }
     }
-    return false;
+    return -1;
   }
   /**
    * Find specified DatanodeStorageInfo.
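Illustrative sketch, not part of this commit: with the restored BlockInfo code,
findDatanode() reports the triplet index of the DataNode (or -1) instead of a
boolean, and addStorage() returns false both when the storage is already
recorded and when it replaces a different storage on the same DataNode. A
caller uses the index roughly as the BlockManager hunks below do; storedBlock
and dn stand for a BlockInfo and DatanodeDescriptor already in scope:

    int idx = storedBlock.findDatanode(dn);
    if (idx < 0) {
      // No replica of this block is recorded for that DataNode.
    } else {
      // The replica is on that DataNode; this storage currently holds it.
      DatanodeStorageInfo storage = storedBlock.getStorageInfo(idx);
    }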
BlockManager.java:

@@ -2065,7 +2065,7 @@ public class BlockManager {
       // Add replica if appropriate. If the replica was previously corrupt
       // but now okay, it might need to be updated.
       if (reportedState == ReplicaState.FINALIZED
-          && (!storedBlock.findDatanode(dn)
+          && (storedBlock.findDatanode(dn) < 0
           || corruptReplicas.isReplicaCorrupt(storedBlock, dn))) {
         toAdd.add(storedBlock);
       }
@@ -2246,7 +2246,7 @@ public class BlockManager {
         storageInfo, ucBlock.reportedBlock, ucBlock.reportedState);
 
     if (ucBlock.reportedState == ReplicaState.FINALIZED &&
-        !block.findDatanode(storageInfo.getDatanodeDescriptor())) {
+        block.findDatanode(storageInfo.getDatanodeDescriptor()) < 0) {
       addStoredBlock(block, storageInfo, null, true);
     }
   }
DatanodeStorageInfo.java:

@@ -208,28 +208,12 @@ public class DatanodeStorageInfo {
   }
 
   public boolean addBlock(BlockInfo b) {
-    // First check whether the block belongs to a different storage
-    // on the same DN.
-    boolean replaced = false;
-    DatanodeStorageInfo otherStorage =
-        b.findStorageInfo(getDatanodeDescriptor());
-
-    if (otherStorage != null) {
-      if (otherStorage != this) {
-        // The block belongs to a different storage. Remove it first.
-        otherStorage.removeBlock(b);
-        replaced = true;
-      } else {
-        // The block is already associated with this storage.
-        return false;
-      }
-    }
-
+    if(!b.addStorage(this))
+      return false;
     // add to the head of the data-node list
-    b.addStorage(this);
     blockList = b.listInsert(blockList, this);
     numBlocks++;
-    return !replaced;
+    return true;
   }
 
   boolean removeBlock(BlockInfo b) {
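Illustrative sketch, not part of this commit: with the restored code the
"replica moved to another storage on the same DataNode" case is detected
inside BlockInfo.addStorage(), so DatanodeStorageInfo.addBlock() simply
returns false whenever addStorage() does. Using the helpers that appear in
the test diff below (DFSTestUtil.createDatanodeStorageInfo, DatanodeStorage),
the expected behaviour is roughly:

    BlockInfo b = new BlockInfo(3);
    DatanodeStorageInfo s1 =
        DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
    DatanodeStorageInfo s2 = new DatanodeStorageInfo(
        s1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));

    s1.addBlock(b);                  // true: first storage to report the block
    boolean added = s2.addBlock(b);  // false: same DataNode, storage replaced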
TestBlockInfo.java:

@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 
@@ -60,24 +59,17 @@ public class TestBlockInfo {
 
 
   @Test
-  public void testReplaceStorage() throws Exception {
-
-    // Create two dummy storages.
+  public void testReplaceStorageIfDifferetnOneAlreadyExistedFromSameDataNode() throws Exception {
+    BlockInfo blockInfo = new BlockInfo(3);
+
     final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
     final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
-    final int NUM_BLOCKS = 10;
-    BlockInfo[] blockInfos = new BlockInfo[NUM_BLOCKS];
-
-    // Create a few dummy blocks and add them to the first storage.
-    for (int i = 0; i < NUM_BLOCKS; ++i) {
-      blockInfos[i] = new BlockInfo(3);
-      storage1.addBlock(blockInfos[i]);
-    }
-
-    // Try to move one of the blocks to a different storage.
-    boolean added = storage2.addBlock(blockInfos[NUM_BLOCKS/2]);
-    Assert.assertThat(added, is(false));
-    Assert.assertThat(blockInfos[NUM_BLOCKS/2].getStorageInfo(0), is(storage2));
+
+    blockInfo.addStorage(storage1);
+    boolean added = blockInfo.addStorage(storage2);
+
+    Assert.assertFalse(added);
+    Assert.assertEquals(storage2, blockInfo.getStorageInfo(0));
   }
 
   @Test
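For reference, and assuming the standard Maven Surefire setup of the
hadoop-hdfs module (not something this commit states), the restored test can
be run on its own with:

    mvn test -Dtest=TestBlockInfo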