HDFS-11097. Fix warnings for deprecated StorageReceivedDeletedBlocks constructor. Contributed by Yiqun Lin.
parent 9ee0e3172e
commit b71907b2ae
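The change replaces every use of the deprecated String-based StorageReceivedDeletedBlocks constructor with the DatanodeStorage-based overload. A minimal sketch of the migration pattern, where storageUuid and rdBlocks stand in for the per-call-site variables seen in the hunks below:

    // Before: deprecated constructor, identifies the storage by its bare ID string.
    StorageReceivedDeletedBlocks before =
        new StorageReceivedDeletedBlocks(storageUuid, rdBlocks);

    // After: wrap the ID in a DatanodeStorage descriptor.
    StorageReceivedDeletedBlocks after =
        new StorageReceivedDeletedBlocks(new DatanodeStorage(storageUuid), rdBlocks);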
DatanodeProtocolServerSideTranslatorPB.java

@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
@@ -230,7 +231,8 @@ public class DatanodeProtocolServerSideTranslatorPB implements
         info[i] = new StorageReceivedDeletedBlocks(
             PBHelperClient.convert(sBlock.getStorage()), rdBlocks);
       } else {
-        info[i] = new StorageReceivedDeletedBlocks(sBlock.getStorageUuid(), rdBlocks);
+        info[i] = new StorageReceivedDeletedBlocks(
+            new DatanodeStorage(sBlock.getStorageUuid()), rdBlocks);
       }
     }
     try {
StorageReceivedDeletedBlocks.java

@@ -28,6 +28,9 @@ public class StorageReceivedDeletedBlocks {
   final DatanodeStorage storage;
   private final ReceivedDeletedBlockInfo[] blocks;
 
+  /**
+   * @deprecated Use {@link #getStorage()} instead
+   */
   @Deprecated
   public String getStorageID() {
     return storage.getStorageID();
@@ -41,6 +44,10 @@ public class StorageReceivedDeletedBlocks {
     return blocks;
   }
 
+  /**
+   * @deprecated Use {@link #StorageReceivedDeletedBlocks(
+   *     DatanodeStorage, ReceivedDeletedBlockInfo[])} instead
+   */
   @Deprecated
   public StorageReceivedDeletedBlocks(final String storageID,
       final ReceivedDeletedBlockInfo[] blocks) {
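Taken together, the two hunks above imply a class shape roughly as follows. This is a sketch assembled from the visible context lines; the constructor and accessor bodies not shown in the diff are assumptions, marked as such in the comments.

    public class StorageReceivedDeletedBlocks {
      final DatanodeStorage storage;
      private final ReceivedDeletedBlockInfo[] blocks;

      /**
       * @deprecated Use {@link #getStorage()} instead
       */
      @Deprecated
      public String getStorageID() {
        return storage.getStorageID();
      }

      public DatanodeStorage getStorage() {
        return storage;
      }

      public ReceivedDeletedBlockInfo[] getBlocks() {
        return blocks;
      }

      /**
       * @deprecated Use {@link #StorageReceivedDeletedBlocks(
       *     DatanodeStorage, ReceivedDeletedBlockInfo[])} instead
       */
      @Deprecated
      public StorageReceivedDeletedBlocks(final String storageID,
          final ReceivedDeletedBlockInfo[] blocks) {
        // Assumed: the legacy constructor simply wraps the bare ID.
        this(new DatanodeStorage(storageID), blocks);
      }

      public StorageReceivedDeletedBlocks(final DatanodeStorage storage,
          final ReceivedDeletedBlockInfo[] blocks) {
        this.storage = storage;
        this.blocks = blocks;
      }
    }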
TestPendingReconstruction.java

@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
@@ -312,7 +313,8 @@ public class TestPendingReconstruction {
       DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(
           poolId);
       StorageReceivedDeletedBlocks[] report = {
-          new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",
+          new StorageReceivedDeletedBlocks(
+              new DatanodeStorage("Fake-storage-ID-Ignored"),
               new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(
                   blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
       cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
@@ -330,9 +332,11 @@ public class TestPendingReconstruction {
       DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(
           poolId);
       StorageReceivedDeletedBlocks[] report =
-          { new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",
-              new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(
-                  blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
+          { new StorageReceivedDeletedBlocks(
+              new DatanodeStorage("Fake-storage-ID-Ignored"),
+              new ReceivedDeletedBlockInfo[] {
+                  new ReceivedDeletedBlockInfo(
+                      blocks[0], BlockStatus.RECEIVED_BLOCK, "")}) };
       cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
       reportDnNum++;
     }
TestIncrementalBrVariations.java

@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.*;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
@@ -172,7 +173,8 @@ public class TestIncrementalBrVariations {
 
       assertTrue(foundBlockOnStorage);
       reports[i] =
-          new StorageReceivedDeletedBlocks(volume.getStorageID(), rdbi);
+          new StorageReceivedDeletedBlocks(
+              new DatanodeStorage(volume.getStorageID()), rdbi);
 
       if (splitReports) {
         // If we are splitting reports then send the report for this storage now.
NNThroughputBenchmark.java

@@ -1037,7 +1037,7 @@ public class NNThroughputBenchmark implements Tool {
             blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
             null) };
         StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
-            targetStorageID, rdBlocks) };
+            new DatanodeStorage(targetStorageID), rdBlocks) };
         dataNodeProto.blockReceivedAndDeleted(receivedDNReg, bpid, report);
       }
     }
@@ -1157,7 +1157,8 @@ public class NNThroughputBenchmark implements Tool {
           loc.getBlock().getLocalBlock(),
           ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
       StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
-          datanodes[dnIdx].storage.getStorageID(), rdBlocks) };
+          new DatanodeStorage(datanodes[dnIdx].storage.getStorageID()),
+          rdBlocks) };
       dataNodeProto.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration,
           bpid, report);
     }
TestDeadDatanode.java

@@ -102,7 +102,8 @@ public class TestDeadDatanode {
         ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
         null) };
     StorageReceivedDeletedBlocks[] storageBlocks = {
-        new StorageReceivedDeletedBlocks(reg.getDatanodeUuid(), blocks) };
+        new StorageReceivedDeletedBlocks(
+            new DatanodeStorage(reg.getDatanodeUuid()), blocks) };
 
     // Ensure blockReceived call from dead datanode is not rejected with
     // IOException, since it's async, but the node remains unregistered.