svn merge -c 1579670 from trunk for HDFS-6129. When a replica is not found for deletion, do not throw an exception.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1579671 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2014-03-20 15:51:28 +00:00
parent d955e84f39
commit f94a9c0bd0
2 changed files with 22 additions and 17 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -180,6 +180,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-6123. Do not log stack trace for ReplicaAlreadyExistsException and
     SocketTimeoutException. (szetszwo)
 
+    HDFS-6129. When a replica is not found for deletion, do not throw an
+    exception. (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -1152,43 +1152,39 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override // FsDatasetSpi
   public void invalidate(String bpid, Block invalidBlks[]) throws IOException {
-    boolean error = false;
+    final List<String> errors = new ArrayList<String>();
     for (int i = 0; i < invalidBlks.length; i++) {
       final File f;
       final FsVolumeImpl v;
       synchronized (this) {
-        f = getFile(bpid, invalidBlks[i].getBlockId());
-        ReplicaInfo info = volumeMap.get(bpid, invalidBlks[i]);
+        final ReplicaInfo info = volumeMap.get(bpid, invalidBlks[i]);
         if (info == null) {
-          LOG.warn("Failed to delete replica " + invalidBlks[i]
+          // It is okay if the block is not found -- it may be deleted earlier.
+          LOG.info("Failed to delete replica " + invalidBlks[i]
               + ": ReplicaInfo not found.");
-          error = true;
           continue;
         }
         if (info.getGenerationStamp() != invalidBlks[i].getGenerationStamp()) {
-          LOG.warn("Failed to delete replica " + invalidBlks[i]
+          errors.add("Failed to delete replica " + invalidBlks[i]
               + ": GenerationStamp not matched, info=" + info);
-          error = true;
           continue;
         }
+        f = info.getBlockFile();
         v = (FsVolumeImpl)info.getVolume();
         if (f == null) {
-          LOG.warn("Failed to delete replica " + invalidBlks[i]
+          errors.add("Failed to delete replica " + invalidBlks[i]
               + ": File not found, volume=" + v);
-          error = true;
           continue;
         }
         if (v == null) {
-          LOG.warn("Failed to delete replica " + invalidBlks[i]
-              + ". No volume for this replica, file=" + f + ".");
-          error = true;
+          errors.add("Failed to delete replica " + invalidBlks[i]
+              + ". No volume for this replica, file=" + f);
           continue;
         }
         File parent = f.getParentFile();
         if (parent == null) {
-          LOG.warn("Failed to delete replica " + invalidBlks[i]
-              + ". Parent not found for file " + f + ".");
-          error = true;
+          errors.add("Failed to delete replica " + invalidBlks[i]
+              + ". Parent not found for file " + f);
           continue;
         }
         ReplicaState replicaState = info.getState();
@@ -1210,8 +1206,14 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           new ExtendedBlock(bpid, invalidBlks[i]),
           dataStorage.getTrashDirectoryForBlockFile(bpid, f));
     }
-    if (error) {
-      throw new IOException("Error in deleting blocks.");
+    if (!errors.isEmpty()) {
+      StringBuilder b = new StringBuilder("Failed to delete ")
+          .append(errors.size()).append(" (out of ").append(invalidBlks.length)
+          .append(") replica(s):");
+      for(int i = 0; i < errors.size(); i++) {
+        b.append("\n").append(i).append(") ").append(errors.get(i));
+      }
+      throw new IOException(b.toString());
     }
   }
 
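
For quick reference, below is a minimal, self-contained sketch of the error-handling pattern this patch adopts in invalidate(): a replica that is already gone is logged and skipped rather than treated as an error, other failures are collected per block, and a single IOException summarizing all failures is thrown at the end. The class, interface, and method names in the sketch are illustrative stand-ins, not the actual FsDatasetImpl API.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

// Illustrative sketch of the HDFS-6129 pattern; not the real FsDatasetImpl.
public class InvalidateSketch {

  /** Stand-in lookup: returns null when the replica is already deleted. */
  interface ReplicaLookup {
    String find(long blockId);
  }

  /** Stand-in deleter: returns false when the file could not be removed. */
  interface ReplicaDeleter {
    boolean delete(String replica);
  }

  public void invalidate(long[] blockIds, ReplicaLookup lookup,
      ReplicaDeleter deleter) throws IOException {
    final List<String> errors = new ArrayList<String>();
    for (long id : blockIds) {
      final String replica = lookup.find(id);
      if (replica == null) {
        // It is okay if the block is not found -- it may be deleted earlier.
        System.out.println("Replica " + id + " not found, skipping.");
        continue;
      }
      if (!deleter.delete(replica)) {
        // Collect the failure and keep going instead of aborting the batch.
        errors.add("Failed to delete replica " + id);
      }
    }
    if (!errors.isEmpty()) {
      // Throw once, summarizing how many deletions failed and why.
      final StringBuilder b = new StringBuilder("Failed to delete ")
          .append(errors.size()).append(" (out of ").append(blockIds.length)
          .append(") replica(s):");
      for (int i = 0; i < errors.size(); i++) {
        b.append("\n").append(i).append(") ").append(errors.get(i));
      }
      throw new IOException(b.toString());
    }
  }
}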