HDFS-13958. Miscellaneous Improvements for FsVolumeSpi. Contributed by BELUGA BEHR.

Inigo Goiri 2018-10-05 09:32:19 -07:00
parent f13e231025
commit 73c660b43f
7 changed files with 254 additions and 297 deletions

View File

@ -17,8 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
@ -26,7 +24,6 @@ import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
@ -46,7 +43,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.ScanInfo;
@ -657,7 +653,7 @@ public class DirectoryScanner implements Runnable {
perfTimer.start();
throttleTimer.start();
for (String bpid : bpList) {
LinkedList<ScanInfo> report = new LinkedList<>();
List<ScanInfo> report = new ArrayList<>(DEFAULT_MAP_SIZE);
perfTimer.reset().start();
throttleTimer.reset().start();
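The hunk above switches the per-block-pool report from a LinkedList to a pre-sized ArrayList held behind the List interface. A minimal standalone sketch of that pattern; the element type and the capacity value are placeholders (the real code uses ScanInfo and DirectoryScanner's DEFAULT_MAP_SIZE):

import java.util.ArrayList;
import java.util.List;

public class ReportListSketch {
  // Placeholder for DirectoryScanner.DEFAULT_MAP_SIZE; the actual value is
  // not shown in this diff.
  private static final int DEFAULT_CAPACITY = 32768;

  public static void main(String[] args) {
    // ArrayList stores elements contiguously and appends in amortized O(1);
    // pre-sizing avoids repeated array growth while the report is filled,
    // and declaring the variable as List keeps callers off the concrete type.
    List<String> report = new ArrayList<>(DEFAULT_CAPACITY);
    report.add("blk_1073741825");
    System.out.println(report.size()); // prints 1
  }
}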
@ -720,16 +716,4 @@ public class DirectoryScanner implements Runnable {
perfTimer.reset().start();
}
}
public enum BlockDirFilter implements FilenameFilter {
INSTANCE;
@Override
public boolean accept(File dir, String name) {
return name.startsWith(DataStorage.BLOCK_SUBDIR_PREFIX)
|| name.startsWith(DataStorage.STORAGE_DIR_FINALIZED)
|| name.startsWith(Block.BLOCK_FILE_PREFIX);
}
}
}

View File

@ -22,7 +22,7 @@ import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.nio.channels.ClosedChannelException;
import java.util.LinkedList;
import java.util.Collection;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@ -32,9 +32,9 @@ import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
import org.apache.hadoop.hdfs.server.common.FileRegion;
import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.checker.Checkable;
import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
@ -362,13 +362,13 @@ public interface FsVolumeSpi
public File getMetaFile() {
if (metaSuffix == null) {
return null;
} else if (blockSuffix == null) {
return new File(new File(volume.getBaseURI()).getAbsolutePath(),
metaSuffix);
} else {
return new File(new File(volume.getBaseURI()).getAbsolutePath(),
blockSuffix + metaSuffix);
}
String fileSuffix = metaSuffix;
if (blockSuffix != null) {
fileSuffix = blockSuffix + metaSuffix;
}
return new File(new File(volume.getBaseURI()).getAbsolutePath(),
fileSuffix);
}
/**
@ -389,18 +389,12 @@ public interface FsVolumeSpi
return volume;
}
@Override // Comparable
@Override
public int compareTo(ScanInfo b) {
if (blockId < b.blockId) {
return -1;
} else if (blockId == b.blockId) {
return 0;
} else {
return 1;
}
return Long.compare(this.blockId, b.blockId);
}
@Override // Object
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
@ -411,9 +405,9 @@ public interface FsVolumeSpi
return blockId == ((ScanInfo) o).blockId;
}
@Override // Object
@Override
public int hashCode() {
return (int)(blockId^(blockId>>>32));
return Long.hashCode(this.blockId);
}
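The two changes above replace a hand-written comparison ladder and hash folding with the JDK helpers. A small self-contained sketch of a class keyed on a long id, assuming nothing beyond java.lang:

public class BlockIdSketch implements Comparable<BlockIdSketch> {
  private final long blockId;

  public BlockIdSketch(long blockId) {
    this.blockId = blockId;
  }

  @Override
  public int compareTo(BlockIdSketch other) {
    // Long.compare replaces the three-way if/else ladder and cannot overflow
    // the way a subtraction-based comparison could.
    return Long.compare(this.blockId, other.blockId);
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof BlockIdSketch)) {
      return false;
    }
    return blockId == ((BlockIdSketch) o).blockId;
  }

  @Override
  public int hashCode() {
    // Long.hashCode(v) computes the same (int) (v ^ (v >>> 32)) folding the
    // old code spelled out by hand.
    return Long.hashCode(blockId);
  }
}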
public long getGenStamp() {
@ -447,8 +441,8 @@ public interface FsVolumeSpi
* @param reportCompiler
* @throws IOException
*/
LinkedList<ScanInfo> compileReport(String bpid,
LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
void compileReport(String bpid,
Collection<ScanInfo> report, ReportCompiler reportCompiler)
throws InterruptedException, IOException;
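With this signature change, compileReport no longer builds and returns a LinkedList; the caller supplies a Collection and the volume only appends to it. A hedged sketch of how a caller might look under that contract (Volume and the String payload are simplified stand-ins, not the real FsVolumeSpi types):

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

public class CompileReportCallerSketch {

  // Simplified stand-in for the new FsVolumeSpi#compileReport contract.
  interface Volume {
    void compileReport(String bpid, Collection<String> report)
        throws InterruptedException;
  }

  static List<String> scan(Volume volume, String bpid)
      throws InterruptedException {
    // The caller owns the collection and chooses its implementation.
    List<String> report = new ArrayList<>();
    volume.compileReport(bpid, report);
    return report;
  }

  public static void main(String[] args) throws InterruptedException {
    Volume v = (bpid, report) -> report.add(bpid + ":blk_1");
    System.out.println(scan(v, "BP-1")); // prints [BP-1:blk_1]
  }
}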
/**

View File

@ -28,8 +28,8 @@ import java.net.URI;
import java.nio.channels.ClosedChannelException;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
@ -46,38 +46,37 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
import org.apache.hadoop.hdfs.server.datanode.LocalReplica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
import org.apache.hadoop.hdfs.server.datanode.LocalReplicaInPipeline;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.BlockDirFilter;
import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.LocalReplica;
import org.apache.hadoop.hdfs.server.datanode.LocalReplicaInPipeline;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.util.CloseableReferenceCount;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Timer;
import org.slf4j.Logger;
@ -311,11 +310,8 @@ public class FsVolumeImpl implements FsVolumeSpi {
*/
boolean checkClosed() {
if (this.reference.getReferenceCount() > 0) {
if (FsDatasetImpl.LOG.isDebugEnabled()) {
FsDatasetImpl.LOG.debug(String.format(
"The reference count for %s is %d, wait to be 0.",
this, reference.getReferenceCount()));
}
FsDatasetImpl.LOG.debug("The reference count for {} is {}, wait to be 0.",
this, reference.getReferenceCount());
return false;
}
return true;
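The hunk above drops the isDebugEnabled() guard and String.format in favor of SLF4J's parameterized logging. A minimal sketch, assuming slf4j-api (plus a binding) on the classpath as it is for the DataNode:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ParameterizedDebugSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ParameterizedDebugSketch.class);

  public static void main(String[] args) {
    int referenceCount = 3;
    // The {} placeholders are only substituted when DEBUG is enabled, so the
    // message is never formatted on the hot path when the level is off.
    LOG.debug("The reference count for {} is {}, wait to be 0.",
        "volume-1", referenceCount);
  }
}

An explicit level check remains useful when an argument is itself expensive to compute; the iterator load() hunk later in this file keeps its guard for exactly that reason.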
@ -381,7 +377,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
@VisibleForTesting
public long getDfsUsed() throws IOException {
long dfsUsed = 0;
for(BlockPoolSlice s : bpSlices.values()) {
for (BlockPoolSlice s : bpSlices.values()) {
dfsUsed += s.getDfsUsed();
}
return dfsUsed;
@ -400,11 +396,10 @@ public class FsVolumeImpl implements FsVolumeSpi {
*/
@VisibleForTesting
public long getCapacity() {
if (configuredCapacity < 0) {
if (configuredCapacity < 0L) {
long remaining = usage.getCapacity() - getReserved();
return remaining > 0 ? remaining : 0;
return Math.max(remaining, 0L);
}
return configuredCapacity;
}
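getCapacity now clamps with Math.max instead of a conditional; the same pattern recurs in the getAvailable and getNonDfsUsed hunks below. A tiny sketch:

public class ClampSketch {
  // Clamp a possibly negative byte count to zero.
  static long nonNegative(long value) {
    return Math.max(value, 0L);
  }

  public static void main(String[] args) {
    System.out.println(nonNegative(100L - 250L)); // prints 0
    System.out.println(nonNegative(100L - 40L));  // prints 60
  }
}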
@ -418,9 +413,9 @@ public class FsVolumeImpl implements FsVolumeSpi {
this.configuredCapacity = capacity;
}
/*
/**
* Calculate the available space of the filesystem, excluding space reserved
* for non-HDFS and space reserved for RBW
* for non-HDFS and space reserved for RBW.
*
* @return the available number of bytes left in this filesystem. May be zero.
*/
@ -432,7 +427,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
if (remaining > available) {
remaining = available;
}
return (remaining > 0) ? remaining : 0;
return Math.max(remaining, 0L);
}
long getActualNonDfsUsed() throws IOException {
@ -458,10 +453,8 @@ public class FsVolumeImpl implements FsVolumeSpi {
public long getNonDfsUsed() throws IOException {
long actualNonDfsUsed = getActualNonDfsUsed();
long actualReserved = getReserved();
if (actualNonDfsUsed < actualReserved) {
return 0L;
}
return actualNonDfsUsed - actualReserved;
long nonDfsUsed = actualNonDfsUsed - actualReserved;
return Math.max(nonDfsUsed, 0L);
}
@VisibleForTesting
@ -503,7 +496,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
try {
return new DF(new File(currentDir.getParent()), conf);
} catch (IOException e) {
LOG.error("Unable to get disk statistics for volume " + this);
LOG.error("Unable to get disk statistics for volume {}", this, e);
}
}
return null;
@ -525,11 +518,11 @@ public class FsVolumeImpl implements FsVolumeSpi {
}
/**
* Make a deep copy of the list of currently active BPIDs
* Make a deep copy of the list of currently active BPIDs.
*/
@Override
public String[] getBlockPoolList() {
return bpSlices.keySet().toArray(new String[bpSlices.keySet().size()]);
return bpSlices.keySet().toArray(new String[0]);
}
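toArray(new String[0]) lets the set allocate a correctly sized array itself, replacing the pre-sized new String[keySet().size()] form. A short sketch:

import java.util.Set;
import java.util.TreeSet;

public class ToArraySketch {
  public static void main(String[] args) {
    Set<String> bpids = new TreeSet<>();
    bpids.add("BP-1");
    bpids.add("BP-2");
    // Passing a zero-length array avoids reading size() twice and, on
    // modern JVMs, is at least as fast as pre-sizing the destination.
    String[] copy = bpids.toArray(new String[0]);
    System.out.println(copy.length); // prints 2
  }
}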
/**
@ -549,7 +542,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
@Override
public void reserveSpaceForReplica(long bytesToReserve) {
if (bytesToReserve != 0) {
if (bytesToReserve != 0L) {
reservedForReplicas.addAndGet(bytesToReserve);
recentReserved = bytesToReserve;
}
@ -557,17 +550,15 @@ public class FsVolumeImpl implements FsVolumeSpi {
@Override
public void releaseReservedSpace(long bytesToRelease) {
if (bytesToRelease != 0) {
if (bytesToRelease != 0L) {
long oldReservation, newReservation;
do {
oldReservation = reservedForReplicas.get();
newReservation = oldReservation - bytesToRelease;
if (newReservation < 0) {
// Failsafe, this should never occur in practice, but if it does we
// don't want to start advertising more space than we have available.
newReservation = 0;
}
// Fail-safe: this should never be less than zero in practice, but if it
// is, do not advertise more space than is actually available.
newReservation = Math.max(newReservation, 0L);
} while (!reservedForReplicas.compareAndSet(oldReservation,
newReservation));
}
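releaseReservedSpace keeps its compare-and-set retry loop but now clamps the new value with Math.max. A self-contained sketch of the same loop on a plain AtomicLong (class and method names here are illustrative only):

import java.util.concurrent.atomic.AtomicLong;

public class ReservationSketch {
  private final AtomicLong reserved = new AtomicLong();

  void reserve(long bytes) {
    reserved.addAndGet(bytes);
  }

  void release(long bytes) {
    long oldReservation;
    long newReservation;
    do {
      oldReservation = reserved.get();
      // Clamp to zero so an over-release can never drive the counter
      // negative and make the volume advertise space it does not have.
      newReservation = Math.max(oldReservation - bytes, 0L);
    } while (!reserved.compareAndSet(oldReservation, newReservation));
  }

  public static void main(String[] args) {
    ReservationSketch r = new ReservationSketch();
    r.reserve(1024L);
    r.release(4096L);
    System.out.println(r.reserved.get()); // prints 0
  }
}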
@ -679,20 +670,15 @@ public class FsVolumeImpl implements FsVolumeSpi {
FsVolumeImpl.this, dir, SubdirFilter.INSTANCE);
cache = null;
cacheMs = 0;
if (children.size() == 0) {
if (children.isEmpty()) {
LOG.trace("getNextSubDir({}, {}): no subdirectories found in {}",
storageID, bpid, dir.getAbsolutePath());
return null;
}
Collections.sort(children);
String nextSubDir = nextSorted(children, prev);
if (nextSubDir == null) {
LOG.trace("getNextSubDir({}, {}): no more subdirectories found in {}",
storageID, bpid, dir.getAbsolutePath());
} else {
LOG.trace("getNextSubDir({}, {}): picking next subdirectory {} " +
"within {}", storageID, bpid, nextSubDir, dir.getAbsolutePath());
}
LOG.trace("getNextSubDir({}, {}): picking next subdirectory {} within {}",
storageID, bpid, nextSubDir, dir.getAbsolutePath());
return nextSubDir;
}
@ -731,15 +717,12 @@ public class FsVolumeImpl implements FsVolumeSpi {
state.curFinalizedDir, state.curFinalizedSubDir).toFile();
List<String> entries = fileIoProvider.listDirectory(
FsVolumeImpl.this, dir, BlockFileFilter.INSTANCE);
if (entries.size() == 0) {
if (entries.isEmpty()) {
entries = null;
LOG.trace("getSubdirEntries({}, {}): no entries found in {}", storageID,
bpid, dir.getAbsolutePath());
} else {
Collections.sort(entries);
}
if (entries == null) {
LOG.trace("getSubdirEntries({}, {}): no entries found in {}",
storageID, bpid, dir.getAbsolutePath());
} else {
LOG.trace("getSubdirEntries({}, {}): listed {} entries in {}",
storageID, bpid, entries.size(), dir.getAbsolutePath());
}
@ -872,10 +855,12 @@ public class FsVolumeImpl implements FsVolumeSpi {
public void load() throws IOException {
File file = getSaveFile();
this.state = READER.readValue(file);
if (LOG.isTraceEnabled()) {
LOG.trace("load({}, {}): loaded iterator {} from {}: {}", storageID,
bpid, name, file.getAbsoluteFile(),
WRITER.writeValueAsString(state));
}
}
File getSaveFile() {
return new File(bpidDir, name + ".cursor");
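The load() hunk above wraps the trace call in an isTraceEnabled() check because one of its arguments, WRITER.writeValueAsString(state), is costly to build even when TRACE is off. A hedged sketch of that guard (the expensiveRender helper is hypothetical), again assuming slf4j-api on the classpath:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardedTraceSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(GuardedTraceSketch.class);

  // Hypothetical stand-in for WRITER.writeValueAsString(state).
  private static String expensiveRender() {
    return "{\"lastBlockId\":-1}";
  }

  public static void main(String[] args) {
    // Parameterized logging defers formatting the message, but it cannot
    // defer computing the arguments themselves, hence the explicit check.
    if (LOG.isTraceEnabled()) {
      LOG.trace("loaded iterator state: {}", expensiveRender());
    }
  }
}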
@ -956,15 +941,21 @@ public class FsVolumeImpl implements FsVolumeSpi {
long bytesReserved) throws IOException {
releaseReservedSpace(bytesReserved);
File dest = getBlockPoolSlice(bpid).addFinalizedBlock(b, replicaInfo);
byte[] checksum = null;
final byte[] checksum;
// copy the last partial checksum if the replica is originally
// in finalized or rbw state.
if (replicaInfo.getState() == ReplicaState.FINALIZED) {
FinalizedReplica finalized = (FinalizedReplica)replicaInfo;
switch (replicaInfo.getState()) {
case FINALIZED:
FinalizedReplica finalized = (FinalizedReplica) replicaInfo;
checksum = finalized.getLastPartialChunkChecksum();
} else if (replicaInfo.getState() == ReplicaState.RBW) {
ReplicaBeingWritten rbw = (ReplicaBeingWritten)replicaInfo;
break;
case RBW:
ReplicaBeingWritten rbw = (ReplicaBeingWritten) replicaInfo;
checksum = rbw.getLastChecksumAndDataLen().getChecksum();
break;
default:
checksum = null;
break;
}
return new ReplicaBuilder(ReplicaState.FINALIZED)
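The if/else-if chain over ReplicaState becomes a switch with an explicit default. A compilable sketch of that shape, using a hypothetical enum and placeholder checksum values:

public class ChecksumSwitchSketch {
  // Hypothetical subset of replica states, mirroring the cases in the hunk.
  enum State { FINALIZED, RBW, TEMPORARY }

  static byte[] lastChecksumFor(State state) {
    final byte[] checksum;
    switch (state) {
    case FINALIZED:
      checksum = new byte[] {1}; // stand-in for the finalized replica's value
      break;
    case RBW:
      checksum = new byte[] {2}; // stand-in for the in-progress replica's value
      break;
    default:
      // Other states carry no last partial chunk checksum.
      checksum = null;
      break;
    }
    return checksum;
  }

  public static void main(String[] args) {
    System.out.println(lastChecksumFor(State.RBW)[0]); // prints 2
  }
}

Declaring checksum as final and assigning it exactly once per branch lets the compiler's definite-assignment check flag any case that forgets to set it.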
@ -983,28 +974,26 @@ public class FsVolumeImpl implements FsVolumeSpi {
public VolumeCheckResult check(VolumeCheckContext ignored)
throws DiskErrorException {
// TODO:FEDERATION valid synchronization
for(BlockPoolSlice s : bpSlices.values()) {
for (BlockPoolSlice s : bpSlices.values()) {
s.checkDirs();
}
return VolumeCheckResult.HEALTHY;
}
void getVolumeMap(ReplicaMap volumeMap,
final RamDiskReplicaTracker ramDiskReplicaMap)
throws IOException {
for(BlockPoolSlice s : bpSlices.values()) {
final RamDiskReplicaTracker ramDiskReplicaMap) throws IOException {
for (BlockPoolSlice s : bpSlices.values()) {
s.getVolumeMap(volumeMap, ramDiskReplicaMap);
}
}
void getVolumeMap(String bpid, ReplicaMap volumeMap,
final RamDiskReplicaTracker ramDiskReplicaMap)
throws IOException {
final RamDiskReplicaTracker ramDiskReplicaMap) throws IOException {
getBlockPoolSlice(bpid).getVolumeMap(volumeMap, ramDiskReplicaMap);
}
long getNumBlocks() {
long numBlocks = 0;
long numBlocks = 0L;
for (BlockPoolSlice s : bpSlices.values()) {
numBlocks += s.getNumOfBlocks();
}
@ -1038,10 +1027,9 @@ public class FsVolumeImpl implements FsVolumeSpi {
File bpdir = new File(currentDir, bpid);
BlockPoolSlice bp;
if (timer == null) {
bp = new BlockPoolSlice(bpid, this, bpdir, c, new Timer());
} else {
bp = new BlockPoolSlice(bpid, this, bpdir, c, timer);
timer = new Timer();
}
bp = new BlockPoolSlice(bpid, this, bpdir, c, timer);
bpSlices.put(bpid, bp);
}
@ -1137,7 +1125,6 @@ public class FsVolumeImpl implements FsVolumeSpi {
return new DatanodeStorage(storageID, DatanodeStorage.State.NORMAL, storageType);
}
@Override
public byte[] loadLastPartialChunkChecksum(
File blockFile, File metaFile) throws IOException {
@ -1313,11 +1300,10 @@ public class FsVolumeImpl implements FsVolumeSpi {
}
@Override
public LinkedList<ScanInfo> compileReport(String bpid,
LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
throws InterruptedException, IOException {
return compileReport(getFinalizedDir(bpid),
getFinalizedDir(bpid), report, reportCompiler);
public void compileReport(String bpid, Collection<ScanInfo> report,
ReportCompiler reportCompiler) throws InterruptedException, IOException {
compileReport(getFinalizedDir(bpid), getFinalizedDir(bpid), report,
reportCompiler);
}
@Override
@ -1330,21 +1316,35 @@ public class FsVolumeImpl implements FsVolumeSpi {
return metrics;
}
private LinkedList<ScanInfo> compileReport(File bpFinalizedDir,
File dir, LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
/**
* Filter for block file names stored on the file system volumes.
*/
public enum BlockDirFilter implements FilenameFilter {
INSTANCE;
@Override
public boolean accept(File dir, String name) {
return name.startsWith(DataStorage.BLOCK_SUBDIR_PREFIX)
|| name.startsWith(DataStorage.STORAGE_DIR_FINALIZED)
|| name.startsWith(Block.BLOCK_FILE_PREFIX);
}
}
private void compileReport(File bpFinalizedDir, File dir,
Collection<ScanInfo> report, ReportCompiler reportCompiler)
throws InterruptedException {
reportCompiler.throttle();
List <String> fileNames;
try {
fileNames = fileIoProvider.listDirectory(
this, dir, BlockDirFilter.INSTANCE);
fileNames =
fileIoProvider.listDirectory(this, dir, BlockDirFilter.INSTANCE);
} catch (IOException ioe) {
LOG.warn("Exception occurred while compiling report: ", ioe);
LOG.warn("Exception occurred while compiling report", ioe);
// Volume error check moved to FileIoProvider.
// Ignore this directory and proceed.
return report;
return;
}
Collections.sort(fileNames);
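The enum-as-singleton FilenameFilter relocated in the hunk above keeps one stateless instance and needs no synchronization. A standalone sketch of the same shape; the prefixes are illustrative, not the actual DataStorage and Block constants:

import java.io.File;
import java.io.FilenameFilter;

public class DirFilterSketch {
  // Enum-as-singleton filter: a single stateless, serialization-safe instance.
  enum PrefixFilter implements FilenameFilter {
    INSTANCE;

    @Override
    public boolean accept(File dir, String name) {
      // Illustrative prefixes only; the real filter checks the DataStorage
      // and Block prefix constants.
      return name.startsWith("subdir")
          || name.startsWith("finalized")
          || name.startsWith("blk_");
    }
  }

  public static void main(String[] args) {
    // File.list(FilenameFilter) filters by name without stat-ing each entry.
    String[] names = new File(".").list(PrefixFilter.INSTANCE);
    System.out.println(names == null ? 0 : names.length);
  }
}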
@ -1396,7 +1396,6 @@ public class FsVolumeImpl implements FsVolumeSpi {
verifyFileLocation(blockFile, bpFinalizedDir, blockId);
report.add(new ScanInfo(blockId, blockFile, metaFile, this));
}
return report;
}
/**

View File

@ -17,15 +17,17 @@
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_LOAD_RETRIES;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
@ -41,21 +43,23 @@ import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.FileRegion;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
import org.apache.hadoop.util.Timer;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Timer;
import org.codehaus.jackson.annotate.JsonProperty;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.ObjectReader;
@ -63,11 +67,6 @@ import org.codehaus.jackson.map.ObjectWriter;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Time;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_LOAD_RETRIES;
/**
* This class is used to create provided volumes.
*/
@ -227,7 +226,7 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
// nothing to do!
}
public void compileReport(LinkedList<ScanInfo> report,
public void compileReport(Collection<ScanInfo> report,
ReportCompiler reportCompiler)
throws IOException, InterruptedException {
/* refresh the aliasMap and return the list of blocks found.
@ -240,9 +239,8 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
BlockAliasMap.Reader<FileRegion> reader = aliasMap.getReader(null, bpid);
for (FileRegion region : reader) {
reportCompiler.throttle();
report.add(new ScanInfo(region.getBlock().getBlockId(),
providedVolume, region,
region.getProvidedStorageLocation().getLength()));
report.add(new ScanInfo(region.getBlock().getBlockId(), providedVolume,
region, region.getProvidedStorageLocation().getLength()));
}
}
@ -336,7 +334,7 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
@Override
long getNumBlocks() {
long numBlocks = 0;
long numBlocks = 0L;
for (ProvidedBlockPoolSlice s : bpSlices.values()) {
numBlocks += s.getNumOfBlocks();
}
@ -381,7 +379,7 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
iterStartMs = Time.now();
lastSavedMs = iterStartMs;
atEnd = false;
lastBlockId = -1;
lastBlockId = -1L;
}
// The wall-clock ms since the epoch at which this iterator was last saved.
@ -533,7 +531,7 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
final RamDiskReplicaTracker ramDiskReplicaMap)
throws IOException {
LOG.info("Creating volumemap for provided volume " + this);
for(ProvidedBlockPoolSlice s : bpSlices.values()) {
for (ProvidedBlockPoolSlice s : bpSlices.values()) {
s.fetchVolumeMap(volumeMap, ramDiskReplicaMap, remoteFS);
}
}
@ -611,14 +609,12 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
}
@Override
public LinkedList<ScanInfo> compileReport(String bpid,
LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
throws InterruptedException, IOException {
LOG.info("Compiling report for volume: " + this + " bpid " + bpid);
if(bpSlices.containsKey(bpid)) {
public void compileReport(String bpid, Collection<ScanInfo> report,
ReportCompiler reportCompiler) throws InterruptedException, IOException {
LOG.info("Compiling report for volume: {}; bpid: {}", this, bpid);
if (bpSlices.containsKey(bpid)) {
bpSlices.get(bpid).compileReport(report, reportCompiler);
}
return report;
}
@Override

View File

@ -25,10 +25,11 @@ import java.net.URI;
import java.nio.channels.ClosedChannelException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
@ -183,8 +184,8 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
private boolean pinned = false;
BInfo(String bpid, Block b, boolean forWriting) throws IOException {
theBlock = new Block(b);
if (theBlock.getNumBytes() < 0) {
theBlock.setNumBytes(0);
if (theBlock.getNumBytes() < 0L) {
theBlock.setNumBytes(0L);
}
if (!getStorage(theBlock).alloc(bpid, theBlock.getNumBytes())) {
// expected length - actual length may
@ -260,7 +261,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
// We had allocated the expected length when block was created;
// adjust if necessary
long extraLen = finalSize - theBlock.getNumBytes();
if (extraLen > 0) {
if (extraLen > 0L) {
if (!getStorage(theBlock).alloc(bpid, extraLen)) {
DataNode.LOG.warn("Lack of free storage on a block alloc");
throw new IOException("Creating block, no free space available");
@ -402,7 +403,8 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
* to {@link BlockPoolSlice}
*/
private static class SimulatedBPStorage {
private long used; // in bytes
// in bytes
private long used;
private final Map<Block, BInfo> blockMap = new TreeMap<>();
long getUsed() {
@ -422,7 +424,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
}
SimulatedBPStorage() {
used = 0;
used = 0L;
}
}
@ -447,7 +449,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
}
synchronized long getUsed() {
long used = 0;
long used = 0L;
for (SimulatedBPStorage bpStorage : map.values()) {
used += bpStorage.getUsed();
}
@ -635,10 +637,9 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
}
@Override
public LinkedList<ScanInfo> compileReport(String bpid,
LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
public void compileReport(String bpid,
Collection<ScanInfo> report, ReportCompiler reportCompiler)
throws InterruptedException, IOException {
return null;
}
@Override
@ -662,7 +663,6 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
private final String datanodeUuid;
private final DataNode datanode;
public SimulatedFSDataset(DataStorage storage, Configuration conf) {
this(null, storage, conf);
}
@ -792,12 +792,12 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
@Override // FsDatasetSpi
public List<Long> getCacheReport(String bpid) {
return new LinkedList<Long>();
return Collections.emptyList();
}
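Returning Collections.emptyList() instead of a fresh LinkedList avoids an allocation per call and makes the emptiness explicit; the shared instance is immutable, which is fine for a result the caller only reads. A tiny sketch:

import java.util.Collections;
import java.util.List;

public class EmptyListSketch {
  static List<Long> cacheReport() {
    // One shared, immutable empty list for every caller.
    return Collections.emptyList();
  }

  public static void main(String[] args) {
    System.out.println(cacheReport().isEmpty()); // prints true
  }
}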
@Override // FSDatasetMBean
public long getCapacity() {
long total = 0;
long total = 0L;
for (SimulatedStorage storage : storages) {
total += storage.getCapacity();
}
@ -806,7 +806,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
@Override // FSDatasetMBean
public long getDfsUsed() {
long total = 0;
long total = 0L;
for (SimulatedStorage storage : storages) {
total += storage.getUsed();
}
@ -815,7 +815,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
@Override // FSDatasetMBean
public long getBlockPoolUsed(String bpid) throws IOException {
long total = 0;
long total = 0L;
for (SimulatedStorage storage : storages) {
total += storage.getBlockPoolUsed(bpid);
}
@ -824,8 +824,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
@Override // FSDatasetMBean
public long getRemaining() {
long total = 0;
long total = 0L;
for (SimulatedStorage storage : storages) {
total += storage.getFree();
}
@ -834,7 +833,6 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
@Override // FSDatasetMBean
public int getNumFailedVolumes() {
int total = 0;
for (SimulatedStorage storage : storages) {
total += storage.getNumFailedVolumes();
@ -849,12 +847,12 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
@Override // FSDatasetMBean
public long getLastVolumeFailureDate() {
return 0;
return 0L;
}
@Override // FSDatasetMBean
public long getEstimatedCapacityLostTotal() {
return 0;
return 0L;
}
@Override // FsDatasetSpi
@ -864,27 +862,27 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
@Override // FSDatasetMBean
public long getCacheUsed() {
return 0l;
return 0L;
}
@Override // FSDatasetMBean
public long getCacheCapacity() {
return 0l;
return 0L;
}
@Override // FSDatasetMBean
public long getNumBlocksCached() {
return 0l;
return 0L;
}
@Override
public long getNumBlocksFailedToCache() {
return 0l;
return 0L;
}
@Override
public long getNumBlocksFailedToUncache() {
return 0l;
return 0L;
}
/**
@ -931,7 +929,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
} catch (IOException ioe) {
// Ignore
}
return r == null? "null": r.toString();
return Objects.toString(r);
}
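Objects.toString(r) collapses the null check and toString call into one expression, returning the literal "null" for a null replica just as the old ternary did. A one-method sketch:

import java.util.Objects;

public class NullSafeToStringSketch {
  public static void main(String[] args) {
    Object replica = null;
    // String.valueOf semantics: the string "null" for a null argument.
    System.out.println(Objects.toString(replica)); // prints null
  }
}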
@Override // FsDatasetSpi
@ -1013,8 +1011,8 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
*
* @param b The block to check.
* @param minLength The minimum length that the block must have. May be 0.
* @param state If this is null, it is ignored. If it is non-null, we
* will check that the replica has this state.
* @param state If this is null, it is ignored. If it is non-null, we will
* check that the replica has this state.
*
* @throws ReplicaNotFoundException If the replica is not found
*
@ -1157,9 +1155,8 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
throws IOException {
BInfo binfo = getBlockMap(b).get(b.getLocalBlock());
if (binfo == null) {
throw new IOException("No such Block " + b );
throw new IOException("No such Block " + b);
}
return binfo.getIStream();
}
@ -1183,7 +1180,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
) throws IOException {
BInfo binfo = getBlockMap(b).get(b.getLocalBlock());
if (binfo == null) {
throw new IOException("No such Block " + b );
throw new IOException("No such Block " + b);
}
if (!binfo.finalized) {
throw new IOException("Block " + b +
@ -1199,14 +1196,11 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
@Override // FsDatasetSpi
public synchronized void adjustCrcChannelPosition(ExtendedBlock b,
ReplicaOutputStreams stream,
int checksumSize)
throws IOException {
ReplicaOutputStreams stream, int checksumSize) throws IOException {
}
/**
* Simulated input and output streams
*
* Simulated input and output streams.
*/
static private class SimulatedInputStream extends java.io.InputStream {
final long length; // bytes
@ -1215,7 +1209,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
Block theBlock = null;
/**
* An input stream of size l with repeated bytes
* An input stream of size l with repeated bytes.
* @param l size of the stream
* @param iRepeatedData byte that is repeated in the stream
*/
@ -1254,7 +1248,6 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
@Override
public int read(byte[] b) throws IOException {
if (b == null) {
throw new NullPointerException();
}
@ -1280,7 +1273,6 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
/**
* This class implements an output stream that merely throws its data away, but records its
* length.
*
*/
static private class SimulatedOutputStream extends OutputStream {
long length = 0;
@ -1316,17 +1308,13 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
}
@Override
public void write(byte[] b,
int off,
int len) throws IOException {
public void write(byte[] b, int off, int len) throws IOException {
length += len;
}
}
private ObjectName mbeanName;
/**
* Register the FSDataset MBean using the name
* "hadoop:service=DataNode,name=FSDatasetState-<storageid>"
@ -1371,12 +1359,12 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
ExtendedBlock b = rBlock.getBlock();
BInfo binfo = getBlockMap(b).get(b.getLocalBlock());
if (binfo == null) {
throw new IOException("No such Block " + b );
throw new IOException("No such Block " + b);
}
return new ReplicaRecoveryInfo(binfo.getBlockId(), binfo.getBytesOnDisk(),
binfo.getGenerationStamp(),
binfo.isFinalized()?ReplicaState.FINALIZED : ReplicaState.RBW);
binfo.isFinalized() ? ReplicaState.FINALIZED : ReplicaState.RBW);
}
@Override // FsDatasetSpi

View File

@ -35,7 +35,6 @@ import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Executors;
@ -924,10 +923,9 @@ public class TestDirectoryScanner {
}
@Override
public LinkedList<ScanInfo> compileReport(String bpid,
LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
public void compileReport(String bpid,
Collection<ScanInfo> report, ReportCompiler reportCompiler)
throws InterruptedException, IOException {
return null;
}
@Override

View File

@ -22,14 +22,14 @@ import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.nio.channels.ClosedChannelException;
import java.util.LinkedList;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
@ -117,10 +117,8 @@ public class ExternalVolumeImpl implements FsVolumeSpi {
}
@Override
public LinkedList<ScanInfo> compileReport(String bpid,
LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
throws InterruptedException, IOException {
return null;
public void compileReport(String bpid, Collection<ScanInfo> report,
ReportCompiler reportCompiler) throws InterruptedException, IOException {
}
@Override