HDFS-10638. Modifications to remove the assumption that StorageLocation is associated with java.io.File in Datanode. (Virajith Jalaparti via lei)
Parent: 1f8490a5ba
Commit: f209e93566
@@ -278,7 +278,7 @@ public abstract class Storage extends StorageInfo {
     public StorageDirectory(StorageLocation location) {
       // default dirType is null
-      this(location.getFile(), null, false, location);
+      this(null, false, location);
     }

     public StorageDirectory(File dir, StorageDirType dirType) {

@@ -304,20 +304,57 @@ public abstract class Storage extends StorageInfo {
       this(dir, dirType, isShared, null);
     }

-    public StorageDirectory(File dir, StorageDirType dirType,
+    /**
+     * Constructor
+     * @param dirType storage directory type
+     * @param isShared whether or not this dir is shared between two NNs. true
+     *          disables locking on the storage directory, false enables locking
+     * @param location the {@link StorageLocation} for this directory
+     */
+    public StorageDirectory(StorageDirType dirType, boolean isShared,
+        StorageLocation location) {
+      this(getStorageLocationFile(location), dirType, isShared, location);
+    }
+
+    /**
+     * Constructor
+     * @param bpid the block pool id
+     * @param dirType storage directory type
+     * @param isShared whether or not this dir is shared between two NNs. true
+     *          disables locking on the storage directory, false enables locking
+     * @param location the {@link StorageLocation} for this directory
+     */
+    public StorageDirectory(String bpid, StorageDirType dirType,
+        boolean isShared, StorageLocation location) {
+      this(new File(location.getBpURI(bpid, STORAGE_DIR_CURRENT)), dirType,
+          isShared, location);
+    }
+
+    private StorageDirectory(File dir, StorageDirType dirType,
         boolean isShared, StorageLocation location) {
       this.root = dir;
       this.lock = null;
       this.dirType = dirType;
       this.isShared = isShared;
       this.location = location;
-      assert location == null ||
+      assert location == null || dir == null ||
           dir.getAbsolutePath().startsWith(
-              location.getFile().getAbsolutePath()):
+              new File(location.getUri()).getAbsolutePath()):
         "The storage location and directory should be equal";
     }

+    private static File getStorageLocationFile(StorageLocation location) {
+      if (location == null) {
+        return null;
+      }
+      try {
+        return new File(location.getUri());
+      } catch (IllegalArgumentException e) {
+        //if location does not refer to a File
+        return null;
+      }
+    }
+
     /**
      * Get root directory of this storage
      */

@@ -932,6 +969,41 @@ public abstract class Storage extends StorageInfo {
     return false;
   }

+  /**
+   * Returns true if the storage directory on the given directory is already
+   * loaded.
+   * @param location the {@link StorageLocation}
+   * @throws IOException if failed to get canonical path.
+   */
+  protected boolean containsStorageDir(StorageLocation location)
+      throws IOException {
+    for (StorageDirectory sd : storageDirs) {
+      if (location.matchesStorageDirectory(sd)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Returns true if the storage directory on the given location is already
+   * loaded.
+   * @param location the {@link StorageLocation}
+   * @param bpid the block pool id
+   * @return true if the location matches to any existing storage directories
+   * @throws IOException IOException if failed to read location
+   * or storage directory path
+   */
+  protected boolean containsStorageDir(StorageLocation location, String bpid)
+      throws IOException {
+    for (StorageDirectory sd : storageDirs) {
+      if (location.matchesStorageDirectory(sd, bpid)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   /**
    * Return true if the layout of the given storage directory is from a version
    * of Hadoop prior to the introduction of the "current" and "previous"
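Note on the constructor changes above: the public constructors no longer take a File; the root directory is derived from the StorageLocation, and may legitimately be null when the location's URI does not name a local file (the relaxed assert now tolerates dir == null). A minimal stand-alone sketch of that derivation, mirroring getStorageLocationFile but not part of the patch; the paths are illustrative:

import java.io.File;
import java.net.URI;

public class RootFromLocationSketch {
  // A file: URI yields the root File; any other scheme yields null
  // instead of throwing out of the constructor chain.
  static File rootFor(URI locationUri) {
    if (locationUri == null) {
      return null;
    }
    try {
      return new File(locationUri);
    } catch (IllegalArgumentException e) {
      return null; // URI does not refer to a local file
    }
  }

  public static void main(String[] args) {
    System.out.println(rootFor(URI.create("file:/data/dn1"))); // /data/dn1
    System.out.println(rootFor(URI.create("hdfs://nn/a/b")));  // null
  }
}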
@@ -22,7 +22,6 @@ import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;

@@ -147,10 +146,11 @@ public class BlockPoolSliceStorage extends Storage {
    * @throws IOException
    */
   private StorageDirectory loadStorageDirectory(NamespaceInfo nsInfo,
-      File dataDir, StorageLocation location, StartupOption startOpt,
+      StorageLocation location, StartupOption startOpt,
       List<Callable<StorageDirectory>> callables, Configuration conf)
       throws IOException {
-    StorageDirectory sd = new StorageDirectory(dataDir, null, true, location);
+    StorageDirectory sd = new StorageDirectory(
+        nsInfo.getBlockPoolID(), null, true, location);
     try {
       StorageState curState = sd.analyzeStorage(startOpt, this, true);
       // sd is locked but not opened

@@ -158,11 +158,15 @@ public class BlockPoolSliceStorage extends Storage {
       case NORMAL:
         break;
       case NON_EXISTENT:
-        LOG.info("Block pool storage directory " + dataDir + " does not exist");
-        throw new IOException("Storage directory " + dataDir
-            + " does not exist");
+        LOG.info("Block pool storage directory for location " + location +
+            " and block pool id " + nsInfo.getBlockPoolID() +
+            " does not exist");
+        throw new IOException("Storage directory for location " + location +
+            " and block pool id " + nsInfo.getBlockPoolID() +
+            " does not exist");
       case NOT_FORMATTED: // format
-        LOG.info("Block pool storage directory " + dataDir
+        LOG.info("Block pool storage directory for location " + location +
+            " and block pool id " + nsInfo.getBlockPoolID()
             + " is not formatted for " + nsInfo.getBlockPoolID()
             + ". Formatting ...");
         format(sd, nsInfo);

@@ -208,21 +212,19 @@ public class BlockPoolSliceStorage extends Storage {
    * @throws IOException on error
    */
   List<StorageDirectory> loadBpStorageDirectories(NamespaceInfo nsInfo,
-      Collection<File> dataDirs, StorageLocation location,
-      StartupOption startOpt, List<Callable<StorageDirectory>> callables,
-      Configuration conf) throws IOException {
+      StorageLocation location, StartupOption startOpt,
+      List<Callable<StorageDirectory>> callables, Configuration conf)
+      throws IOException {
     List<StorageDirectory> succeedDirs = Lists.newArrayList();
     try {
-      for (File dataDir : dataDirs) {
-        if (containsStorageDir(dataDir)) {
-          throw new IOException(
-              "BlockPoolSliceStorage.recoverTransitionRead: " +
-                  "attempt to load an used block storage: " + dataDir);
-        }
-        final StorageDirectory sd = loadStorageDirectory(
-            nsInfo, dataDir, location, startOpt, callables, conf);
-        succeedDirs.add(sd);
+      if (containsStorageDir(location, nsInfo.getBlockPoolID())) {
+        throw new IOException(
+            "BlockPoolSliceStorage.recoverTransitionRead: " +
+            "attempt to load an used block storage: " + location);
       }
+      final StorageDirectory sd = loadStorageDirectory(
+          nsInfo, location, startOpt, callables, conf);
+      succeedDirs.add(sd);
     } catch (IOException e) {
       LOG.warn("Failed to analyze storage directories for block pool "
           + nsInfo.getBlockPoolID(), e);

@@ -244,12 +246,12 @@ public class BlockPoolSliceStorage extends Storage {
    * @throws IOException on error
    */
   List<StorageDirectory> recoverTransitionRead(NamespaceInfo nsInfo,
-      Collection<File> dataDirs, StorageLocation location,
-      StartupOption startOpt, List<Callable<StorageDirectory>> callables,
-      Configuration conf) throws IOException {
+      StorageLocation location, StartupOption startOpt,
+      List<Callable<StorageDirectory>> callables, Configuration conf)
+      throws IOException {
     LOG.info("Analyzing storage directories for bpid " + nsInfo.getBlockPoolID());
     final List<StorageDirectory> loaded = loadBpStorageDirectories(
-        nsInfo, dataDirs, location, startOpt, callables, conf);
+        nsInfo, location, startOpt, callables, conf);
     for (StorageDirectory sd : loaded) {
       addStorageDir(sd);
     }
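The load methods above now take a single StorageLocation per call instead of a Collection<File>, and the duplicate-load guard keys on the location plus block pool id. The bpid variant of matchesStorageDirectory (added to StorageLocation later in this diff) boils down to a normalized-URI comparison of `<location>/current/<bpid>` against the directory root; a small self-contained sketch with made-up paths:

import java.io.File;
import java.net.URI;

public class BpMatchSketch {
  public static void main(String[] args) {
    String bpid = "BP-1234-127.0.0.1-42"; // invented block pool id
    URI location = new File("/data/dn1").toURI();
    // <location>/current/<bpid>, composed the way getBpURI() composes it:
    URI bpRoot =
        new File(new File(new File(location), "current"), bpid).toURI();
    // Root a StorageDirectory for that block pool would report:
    URI sdRoot = new File("/data/dn1/current/" + bpid).toURI();
    System.out.println(bpRoot.normalize().equals(sdRoot.normalize())); // true
  }
}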
@@ -648,7 +648,7 @@ public class DataNode extends ReconfigurableBase
     // Use the existing StorageLocation to detect storage type changes.
     Map<String, StorageLocation> existingLocations = new HashMap<>();
     for (StorageLocation loc : getStorageLocations(getConf())) {
-      existingLocations.put(loc.getFile().getCanonicalPath(), loc);
+      existingLocations.put(loc.getNormalizedUri().toString(), loc);
     }

     ChangedVolumes results = new ChangedVolumes();

@@ -661,11 +661,10 @@ public class DataNode extends ReconfigurableBase
       for (Iterator<StorageLocation> sl = results.newLocations.iterator();
            sl.hasNext(); ) {
         StorageLocation location = sl.next();
-        if (location.getFile().getCanonicalPath().equals(
-            dir.getRoot().getCanonicalPath())) {
+        if (location.matchesStorageDirectory(dir)) {
           sl.remove();
           StorageLocation old = existingLocations.get(
-              location.getFile().getCanonicalPath());
+              location.getNormalizedUri().toString());
           if (old != null &&
               old.getStorageType() != location.getStorageType()) {
             throw new IOException("Changing storage type is not allowed.");

@@ -2676,7 +2675,7 @@ public class DataNode extends ReconfigurableBase
         locations.add(location);
       } catch (IOException ioe) {
         LOG.warn("Invalid " + DFS_DATANODE_DATA_DIR_KEY + " "
-            + location.getFile() + " : ", ioe);
+            + location + " : ", ioe);
         invalidDirs.append("\"").append(uri.getPath()).append("\" ");
       }
     }
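The reconfiguration map above is now keyed by getNormalizedUri().toString() rather than a canonical file path, so equivalent spellings of the same directory must collapse to one key. URI.normalize() does the dot-segment cleanup; a plain-java illustration with invented paths:

import java.net.URI;

public class NormalizedKeySketch {
  public static void main(String[] args) {
    URI a = URI.create("file:/data/./dn1");
    URI b = URI.create("file:/data/dn1");
    System.out.println(a.toString().equals(b.toString()));             // false
    System.out.println(a.normalize().toString().equals(b.toString())); // true
  }
}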
@@ -46,15 +46,10 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

@@ -66,8 +61,6 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.DiskChecker;
-
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ComparisonChain;
 import com.google.common.collect.Lists;

@@ -263,10 +256,9 @@ public class DataStorage extends Storage {
   }

   private StorageDirectory loadStorageDirectory(DataNode datanode,
-      NamespaceInfo nsInfo, File dataDir, StorageLocation location,
-      StartupOption startOpt, List<Callable<StorageDirectory>> callables)
-      throws IOException {
-    StorageDirectory sd = new StorageDirectory(dataDir, null, false, location);
+      NamespaceInfo nsInfo, StorageLocation location, StartupOption startOpt,
+      List<Callable<StorageDirectory>> callables) throws IOException {
+    StorageDirectory sd = new StorageDirectory(null, false, location);
     try {
       StorageState curState = sd.analyzeStorage(startOpt, this, true);
       // sd is locked but not opened

@@ -274,11 +266,12 @@ public class DataStorage extends Storage {
       case NORMAL:
         break;
       case NON_EXISTENT:
-        LOG.info("Storage directory " + dataDir + " does not exist");
-        throw new IOException("Storage directory " + dataDir
+        LOG.info("Storage directory with location " + location
+            + " does not exist");
+        throw new IOException("Storage directory with location " + location
             + " does not exist");
       case NOT_FORMATTED: // format
-        LOG.info("Storage directory " + dataDir
+        LOG.info("Storage directory with location " + location
             + " is not formatted for namespace " + nsInfo.getNamespaceID()
             + ". Formatting...");
         format(sd, nsInfo, datanode.getDatanodeUuid());

@@ -322,28 +315,22 @@ public class DataStorage extends Storage {
   public VolumeBuilder prepareVolume(DataNode datanode,
       StorageLocation location, List<NamespaceInfo> nsInfos)
       throws IOException {
-    File volume = location.getFile();
-    if (containsStorageDir(volume)) {
+    if (containsStorageDir(location)) {
       final String errorMessage = "Storage directory is in use";
       LOG.warn(errorMessage + ".");
       throw new IOException(errorMessage);
     }

     StorageDirectory sd = loadStorageDirectory(
-        datanode, nsInfos.get(0), volume, location,
-        StartupOption.HOTSWAP, null);
+        datanode, nsInfos.get(0), location, StartupOption.HOTSWAP, null);
     VolumeBuilder builder =
         new VolumeBuilder(this, sd);
     for (NamespaceInfo nsInfo : nsInfos) {
-      List<File> bpDataDirs = Lists.newArrayList();
-      bpDataDirs.add(BlockPoolSliceStorage.getBpRoot(
-          nsInfo.getBlockPoolID(), new File(volume, STORAGE_DIR_CURRENT)));
-      makeBlockPoolDataDir(bpDataDirs, null);
-
+      location.makeBlockPoolDir(nsInfo.getBlockPoolID(), null);
       final BlockPoolSliceStorage bpStorage = getBlockPoolSliceStorage(nsInfo);
       final List<StorageDirectory> dirs = bpStorage.loadBpStorageDirectories(
-          nsInfo, bpDataDirs, location, StartupOption.HOTSWAP,
-          null, datanode.getConf());
+          nsInfo, location, StartupOption.HOTSWAP, null, datanode.getConf());
       builder.addBpStorageDirectories(nsInfo.getBlockPoolID(), dirs);
     }
     return builder;

@@ -405,14 +392,13 @@ public class DataStorage extends Storage {
     final List<StorageLocation> success = Lists.newArrayList();
     final List<UpgradeTask> tasks = Lists.newArrayList();
     for (StorageLocation dataDir : dataDirs) {
-      File root = dataDir.getFile();
-      if (!containsStorageDir(root)) {
+      if (!containsStorageDir(dataDir)) {
         try {
           // It first ensures the datanode level format is completed.
           final List<Callable<StorageDirectory>> callables
              = Lists.newArrayList();
           final StorageDirectory sd = loadStorageDirectory(
-              datanode, nsInfo, root, dataDir, startOpt, callables);
+              datanode, nsInfo, dataDir, startOpt, callables);
           if (callables.isEmpty()) {
             addStorageDir(sd);
             success.add(dataDir);

@@ -455,16 +441,11 @@ public class DataStorage extends Storage {
     final List<StorageDirectory> success = Lists.newArrayList();
     final List<UpgradeTask> tasks = Lists.newArrayList();
     for (StorageLocation dataDir : dataDirs) {
-      final File curDir = new File(dataDir.getFile(), STORAGE_DIR_CURRENT);
-      List<File> bpDataDirs = new ArrayList<File>();
-      bpDataDirs.add(BlockPoolSliceStorage.getBpRoot(bpid, curDir));
+      dataDir.makeBlockPoolDir(bpid, null);
       try {
-        makeBlockPoolDataDir(bpDataDirs, null);
-
         final List<Callable<StorageDirectory>> callables = Lists.newArrayList();
         final List<StorageDirectory> dirs = bpStorage.recoverTransitionRead(
-            nsInfo, bpDataDirs, dataDir, startOpt,
-            callables, datanode.getConf());
+            nsInfo, dataDir, startOpt, callables, datanode.getConf());
         if (callables.isEmpty()) {
           for(StorageDirectory sd : dirs) {
             success.add(sd);

@@ -566,34 +547,6 @@ public class DataStorage extends Storage {
     }
   }

-  /**
-   * Create physical directory for block pools on the data node
-   *
-   * @param dataDirs
-   *          List of data directories
-   * @param conf
-   *          Configuration instance to use.
-   * @throws IOException on errors
-   */
-  static void makeBlockPoolDataDir(Collection<File> dataDirs,
-      Configuration conf) throws IOException {
-    if (conf == null)
-      conf = new HdfsConfiguration();
-
-    LocalFileSystem localFS = FileSystem.getLocal(conf);
-    FsPermission permission = new FsPermission(conf.get(
-        DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
-        DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
-    for (File data : dataDirs) {
-      try {
-        DiskChecker.checkDir(localFS, new Path(data.toURI()), permission);
-      } catch ( IOException e ) {
-        LOG.warn("Invalid directory in: " + data.getCanonicalPath() + ": "
-            + e.getMessage());
-      }
-    }
-  }
-
   void format(StorageDirectory sd, NamespaceInfo nsInfo,
       String datanodeUuid) throws IOException {
     sd.clearDirectory(); // create directory
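makeBlockPoolDataDir is deleted here because its job moves onto the location itself as StorageLocation.makeBlockPoolDir (added later in this diff). A hedged usage sketch of the new call path; the storage-type prefix, path, and block pool id are invented, and running it assumes the path is writable:

import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

public class MakeBpDirSketch {
  public static void main(String[] args) throws IOException {
    StorageLocation location = StorageLocation.parse("[DISK]/data/dn1");
    // Creates and disk-checks /data/dn1/current/<bpid> with the configured
    // permissions; a null conf falls back to a fresh HdfsConfiguration.
    location.makeBlockPoolDir("BP-1234-127.0.0.1-42", null);
  }
}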
@@ -351,7 +351,13 @@ abstract public class LocalReplica extends ReplicaInfo {
   @Override
   public void updateWithReplica(StorageLocation replicaLocation) {
     // for local replicas, the replica location is assumed to be a file.
-    File diskFile = replicaLocation.getFile();
+    File diskFile = null;
+    try {
+      diskFile = new File(replicaLocation.getUri());
+    } catch (IllegalArgumentException e) {
+      diskFile = null;
+    }
+
     if (null == diskFile) {
       setDirInternal(null);
     } else {
@@ -23,11 +23,21 @@ import java.util.regex.Pattern;
 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.regex.Matcher;

 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.StringUtils;

@@ -40,8 +50,7 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceAudience.Private
 public class StorageLocation implements Comparable<StorageLocation>{
   final StorageType storageType;
-  final File file;
+  private final URI baseURI;

   /** Regular expression that describes a storage uri with a storage type.
    *  e.g. [Disk]/storages/storage1/
    */

@@ -49,26 +58,41 @@ public class StorageLocation implements Comparable<StorageLocation>{

   private StorageLocation(StorageType storageType, URI uri) {
     this.storageType = storageType;
-    if (uri.getScheme() == null ||
-        "file".equalsIgnoreCase(uri.getScheme())) {
-      // drop any (illegal) authority in the URI for backwards compatibility
-      this.file = new File(uri.getPath());
-    } else {
-      throw new IllegalArgumentException("Unsupported URI schema in " + uri);
+    if (uri.getScheme() == null || uri.getScheme().equals("file")) {
+      // make sure all URIs that point to a file have the same scheme
+      try {
+        File uriFile = new File(uri.getPath());
+        String absPath = uriFile.getAbsolutePath();
+        uri = new URI("file", null, absPath, uri.getQuery(), uri.getFragment());
+      } catch (URISyntaxException e) {
+        throw new IllegalArgumentException(
+            "URI: " + uri + " is not in the expected format");
+      }
     }
+    baseURI = uri;
   }

   public StorageType getStorageType() {
     return this.storageType;
   }

-  URI getUri() {
-    return file.toURI();
+  public URI getUri() {
+    return baseURI;
   }

-  public File getFile() {
-    return this.file;
+  public URI getNormalizedUri() {
+    return baseURI.normalize();
+  }
+
+  public boolean matchesStorageDirectory(StorageDirectory sd)
+      throws IOException {
+    return this.equals(sd.getStorageLocation());
+  }
+
+  public boolean matchesStorageDirectory(StorageDirectory sd,
+      String bpid) throws IOException {
+    return this.getBpURI(bpid, Storage.STORAGE_DIR_CURRENT).normalize()
+        .equals(sd.getRoot().toURI().normalize());
   }

   /**

@@ -94,13 +118,14 @@ public class StorageLocation implements Comparable<StorageLocation>{
           StorageType.valueOf(StringUtils.toUpperCase(classString));
       }
     }
+    //do Path.toURI instead of new URI(location) as this ensures that
+    //"/a/b" and "/a/b/" are represented in a consistent manner
     return new StorageLocation(storageType, new Path(location).toUri());
   }

   @Override
   public String toString() {
-    return "[" + storageType + "]" + file.toURI();
+    return "[" + storageType + "]" + baseURI.normalize();
   }

   @Override

@@ -126,16 +151,56 @@ public class StorageLocation implements Comparable<StorageLocation>{
     }

     StorageLocation otherStorage = (StorageLocation) obj;
-    if (this.getFile() != null && otherStorage.getFile() != null) {
-      return this.getFile().getAbsolutePath().compareTo(
-          otherStorage.getFile().getAbsolutePath());
-    } else if (this.getFile() == null && otherStorage.getFile() == null) {
+    if (this.getNormalizedUri() != null &&
+        otherStorage.getNormalizedUri() != null) {
+      return this.getNormalizedUri().compareTo(
+          otherStorage.getNormalizedUri());
+    } else if (this.getNormalizedUri() == null &&
+        otherStorage.getNormalizedUri() == null) {
       return this.storageType.compareTo(otherStorage.getStorageType());
-    } else if (this.getFile() == null) {
+    } else if (this.getNormalizedUri() == null) {
       return -1;
     } else {
       return 1;
     }
   }

+  public URI getBpURI(String bpid, String currentStorageDir) {
+    try {
+      File localFile = new File(getUri());
+      return new File(new File(localFile, currentStorageDir), bpid).toURI();
+    } catch (IllegalArgumentException e) {
+      return null;
+    }
+  }
+
+  /**
+   * Create physical directory for block pools on the data node.
+   *
+   * @param blockPoolID
+   *          the block pool id
+   * @param conf
+   *          Configuration instance to use.
+   * @throws IOException on errors
+   */
+  public void makeBlockPoolDir(String blockPoolID,
+      Configuration conf) throws IOException {
+
+    if (conf == null) {
+      conf = new HdfsConfiguration();
+    }
+
+    LocalFileSystem localFS = FileSystem.getLocal(conf);
+    FsPermission permission = new FsPermission(conf.get(
+        DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
+        DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
+    File data = new File(getBpURI(blockPoolID, Storage.STORAGE_DIR_CURRENT));
+    try {
+      DiskChecker.checkDir(localFS, new Path(data.toURI()), permission);
+    } catch (IOException e) {
+      DataStorage.LOG.warn("Invalid directory in: " + data.getCanonicalPath() +
+          ": " + e.getMessage());
+    }
+  }
 }
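This class is the heart of the change: the File field is replaced by a baseURI, scheme-less and file URIs are rewritten to absolute file: URIs in the constructor, and every path that used to come from getFile() now flows through getUri()/getNormalizedUri(). A short behavioral sketch against the patched class; the path is illustrative and the patched jars are assumed on the classpath:

import java.io.File;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

public class LocationSketch {
  public static void main(String[] args) throws Exception {
    // The "[DISK]" prefix selects the storage type; a bare path gets the
    // default type.
    StorageLocation disk = StorageLocation.parse("[DISK]/data/dn1");
    System.out.println(disk.getStorageType() == StorageType.DISK); // true
    // The location is now a file: URI, not a File.
    System.out.println(disk.getUri().getScheme());                 // file
    // Callers needing a java.io.File convert explicitly; this throws
    // IllegalArgumentException if the URI is not a local file URI.
    File root = new File(disk.getUri());
    System.out.println(root.getAbsolutePath());                    // /data/dn1
  }
}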
@@ -562,16 +562,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     }
   }

-  private StorageType getStorageTypeFromLocations(
-      Collection<StorageLocation> dataLocations, File dir) {
-    for (StorageLocation dataLocation : dataLocations) {
-      if (dataLocation.getFile().equals(dir)) {
-        return dataLocation.getStorageType();
-      }
-    }
-    return StorageType.DEFAULT;
-  }
-
   /**
    * Return the total space used by dfs datanode
    */

@@ -635,7 +625,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         infos.length);
     for (VolumeFailureInfo info: infos) {
       failedStorageLocations.add(
-          info.getFailedStorageLocation().getFile().getAbsolutePath());
+          info.getFailedStorageLocation().getNormalizedUri().toString());
     }
     return failedStorageLocations.toArray(
         new String[failedStorageLocations.size()]);

@@ -674,7 +664,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     long estimatedCapacityLostTotal = 0;
     for (VolumeFailureInfo info: infos) {
       failedStorageLocations.add(
-          info.getFailedStorageLocation().getFile().getAbsolutePath());
+          info.getFailedStorageLocation().getNormalizedUri().toString());
       long failureDate = info.getFailureDate();
       if (failureDate > lastVolumeFailureDate) {
         lastVolumeFailureDate = failureDate;
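Failed volumes are now reported as URI strings rather than absolute paths, which is the string-format shift the test changes below compensate for. A plain-java sketch of the difference, with an invented path:

import java.io.File;

public class FailedLocationFormatSketch {
  public static void main(String[] args) {
    File volume = new File("/data/dn1");
    System.out.println(volume.getAbsolutePath());  // old style: /data/dn1
    System.out.println(volume.toURI().toString()); // new style: file:/data/dn1
  }
}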
@@ -231,9 +231,9 @@ public class TestNameNodePrunesMissingStorages {
     // it would be re-initialized with a new storage ID.)
     assertNotNull(volumeLocationToRemove);
     datanodeToRemoveStorageFrom.shutdown();
-    FileUtil.fullyDelete(volumeLocationToRemove.getFile());
+    FileUtil.fullyDelete(new File(volumeLocationToRemove.getUri()));
     FileOutputStream fos = new FileOutputStream(
-        volumeLocationToRemove.getFile().toString());
+        new File(volumeLocationToRemove.getUri()));
     try {
       fos.write(1);
     } finally {

@@ -327,7 +327,8 @@ public class TestNameNodePrunesMissingStorages {
     final String newStorageId = DatanodeStorage.generateUuid();
     try {
       File currentDir = new File(
-          volumeRefs.get(0).getStorageLocation().getFile(), "current");
+          new File(volumeRefs.get(0).getStorageLocation().getUri()),
+          "current");
       File versionFile = new File(currentDir, "VERSION");
       rewriteVersionFile(versionFile, newStorageId);
     } finally {
@@ -29,6 +29,7 @@ import static org.junit.Assert.assertFalse;
 import java.io.Closeable;
 import java.io.File;
 import java.io.IOException;
+import java.net.URI;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;

@@ -549,9 +550,8 @@ public class TestBlockScanner {
       info.shouldRun = false;
     }
     ctx.datanode.shutdown();
-    String vPath = ctx.volumes.get(0).getStorageLocation()
-        .getFile().getAbsolutePath();
-    File cursorPath = new File(new File(new File(vPath, "current"),
+    URI vURI = ctx.volumes.get(0).getStorageLocation().getUri();
+    File cursorPath = new File(new File(new File(new File(vURI), "current"),
         ctx.bpids[0]), "scanner.cursor");
     assertTrue("Failed to find cursor save file in " +
         cursorPath.getAbsolutePath(), cursorPath.exists());
@@ -114,7 +114,8 @@ public class TestDataDirs {
     List<StorageLocation> checkedLocations =
         DataNode.checkStorageLocations(locations, fs, diskChecker);
     assertEquals("number of valid data dirs", 1, checkedLocations.size());
-    String validDir = checkedLocations.iterator().next().getFile().getPath();
+    String validDir =
+        new File(checkedLocations.iterator().next().getUri()).getPath();
     assertThat("p3 should be valid", new File("/p3/").getPath(), is(validDir));
   }
 }
@@ -221,7 +221,7 @@ public class TestDataNodeHotSwapVolumes {
     }
     assertFalse(oldLocations.isEmpty());

-    String newPaths = oldLocations.get(0).getFile().getAbsolutePath() +
+    String newPaths = new File(oldLocations.get(0).getUri()).getAbsolutePath() +
         ",/foo/path1,/foo/path2";

     DataNode.ChangedVolumes changedVolumes =

@@ -229,18 +229,18 @@ public class TestDataNodeHotSwapVolumes {
     List<StorageLocation> newVolumes = changedVolumes.newLocations;
     assertEquals(2, newVolumes.size());
     assertEquals(new File("/foo/path1").getAbsolutePath(),
-        newVolumes.get(0).getFile().getAbsolutePath());
+        new File(newVolumes.get(0).getUri()).getAbsolutePath());
     assertEquals(new File("/foo/path2").getAbsolutePath(),
-        newVolumes.get(1).getFile().getAbsolutePath());
+        new File(newVolumes.get(1).getUri()).getAbsolutePath());

     List<StorageLocation> removedVolumes = changedVolumes.deactivateLocations;
     assertEquals(1, removedVolumes.size());
-    assertEquals(oldLocations.get(1).getFile(),
-        removedVolumes.get(0).getFile());
+    assertEquals(oldLocations.get(1).getNormalizedUri(),
+        removedVolumes.get(0).getNormalizedUri());

     assertEquals(1, changedVolumes.unchangedLocations.size());
-    assertEquals(oldLocations.get(0).getFile(),
-        changedVolumes.unchangedLocations.get(0).getFile());
+    assertEquals(oldLocations.get(0).getNormalizedUri(),
+        changedVolumes.unchangedLocations.get(0).getNormalizedUri());
   }

   @Test

@@ -519,7 +519,7 @@ public class TestDataNodeHotSwapVolumes {
         DFSTestUtil.getAllBlocks(fs, testFile).get(1).getBlock();
     FsVolumeSpi volumeWithBlock = dn.getFSDataset().getVolume(block);
     String dirWithBlock = "[" + volumeWithBlock.getStorageType() + "]" +
-        volumeWithBlock.getStorageLocation().getFile().toURI();
+        volumeWithBlock.getStorageLocation().getUri();
     String newDirs = dirWithBlock;
     for (String dir : oldDirs) {
       if (dirWithBlock.startsWith(dir)) {

@@ -577,7 +577,7 @@ public class TestDataNodeHotSwapVolumes {
     try (FsDatasetSpi.FsVolumeReferences volumes =
         dataset.getFsVolumeReferences()) {
       for (FsVolumeSpi volume : volumes) {
-        assertThat(volume.getStorageLocation().getFile().toString(),
+        assertThat(new File(volume.getStorageLocation().getUri()).toString(),
             is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
       }
     }

@@ -593,8 +593,10 @@ public class TestDataNodeHotSwapVolumes {
         dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY).split(",");
     assertEquals(4, effectiveVolumes.length);
     for (String ev : effectiveVolumes) {
-      assertThat(StorageLocation.parse(ev).getFile().getCanonicalPath(),
-          is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
+      assertThat(
+          new File(StorageLocation.parse(ev).getUri()).getCanonicalPath(),
+          is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2)))))
+      );
     }
   }
@@ -253,7 +253,7 @@ public class TestDataNodeVolumeFailure {
     FsDatasetSpi<? extends FsVolumeSpi> data = dn0.getFSDataset();
     try (FsDatasetSpi.FsVolumeReferences vols = data.getFsVolumeReferences()) {
       for (FsVolumeSpi volume : vols) {
-        assertFalse(volume.getStorageLocation().getFile()
+        assertFalse(new File(volume.getStorageLocation().getUri())
             .getAbsolutePath().startsWith(dn0Vol1.getAbsolutePath()
         ));
       }

@@ -262,7 +262,7 @@ public class TestDataNodeVolumeFailure {
     // 3. all blocks on dn0Vol1 have been removed.
     for (ReplicaInfo replica : FsDatasetTestUtil.getReplicas(data, bpid)) {
       assertNotNull(replica.getVolume());
-      assertFalse(replica.getVolume().getStorageLocation().getFile()
+      assertFalse(new File(replica.getVolume().getStorageLocation().getUri())
           .getAbsolutePath().startsWith(dn0Vol1.getAbsolutePath()
       ));
     }
@@ -29,6 +29,8 @@ import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;

 import java.io.File;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.ArrayList;

 import org.apache.commons.logging.Log;

@@ -467,7 +469,8 @@ public class TestDataNodeVolumeFailureReporting {
     DataNodeTestUtils.triggerHeartbeat(dn);
     FsDatasetSpi<?> fsd = dn.getFSDataset();
     assertEquals(expectedFailedVolumes.length, fsd.getNumFailedVolumes());
-    assertArrayEquals(expectedFailedVolumes, fsd.getFailedStorageLocations());
+    assertArrayEquals(expectedFailedVolumes,
+        convertToAbsolutePaths(fsd.getFailedStorageLocations()));
     // there shouldn't be any more volume failures due to I/O failure
     checkFailuresAtDataNode(dn, 0, false, expectedFailedVolumes);

@@ -550,7 +553,8 @@ public class TestDataNodeVolumeFailureReporting {
     }
     LOG.info(strBuilder.toString());
     assertEquals(expectedFailedVolumes.length, fsd.getNumFailedVolumes());
-    assertArrayEquals(expectedFailedVolumes, fsd.getFailedStorageLocations());
+    assertArrayEquals(expectedFailedVolumes,
+        convertToAbsolutePaths(fsd.getFailedStorageLocations()));
     if (expectedFailedVolumes.length > 0) {
       assertTrue(fsd.getLastVolumeFailureDate() > 0);
       long expectedCapacityLost = getExpectedCapacityLost(expectCapacityKnown,

@@ -582,8 +586,9 @@ public class TestDataNodeVolumeFailureReporting {
     assertEquals(expectedFailedVolumes.length, dd.getVolumeFailures());
     VolumeFailureSummary volumeFailureSummary = dd.getVolumeFailureSummary();
     if (expectedFailedVolumes.length > 0) {
-      assertArrayEquals(expectedFailedVolumes, volumeFailureSummary
-          .getFailedStorageLocations());
+      assertArrayEquals(expectedFailedVolumes,
+          convertToAbsolutePaths(volumeFailureSummary
+              .getFailedStorageLocations()));
       assertTrue(volumeFailureSummary.getLastVolumeFailureDate() > 0);
       long expectedCapacityLost = getExpectedCapacityLost(expectCapacityKnown,
           expectedFailedVolumes.length);

@@ -594,6 +599,30 @@ public class TestDataNodeVolumeFailureReporting {
     }
   }

+  /**
+   * Converts the provided paths to absolute file paths.
+   * @param locations the array of paths
+   * @return array of absolute paths
+   */
+  private String[] convertToAbsolutePaths(String[] locations) {
+    if (locations == null || locations.length == 0) {
+      return new String[0];
+    }
+
+    String[] absolutePaths = new String[locations.length];
+    for (int count = 0; count < locations.length; count++) {
+      try {
+        absolutePaths[count] = new File(new URI(locations[count]))
+            .getAbsolutePath();
+      } catch (URISyntaxException e) {
+        //if the provided location is not an URI,
+        //we use it as the absolute path
+        absolutePaths[count] = locations[count];
+      }
+    }
+    return absolutePaths;
+  }
+
   /**
    * Returns expected capacity lost for use in assertions. The return value is
    * dependent on whether or not it is expected that the volume capacities were
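convertToAbsolutePaths above undoes the URI-string reporting change for these assertions. A stand-alone variant of the same fallback logic; note it is slightly more defensive than the helper, which only catches URISyntaxException, and the inputs are invented:

import java.io.File;
import java.net.URI;
import java.net.URISyntaxException;

public class ConvertSketch {
  // URI strings become absolute paths; anything that is not a usable URI
  // is passed through untouched.
  static String toAbsolute(String location) {
    try {
      return new File(new URI(location)).getAbsolutePath();
    } catch (URISyntaxException | IllegalArgumentException e) {
      return location; // not a file URI: use as-is
    }
  }

  public static void main(String[] args) {
    System.out.println(toAbsolute("file:/data/dn1")); // /data/dn1
    System.out.println(toAbsolute("/data/dn1"));      // /data/dn1
  }
}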
@@ -142,8 +142,8 @@ public class TestDataStorage {
     for (NamespaceInfo ni : namespaceInfos) {
       storage.addStorageLocations(mockDN, ni, locations, START_OPT);
       for (StorageLocation sl : locations) {
-        checkDir(sl.getFile());
-        checkDir(sl.getFile(), ni.getBlockPoolID());
+        checkDir(new File(sl.getUri()));
+        checkDir(new File(sl.getUri()), ni.getBlockPoolID());
       }
     }

@@ -173,8 +173,7 @@ public class TestDataStorage {
     List<StorageLocation> locations = createStorageLocations(numLocations);

     StorageLocation firstStorage = locations.get(0);
-    Storage.StorageDirectory sd = new Storage.StorageDirectory(
-        firstStorage.getFile());
+    Storage.StorageDirectory sd = new Storage.StorageDirectory(firstStorage);
     // the directory is not initialized so VERSION does not exist
     // create a fake directory under current/
     File currentDir = new File(sd.getCurrentDir(),
@@ -189,8 +189,8 @@ public class TestDirectoryScanner {
       // Volume without a copy of the block. Make a copy now.
       File sourceBlock = new File(b.getBlockURI());
       File sourceMeta = new File(b.getMetadataURI());
-      URI sourceRoot = b.getVolume().getStorageLocation().getFile().toURI();
-      URI destRoot = v.getStorageLocation().getFile().toURI();
+      URI sourceRoot = b.getVolume().getStorageLocation().getUri();
+      URI destRoot = v.getStorageLocation().getUri();

       String relativeBlockPath =
           sourceRoot.relativize(sourceBlock.toURI())
@@ -199,8 +199,7 @@ public class TestDiskError {
     try (FsDatasetSpi.FsVolumeReferences volumes =
         dn.getFSDataset().getFsVolumeReferences()) {
       for (FsVolumeSpi vol : volumes) {
-        String dir = vol.getStorageLocation().getFile().getAbsolutePath();
-        Path dataDir = new Path(dir);
+        Path dataDir = new Path(vol.getStorageLocation().getNormalizedUri());
         FsPermission actual = localFS.getFileStatus(dataDir).getPermission();
         assertEquals("Permission for dir: " + dataDir + ", is " + actual +
             ", while expected is " + expected, expected, actual);
@@ -100,7 +100,7 @@ public class FsDatasetTestUtil {
    */
   public static void assertFileLockReleased(String dir) throws IOException {
     StorageLocation sl = StorageLocation.parse(dir);
-    File lockFile = new File(sl.getFile(), Storage.STORAGE_FILE_LOCK);
+    File lockFile = new File(new File(sl.getUri()), Storage.STORAGE_FILE_LOCK);
     try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
         FileChannel channel = raf.getChannel()) {
       FileLock lock = channel.tryLock();
@@ -290,7 +290,7 @@ public class TestDFSAdmin {
         datanode.getConf());
     if (expectedSuccuss) {
       assertThat(locations.size(), is(1));
-      assertThat(locations.get(0).getFile(), is(newDir));
+      assertThat(new File(locations.get(0).getUri()), is(newDir));
       // Verify the directory is appropriately formatted.
       assertTrue(new File(newDir, Storage.STORAGE_DIR_CURRENT).isDirectory());
     } else {