diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index e55de353878..1f03fc27cd7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -278,7 +278,7 @@ public abstract class Storage extends StorageInfo {
     public StorageDirectory(StorageLocation location) {
       // default dirType is null
-      this(location.getFile(), null, false, location);
+      this(null, false, location);
     }
 
     public StorageDirectory(File dir, StorageDirType dirType) {
@@ -304,20 +304,57 @@ public abstract class Storage extends StorageInfo {
       this(dir, dirType, isShared, null);
     }
 
-    public StorageDirectory(File dir, StorageDirType dirType,
+    /**
+     * Constructor
+     * @param dirType storage directory type
+     * @param isShared whether or not this dir is shared between two NNs. true
+     *          disables locking on the storage directory, false enables locking
+     * @param location the {@link StorageLocation} for this directory
+     */
+    public StorageDirectory(StorageDirType dirType, boolean isShared,
+        StorageLocation location) {
+      this(getStorageLocationFile(location), dirType, isShared, location);
+    }
+
+    /**
+     * Constructor
+     * @param bpid the block pool id
+     * @param dirType storage directory type
+     * @param isShared whether or not this dir is shared between two NNs. true
+     *          disables locking on the storage directory, false enables locking
+     * @param location the {@link StorageLocation} for this directory
+     */
+    public StorageDirectory(String bpid, StorageDirType dirType,
+        boolean isShared, StorageLocation location) {
+      this(new File(location.getBpURI(bpid, STORAGE_DIR_CURRENT)), dirType,
+          isShared, location);
+    }
+
+    private StorageDirectory(File dir, StorageDirType dirType,
         boolean isShared, StorageLocation location) {
       this.root = dir;
       this.lock = null;
       this.dirType = dirType;
       this.isShared = isShared;
       this.location = location;
-      assert location == null ||
+      assert location == null || dir == null ||
           dir.getAbsolutePath().startsWith(
-              location.getFile().getAbsolutePath()):
+              new File(location.getUri()).getAbsolutePath()):
         "The storage location and directory should be equal";
     }
-
+    private static File getStorageLocationFile(StorageLocation location) {
+      if (location == null) {
+        return null;
+      }
+      try {
+        return new File(location.getUri());
+      } catch (IllegalArgumentException e) {
+        //if location does not refer to a File
+        return null;
+      }
+    }
+
     /**
      * Get root directory of this storage
      */
@@ -932,6 +969,41 @@ public abstract class Storage extends StorageInfo {
     return false;
   }
 
+  /**
+   * Returns true if the storage directory at the given location is already
+   * loaded.
+   * @param location the {@link StorageLocation}
+   * @throws IOException if failed to get canonical path.
+   */
+  protected boolean containsStorageDir(StorageLocation location)
+      throws IOException {
+    for (StorageDirectory sd : storageDirs) {
+      if (location.matchesStorageDirectory(sd)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Returns true if the storage directory at the given location is already
+   * loaded.
+   * @param location the {@link StorageLocation}
+   * @param bpid the block pool id
+   * @return true if the location matches any existing storage directory
+   * @throws IOException if failed to read the location
+   *           or the storage directory path
+   */
+  protected boolean containsStorageDir(StorageLocation location, String bpid)
+      throws IOException {
+    for (StorageDirectory sd : storageDirs) {
+      if (location.matchesStorageDirectory(sd, bpid)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   /**
    * Return true if the layout of the given storage directory is from a version
    * of Hadoop prior to the introduction of the "current" and "previous"
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index e3b6da10469..9bd221ec80e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -22,7 +22,6 @@ import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
@@ -147,10 +146,11 @@ public class BlockPoolSliceStorage extends Storage {
    * @throws IOException
    */
   private StorageDirectory loadStorageDirectory(NamespaceInfo nsInfo,
-      File dataDir, StorageLocation location, StartupOption startOpt,
+      StorageLocation location, StartupOption startOpt,
       List<Callable<StorageDirectory>> callables, Configuration conf)
           throws IOException {
-    StorageDirectory sd = new StorageDirectory(dataDir, null, true, location);
+    StorageDirectory sd = new StorageDirectory(
+        nsInfo.getBlockPoolID(), null, true, location);
     try {
       StorageState curState = sd.analyzeStorage(startOpt, this, true);
       // sd is locked but not opened
@@ -158,11 +158,15 @@ public class BlockPoolSliceStorage extends Storage {
       case NORMAL:
         break;
       case NON_EXISTENT:
-        LOG.info("Block pool storage directory " + dataDir + " does not exist");
-        throw new IOException("Storage directory " + dataDir
-            + " does not exist");
+        LOG.info("Block pool storage directory for location " + location
+            + " and block pool id " + nsInfo.getBlockPoolID()
+            + " does not exist");
+        throw new IOException("Storage directory for location " + location
+            + " and block pool id " + nsInfo.getBlockPoolID()
+            + " does not exist");
       case NOT_FORMATTED: // format
-        LOG.info("Block pool storage directory " + dataDir
+        LOG.info("Block pool storage directory for location " + location
+            + " and block pool id " + nsInfo.getBlockPoolID()
            + " is not formatted for " + nsInfo.getBlockPoolID()
            + ". Formatting ...");
Formatting ..."); format(sd, nsInfo); @@ -208,21 +212,19 @@ public class BlockPoolSliceStorage extends Storage { * @throws IOException on error */ List loadBpStorageDirectories(NamespaceInfo nsInfo, - Collection dataDirs, StorageLocation location, - StartupOption startOpt, List> callables, - Configuration conf) throws IOException { + StorageLocation location, StartupOption startOpt, + List> callables, Configuration conf) + throws IOException { List succeedDirs = Lists.newArrayList(); try { - for (File dataDir : dataDirs) { - if (containsStorageDir(dataDir)) { - throw new IOException( - "BlockPoolSliceStorage.recoverTransitionRead: " + - "attempt to load an used block storage: " + dataDir); - } - final StorageDirectory sd = loadStorageDirectory( - nsInfo, dataDir, location, startOpt, callables, conf); - succeedDirs.add(sd); + if (containsStorageDir(location, nsInfo.getBlockPoolID())) { + throw new IOException( + "BlockPoolSliceStorage.recoverTransitionRead: " + + "attempt to load an used block storage: " + location); } + final StorageDirectory sd = loadStorageDirectory( + nsInfo, location, startOpt, callables, conf); + succeedDirs.add(sd); } catch (IOException e) { LOG.warn("Failed to analyze storage directories for block pool " + nsInfo.getBlockPoolID(), e); @@ -244,12 +246,12 @@ public class BlockPoolSliceStorage extends Storage { * @throws IOException on error */ List recoverTransitionRead(NamespaceInfo nsInfo, - Collection dataDirs, StorageLocation location, - StartupOption startOpt, List> callables, - Configuration conf) throws IOException { + StorageLocation location, StartupOption startOpt, + List> callables, Configuration conf) + throws IOException { LOG.info("Analyzing storage directories for bpid " + nsInfo.getBlockPoolID()); final List loaded = loadBpStorageDirectories( - nsInfo, dataDirs, location, startOpt, callables, conf); + nsInfo, location, startOpt, callables, conf); for (StorageDirectory sd : loaded) { addStorageDir(sd); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 8f65efe0298..416c138dc32 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -648,7 +648,7 @@ public class DataNode extends ReconfigurableBase // Use the existing StorageLocation to detect storage type changes. 
     Map<String, StorageLocation> existingLocations = new HashMap<>();
     for (StorageLocation loc : getStorageLocations(getConf())) {
-      existingLocations.put(loc.getFile().getCanonicalPath(), loc);
+      existingLocations.put(loc.getNormalizedUri().toString(), loc);
     }
 
     ChangedVolumes results = new ChangedVolumes();
@@ -661,11 +661,10 @@
       for (Iterator<StorageLocation> sl = results.newLocations.iterator();
            sl.hasNext(); ) {
         StorageLocation location = sl.next();
-        if (location.getFile().getCanonicalPath().equals(
-            dir.getRoot().getCanonicalPath())) {
+        if (location.matchesStorageDirectory(dir)) {
           sl.remove();
           StorageLocation old = existingLocations.get(
-              location.getFile().getCanonicalPath());
+              location.getNormalizedUri().toString());
           if (old != null &&
               old.getStorageType() != location.getStorageType()) {
             throw new IOException("Changing storage type is not allowed.");
@@ -2676,7 +2675,7 @@
         locations.add(location);
       } catch (IOException ioe) {
         LOG.warn("Invalid " + DFS_DATANODE_DATA_DIR_KEY + " "
-            + location.getFile() + " : ", ioe);
+            + location + " : ", ioe);
         invalidDirs.append("\"").append(uri.getPath()).append("\" ");
       }
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 7c9bea51c81..29b14e731ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -46,15 +46,10 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -66,8 +61,6 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.DiskChecker;
-
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ComparisonChain;
 import com.google.common.collect.Lists;
@@ -263,10 +256,9 @@ public class DataStorage extends Storage {
   }
 
   private StorageDirectory loadStorageDirectory(DataNode datanode,
-      NamespaceInfo nsInfo, File dataDir, StorageLocation location,
-      StartupOption startOpt, List<Callable<StorageDirectory>> callables)
-      throws IOException {
-    StorageDirectory sd = new StorageDirectory(dataDir, null, false, location);
+      NamespaceInfo nsInfo, StorageLocation location, StartupOption startOpt,
+      List<Callable<StorageDirectory>> callables) throws IOException {
+    StorageDirectory sd = new StorageDirectory(null, false, location);
     try {
       StorageState curState = sd.analyzeStorage(startOpt, this, true);
       // sd is locked but not opened
@@ -274,11 +266,12 @@ public class DataStorage extends Storage {
       case NORMAL:
         break;
       case NON_EXISTENT:
-        LOG.info("Storage directory " + dataDir + " does not exist");
-        throw new IOException("Storage directory " + dataDir
+        LOG.info("Storage directory with location " + location
+            + " does not exist");
+        throw new IOException("Storage directory with location " + location
             + " does not exist");
       case NOT_FORMATTED: // format
-        LOG.info("Storage directory " + dataDir
+        LOG.info("Storage directory with location " + location
             + " is not formatted for namespace " + nsInfo.getNamespaceID()
             + ". Formatting...");
         format(sd, nsInfo, datanode.getDatanodeUuid());
@@ -322,28 +315,22 @@ public class DataStorage extends Storage {
   public VolumeBuilder prepareVolume(DataNode datanode,
       StorageLocation location, List<NamespaceInfo> nsInfos) throws IOException {
-    File volume = location.getFile();
-    if (containsStorageDir(volume)) {
+    if (containsStorageDir(location)) {
       final String errorMessage = "Storage directory is in use";
       LOG.warn(errorMessage + ".");
       throw new IOException(errorMessage);
     }
 
     StorageDirectory sd = loadStorageDirectory(
-        datanode, nsInfos.get(0), volume, location,
-        StartupOption.HOTSWAP, null);
+        datanode, nsInfos.get(0), location, StartupOption.HOTSWAP, null);
     VolumeBuilder builder = new VolumeBuilder(this, sd);
     for (NamespaceInfo nsInfo : nsInfos) {
-      List<File> bpDataDirs = Lists.newArrayList();
-      bpDataDirs.add(BlockPoolSliceStorage.getBpRoot(
-          nsInfo.getBlockPoolID(), new File(volume, STORAGE_DIR_CURRENT)));
-      makeBlockPoolDataDir(bpDataDirs, null);
+      location.makeBlockPoolDir(nsInfo.getBlockPoolID(), null);
 
       final BlockPoolSliceStorage bpStorage = getBlockPoolSliceStorage(nsInfo);
       final List<StorageDirectory> dirs = bpStorage.loadBpStorageDirectories(
-          nsInfo, bpDataDirs, location, StartupOption.HOTSWAP,
-          null, datanode.getConf());
+          nsInfo, location, StartupOption.HOTSWAP, null, datanode.getConf());
       builder.addBpStorageDirectories(nsInfo.getBlockPoolID(), dirs);
     }
     return builder;
@@ -405,14 +392,13 @@ public class DataStorage extends Storage {
     final List success = Lists.newArrayList();
     final List tasks = Lists.newArrayList();
     for (StorageLocation dataDir : dataDirs) {
-      File root = dataDir.getFile();
-      if (!containsStorageDir(root)) {
+      if (!containsStorageDir(dataDir)) {
         try {
           // It first ensures the datanode level format is completed.
          final List<Callable<StorageDirectory>> callables = Lists.newArrayList();
          final StorageDirectory sd = loadStorageDirectory(
-              datanode, nsInfo, root, dataDir, startOpt, callables);
+              datanode, nsInfo, dataDir, startOpt, callables);
          if (callables.isEmpty()) {
            addStorageDir(sd);
            success.add(dataDir);
@@ -455,16 +441,11 @@ public class DataStorage extends Storage {
     final List success = Lists.newArrayList();
     final List tasks = Lists.newArrayList();
     for (StorageLocation dataDir : dataDirs) {
-      final File curDir = new File(dataDir.getFile(), STORAGE_DIR_CURRENT);
-      List<File> bpDataDirs = new ArrayList<File>();
-      bpDataDirs.add(BlockPoolSliceStorage.getBpRoot(bpid, curDir));
+      dataDir.makeBlockPoolDir(bpid, null);
       try {
-        makeBlockPoolDataDir(bpDataDirs, null);
-
         final List<Callable<StorageDirectory>> callables = Lists.newArrayList();
         final List<StorageDirectory> dirs = bpStorage.recoverTransitionRead(
-            nsInfo, bpDataDirs, dataDir, startOpt,
-            callables, datanode.getConf());
+            nsInfo, dataDir, startOpt, callables, datanode.getConf());
         if (callables.isEmpty()) {
           for(StorageDirectory sd : dirs) {
             success.add(sd);
@@ -566,34 +547,6 @@
     }
   }
 
-  /**
-   * Create physical directory for block pools on the data node
-   *
-   * @param dataDirs
-   *          List of data directories
-   * @param conf
-   *          Configuration instance to use.
-   * @throws IOException on errors
-   */
-  static void makeBlockPoolDataDir(Collection<File> dataDirs,
-      Configuration conf) throws IOException {
-    if (conf == null)
-      conf = new HdfsConfiguration();
-
-    LocalFileSystem localFS = FileSystem.getLocal(conf);
-    FsPermission permission = new FsPermission(conf.get(
-        DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
-        DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
-    for (File data : dataDirs) {
-      try {
-        DiskChecker.checkDir(localFS, new Path(data.toURI()), permission);
-      } catch ( IOException e ) {
-        LOG.warn("Invalid directory in: " + data.getCanonicalPath() + ": "
-            + e.getMessage());
-      }
-    }
-  }
-
   void format(StorageDirectory sd, NamespaceInfo nsInfo,
       String datanodeUuid) throws IOException {
     sd.clearDirectory(); // create directory
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
index 58febf02cab..f8291112ec7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
@@ -351,7 +351,13 @@ abstract public class LocalReplica extends ReplicaInfo {
   @Override
   public void updateWithReplica(StorageLocation replicaLocation) {
     // for local replicas, the replica location is assumed to be a file.
-    File diskFile = replicaLocation.getFile();
+    File diskFile = null;
+    try {
+      diskFile = new File(replicaLocation.getUri());
+    } catch (IllegalArgumentException e) {
+      diskFile = null;
+    }
+
     if (null == diskFile) {
       setDirInternal(null);
     } else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
index 75abc1d39a6..a0403955537 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
@@ -23,11 +23,21 @@ import java.util.regex.Pattern;
 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.regex.Matcher;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.StringUtils;
 
@@ -40,8 +50,7 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceAudience.Private
 public class StorageLocation implements Comparable<StorageLocation>{
   final StorageType storageType;
-  final File file;
-
+  private final URI baseURI;
   /** Regular expression that describes a storage uri with a storage type.
    *  e.g. [Disk]/storages/storage1/
    */
@@ -49,26 +58,41 @@
   private StorageLocation(StorageType storageType, URI uri) {
     this.storageType = storageType;
-
-    if (uri.getScheme() == null ||
-        "file".equalsIgnoreCase(uri.getScheme())) {
-      // drop any (illegal) authority in the URI for backwards compatibility
-      this.file = new File(uri.getPath());
-    } else {
-      throw new IllegalArgumentException("Unsupported URI schema in " + uri);
+    if (uri.getScheme() == null || uri.getScheme().equals("file")) {
+      // make sure all URIs that point to a file have the same scheme
+      try {
+        File uriFile = new File(uri.getPath());
+        String absPath = uriFile.getAbsolutePath();
+        uri = new URI("file", null, absPath, uri.getQuery(), uri.getFragment());
+      } catch (URISyntaxException e) {
+        throw new IllegalArgumentException(
+            "URI: " + uri + " is not in the expected format");
+      }
     }
+    baseURI = uri;
   }
 
   public StorageType getStorageType() {
     return this.storageType;
   }
 
-  URI getUri() {
-    return file.toURI();
+  public URI getUri() {
+    return baseURI;
   }
 
-  public File getFile() {
-    return this.file;
+  public URI getNormalizedUri() {
+    return baseURI.normalize();
+  }
+
+  public boolean matchesStorageDirectory(StorageDirectory sd)
+      throws IOException {
+    return this.equals(sd.getStorageLocation());
+  }
+
+  public boolean matchesStorageDirectory(StorageDirectory sd,
+      String bpid) throws IOException {
+    return this.getBpURI(bpid, Storage.STORAGE_DIR_CURRENT).normalize()
+        .equals(sd.getRoot().toURI().normalize());
   }
 
   /**
@@ -94,13 +118,14 @@
           StorageType.valueOf(StringUtils.toUpperCase(classString));
       }
     }
-
+    //do Path.toURI instead of new URI(location) as this ensures that
+    //"/a/b" and "/a/b/" are represented in a consistent manner
     return new StorageLocation(storageType, new Path(location).toUri());
   }
 
   @Override
   public String toString() {
-    return "[" + storageType + "]" + file.toURI();
+    return "[" + storageType + "]" + baseURI.normalize();
   }
 
   @Override
@@ -126,16 +151,56 @@
     }
 
     StorageLocation otherStorage = (StorageLocation) obj;
-    if (this.getFile() != null && otherStorage.getFile() != null) {
-      return this.getFile().getAbsolutePath().compareTo(
-          otherStorage.getFile().getAbsolutePath());
-    } else if (this.getFile() == null && otherStorage.getFile() == null) {
+    if (this.getNormalizedUri() != null &&
+        otherStorage.getNormalizedUri() != null) {
+      return this.getNormalizedUri().compareTo(
+          otherStorage.getNormalizedUri());
+    } else if (this.getNormalizedUri() == null &&
+        otherStorage.getNormalizedUri() == null) {
       return this.storageType.compareTo(otherStorage.getStorageType());
-    } else if (this.getFile() == null) {
+    } else if (this.getNormalizedUri() == null) {
       return -1;
     } else {
       return 1;
     }
   }
+
+  public URI getBpURI(String bpid, String currentStorageDir) {
+    try {
+      File localFile = new File(getUri());
+      return new File(new File(localFile, currentStorageDir), bpid).toURI();
+    } catch (IllegalArgumentException e) {
+      return null;
+    }
+  }
+
+  /**
+   * Create physical directory for block pools on the data node.
+   *
+   * @param blockPoolID
+   *          the block pool id
+   * @param conf
+   *          Configuration instance to use.
+   * @throws IOException on errors
+   */
+  public void makeBlockPoolDir(String blockPoolID,
+      Configuration conf) throws IOException {
+
+    if (conf == null) {
+      conf = new HdfsConfiguration();
+    }
+
+    LocalFileSystem localFS = FileSystem.getLocal(conf);
+    FsPermission permission = new FsPermission(conf.get(
+        DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
+        DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
+    File data = new File(getBpURI(blockPoolID, Storage.STORAGE_DIR_CURRENT));
+    try {
+      DiskChecker.checkDir(localFS, new Path(data.toURI()), permission);
+    } catch (IOException e) {
+      DataStorage.LOG.warn("Invalid directory in: " + data.getCanonicalPath()
+          + ": " + e.getMessage());
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 7e7ae4f901a..c61fc578738 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -562,16 +562,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     }
   }
 
-  private StorageType getStorageTypeFromLocations(
-      Collection<StorageLocation> dataLocations, File dir) {
-    for (StorageLocation dataLocation : dataLocations) {
-      if (dataLocation.getFile().equals(dir)) {
-        return dataLocation.getStorageType();
-      }
-    }
-    return StorageType.DEFAULT;
-  }
-
   /**
    * Return the total space used by dfs datanode
    */
@@ -635,7 +625,7 @@
         infos.length);
     for (VolumeFailureInfo info: infos) {
       failedStorageLocations.add(
-          info.getFailedStorageLocation().getFile().getAbsolutePath());
+          info.getFailedStorageLocation().getNormalizedUri().toString());
     }
     return failedStorageLocations.toArray(
         new String[failedStorageLocations.size()]);
@@ -674,7 +664,7 @@
     long estimatedCapacityLostTotal = 0;
     for (VolumeFailureInfo info: infos) {
       failedStorageLocations.add(
-          info.getFailedStorageLocation().getFile().getAbsolutePath());
+          info.getFailedStorageLocation().getNormalizedUri().toString());
       long failureDate = info.getFailureDate();
       if (failureDate > lastVolumeFailureDate) {
         lastVolumeFailureDate = failureDate;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
index 274627f9e25..070a76859a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
@@ -231,9 +231,9 @@ public class TestNameNodePrunesMissingStorages {
     // it would be re-initialized with a new storage ID.)
     assertNotNull(volumeLocationToRemove);
     datanodeToRemoveStorageFrom.shutdown();
-    FileUtil.fullyDelete(volumeLocationToRemove.getFile());
+    FileUtil.fullyDelete(new File(volumeLocationToRemove.getUri()));
     FileOutputStream fos = new FileOutputStream(
-        volumeLocationToRemove.getFile().toString());
+        new File(volumeLocationToRemove.getUri()));
     try {
       fos.write(1);
     } finally {
@@ -327,7 +327,8 @@
     final String newStorageId = DatanodeStorage.generateUuid();
     try {
       File currentDir = new File(
-          volumeRefs.get(0).getStorageLocation().getFile(), "current");
+          new File(volumeRefs.get(0).getStorageLocation().getUri()),
+          "current");
       File versionFile = new File(currentDir, "VERSION");
       rewriteVersionFile(versionFile, newStorageId);
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java
index c55a82883ec..6d35cc54f9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java
@@ -29,6 +29,7 @@ import static org.junit.Assert.assertFalse;
 import java.io.Closeable;
 import java.io.File;
 import java.io.IOException;
+import java.net.URI;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -549,9 +550,8 @@ public class TestBlockScanner {
         info.shouldRun = false;
       }
       ctx.datanode.shutdown();
-      String vPath = ctx.volumes.get(0).getStorageLocation()
-          .getFile().getAbsolutePath();
-      File cursorPath = new File(new File(new File(vPath, "current"),
+      URI vURI = ctx.volumes.get(0).getStorageLocation().getUri();
+      File cursorPath = new File(new File(new File(new File(vURI), "current"),
           ctx.bpids[0]), "scanner.cursor");
       assertTrue("Failed to find cursor save file in " +
           cursorPath.getAbsolutePath(), cursorPath.exists());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
index d41c13e612a..68828fbca1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
@@ -114,7 +114,8 @@ public class TestDataDirs {
     List<StorageLocation> checkedLocations =
         DataNode.checkStorageLocations(locations, fs, diskChecker);
     assertEquals("number of valid data dirs", 1, checkedLocations.size());
-    String validDir = checkedLocations.iterator().next().getFile().getPath();
+    String validDir =
+        new File(checkedLocations.iterator().next().getUri()).getPath();
     assertThat("p3 should be valid", new File("/p3/").getPath(), is(validDir));
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 83c231d2332..5607ccc6df3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -221,7 +221,7 @@ public class TestDataNodeHotSwapVolumes {
     }
     assertFalse(oldLocations.isEmpty());
 
-    String newPaths = oldLocations.get(0).getFile().getAbsolutePath() +
+    String newPaths = new File(oldLocations.get(0).getUri()).getAbsolutePath() +
         ",/foo/path1,/foo/path2";
 
     DataNode.ChangedVolumes changedVolumes =
@@ -229,18 +229,18 @@ public class TestDataNodeHotSwapVolumes {
     List<StorageLocation> newVolumes = changedVolumes.newLocations;
     assertEquals(2, newVolumes.size());
     assertEquals(new File("/foo/path1").getAbsolutePath(),
-        newVolumes.get(0).getFile().getAbsolutePath());
+        new File(newVolumes.get(0).getUri()).getAbsolutePath());
     assertEquals(new File("/foo/path2").getAbsolutePath(),
-        newVolumes.get(1).getFile().getAbsolutePath());
+        new File(newVolumes.get(1).getUri()).getAbsolutePath());
 
     List<StorageLocation> removedVolumes = changedVolumes.deactivateLocations;
     assertEquals(1, removedVolumes.size());
-    assertEquals(oldLocations.get(1).getFile(),
-        removedVolumes.get(0).getFile());
+    assertEquals(oldLocations.get(1).getNormalizedUri(),
+        removedVolumes.get(0).getNormalizedUri());
 
     assertEquals(1, changedVolumes.unchangedLocations.size());
-    assertEquals(oldLocations.get(0).getFile(),
-        changedVolumes.unchangedLocations.get(0).getFile());
+    assertEquals(oldLocations.get(0).getNormalizedUri(),
+        changedVolumes.unchangedLocations.get(0).getNormalizedUri());
   }
 
   @Test
@@ -519,7 +519,7 @@ public class TestDataNodeHotSwapVolumes {
         DFSTestUtil.getAllBlocks(fs, testFile).get(1).getBlock();
     FsVolumeSpi volumeWithBlock = dn.getFSDataset().getVolume(block);
     String dirWithBlock = "[" + volumeWithBlock.getStorageType() + "]" +
-        volumeWithBlock.getStorageLocation().getFile().toURI();
+        volumeWithBlock.getStorageLocation().getUri();
     String newDirs = dirWithBlock;
     for (String dir : oldDirs) {
       if (dirWithBlock.startsWith(dir)) {
@@ -577,7 +577,7 @@ public class TestDataNodeHotSwapVolumes {
     try (FsDatasetSpi.FsVolumeReferences volumes =
         dataset.getFsVolumeReferences()) {
       for (FsVolumeSpi volume : volumes) {
-        assertThat(volume.getStorageLocation().getFile().toString(),
+        assertThat(new File(volume.getStorageLocation().getUri()).toString(),
             is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
       }
     }
@@ -593,8 +593,10 @@ public class TestDataNodeHotSwapVolumes {
         dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY).split(",");
     assertEquals(4, effectiveVolumes.length);
     for (String ev : effectiveVolumes) {
-      assertThat(StorageLocation.parse(ev).getFile().getCanonicalPath(),
-          is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
+      assertThat(
+          new File(StorageLocation.parse(ev).getUri()).getCanonicalPath(),
+          is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2)))))
+      );
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 47f48231b4b..9ffe7b6c52f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -253,7 +253,7 @@ public class TestDataNodeVolumeFailure {
     FsDatasetSpi data = dn0.getFSDataset();
     try (FsDatasetSpi.FsVolumeReferences vols = data.getFsVolumeReferences()) {
       for (FsVolumeSpi volume : vols) {
-        assertFalse(volume.getStorageLocation().getFile()
+        assertFalse(new File(volume.getStorageLocation().getUri())
             .getAbsolutePath().startsWith(dn0Vol1.getAbsolutePath()
         ));
       }
@@ -262,7 +262,7 @@ public class TestDataNodeVolumeFailure {
     // 3. all blocks on dn0Vol1 have been removed.
     for (ReplicaInfo replica : FsDatasetTestUtil.getReplicas(data, bpid)) {
       assertNotNull(replica.getVolume());
-      assertFalse(replica.getVolume().getStorageLocation().getFile()
+      assertFalse(new File(replica.getVolume().getStorageLocation().getUri())
           .getAbsolutePath().startsWith(dn0Vol1.getAbsolutePath()
       ));
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
index 4bb5e7abad2..b45dabf4c0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -29,6 +29,8 @@ import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.ArrayList;
 
 import org.apache.commons.logging.Log;
@@ -467,7 +469,8 @@
     DataNodeTestUtils.triggerHeartbeat(dn);
     FsDatasetSpi fsd = dn.getFSDataset();
     assertEquals(expectedFailedVolumes.length, fsd.getNumFailedVolumes());
-    assertArrayEquals(expectedFailedVolumes, fsd.getFailedStorageLocations());
+    assertArrayEquals(expectedFailedVolumes,
+        convertToAbsolutePaths(fsd.getFailedStorageLocations()));
 
     // there shouldn't be any more volume failures due to I/O failure
     checkFailuresAtDataNode(dn, 0, false, expectedFailedVolumes);
@@ -550,7 +553,8 @@
     }
     LOG.info(strBuilder.toString());
     assertEquals(expectedFailedVolumes.length, fsd.getNumFailedVolumes());
-    assertArrayEquals(expectedFailedVolumes, fsd.getFailedStorageLocations());
+    assertArrayEquals(expectedFailedVolumes,
+        convertToAbsolutePaths(fsd.getFailedStorageLocations()));
     if (expectedFailedVolumes.length > 0) {
       assertTrue(fsd.getLastVolumeFailureDate() > 0);
       long expectedCapacityLost = getExpectedCapacityLost(expectCapacityKnown,
@@ -582,8 +586,9 @@
     assertEquals(expectedFailedVolumes.length, dd.getVolumeFailures());
     VolumeFailureSummary volumeFailureSummary = dd.getVolumeFailureSummary();
     if (expectedFailedVolumes.length > 0) {
-      assertArrayEquals(expectedFailedVolumes, volumeFailureSummary
-          .getFailedStorageLocations());
+      assertArrayEquals(expectedFailedVolumes,
+          convertToAbsolutePaths(volumeFailureSummary
+              .getFailedStorageLocations()));
       assertTrue(volumeFailureSummary.getLastVolumeFailureDate() > 0);
       long expectedCapacityLost = getExpectedCapacityLost(expectCapacityKnown,
           expectedFailedVolumes.length);
@@ -594,6 +599,30 @@
     }
   }
 
+  /**
+   * Converts the provided paths to absolute file paths.
+ * @param locations the array of paths + * @return array of absolute paths + */ + private String[] convertToAbsolutePaths(String[] locations) { + if (locations == null || locations.length == 0) { + return new String[0]; + } + + String[] absolutePaths = new String[locations.length]; + for (int count = 0; count < locations.length; count++) { + try { + absolutePaths[count] = new File(new URI(locations[count])) + .getAbsolutePath(); + } catch (URISyntaxException e) { + //if the provided location is not an URI, + //we use it as the absolute path + absolutePaths[count] = locations[count]; + } + } + return absolutePaths; + } + /** * Returns expected capacity lost for use in assertions. The return value is * dependent on whether or not it is expected that the volume capacities were diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataStorage.java index 446a77b2552..c56a01ca58e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataStorage.java @@ -142,8 +142,8 @@ public class TestDataStorage { for (NamespaceInfo ni : namespaceInfos) { storage.addStorageLocations(mockDN, ni, locations, START_OPT); for (StorageLocation sl : locations) { - checkDir(sl.getFile()); - checkDir(sl.getFile(), ni.getBlockPoolID()); + checkDir(new File(sl.getUri())); + checkDir(new File(sl.getUri()), ni.getBlockPoolID()); } } @@ -173,8 +173,7 @@ public class TestDataStorage { List locations = createStorageLocations(numLocations); StorageLocation firstStorage = locations.get(0); - Storage.StorageDirectory sd = new Storage.StorageDirectory( - firstStorage.getFile()); + Storage.StorageDirectory sd = new Storage.StorageDirectory(firstStorage); // the directory is not initialized so VERSION does not exist // create a fake directory under current/ File currentDir = new File(sd.getCurrentDir(), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java index 08a5af9d62b..d05e2a7fa9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java @@ -189,8 +189,8 @@ public class TestDirectoryScanner { // Volume without a copy of the block. Make a copy now. 
       File sourceBlock = new File(b.getBlockURI());
       File sourceMeta = new File(b.getMetadataURI());
-      URI sourceRoot = b.getVolume().getStorageLocation().getFile().toURI();
-      URI destRoot = v.getStorageLocation().getFile().toURI();
+      URI sourceRoot = b.getVolume().getStorageLocation().getUri();
+      URI destRoot = v.getStorageLocation().getUri();
 
       String relativeBlockPath = sourceRoot.relativize(sourceBlock.toURI())
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
index 2103392db95..56dee43a3bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
@@ -199,8 +199,7 @@ public class TestDiskError {
     try (FsDatasetSpi.FsVolumeReferences volumes =
         dn.getFSDataset().getFsVolumeReferences()) {
       for (FsVolumeSpi vol : volumes) {
-        String dir = vol.getStorageLocation().getFile().getAbsolutePath();
-        Path dataDir = new Path(dir);
+        Path dataDir = new Path(vol.getStorageLocation().getNormalizedUri());
         FsPermission actual = localFS.getFileStatus(dataDir).getPermission();
         assertEquals("Permission for dir: " + dataDir + ", is " + actual +
             ", while expected is " + expected, expected, actual);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
index b42c052adbc..9095594fa4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
@@ -100,7 +100,7 @@ public class FsDatasetTestUtil {
    */
   public static void assertFileLockReleased(String dir) throws IOException {
     StorageLocation sl = StorageLocation.parse(dir);
-    File lockFile = new File(sl.getFile(), Storage.STORAGE_FILE_LOCK);
+    File lockFile = new File(new File(sl.getUri()), Storage.STORAGE_FILE_LOCK);
     try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
         FileChannel channel = raf.getChannel()) {
       FileLock lock = channel.tryLock();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index de258de6319..c7ba9d2e767 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -290,7 +290,7 @@ public class TestDFSAdmin {
         datanode.getConf());
     if (expectedSuccuss) {
       assertThat(locations.size(), is(1));
-      assertThat(locations.get(0).getFile(), is(newDir));
+      assertThat(new File(locations.get(0).getUri()), is(newDir));
       // Verify the directory is appropriately formatted.
       assertTrue(new File(newDir, Storage.STORAGE_DIR_CURRENT).isDirectory());
     } else {