diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index 3171e6bf5f8..da3807d3073 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -38,7 +38,6 @@ public class FileStatus implements Writable, Comparable {
   private boolean isdir;
   private short block_replication;
   private long blocksize;
-  private boolean isLazyPersist;
   private long modification_time;
   private long access_time;
   private FsPermission permission;
@@ -74,18 +73,6 @@ public FileStatus(long length, boolean isdir,
                     FsPermission permission, String owner, String group,
                     Path symlink,
                     Path path) {
-    this(length, isdir, block_replication, blocksize, false,
-        modification_time, access_time, permission, owner, group,
-        symlink, path);
-  }
-
-  public FileStatus(long length, boolean isdir,
-                    int block_replication,
-                    long blocksize, boolean isLazyPersist,
-                    long modification_time, long access_time,
-                    FsPermission permission, String owner, String group,
-                    Path symlink,
-                    Path path) {
     this.length = length;
     this.isdir = isdir;
     this.block_replication = (short)block_replication;
@@ -105,7 +92,6 @@ public FileStatus(long length, boolean isdir,
     this.group = (group == null) ? "" : group;
     this.symlink = symlink;
     this.path = path;
-    this.isLazyPersist = isLazyPersist;
     // The variables isdir and symlink indicate the type:
     // 1. isdir implies directory, in which case symlink must be null.
     // 2. !isdir implies a file or symlink, symlink != null implies a
@@ -181,13 +167,6 @@ public long getBlockSize() {
     return blocksize;
   }
 
-  /**
-   * Get whether the file is lazyPersist.
-   */
-  public boolean isLazyPersist() {
-    return isLazyPersist;
-  }
-
   /**
    * Get the replication factor of a file.
    * @return the replication factor of a file.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index c3852df5ffd..a06e3a6eb24 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -762,7 +762,6 @@ private FileStatus deprecatedGetFileLinkStatusInternal(final Path f)
           false,
           fs.getReplication(),
           fs.getBlockSize(),
-          fs.isLazyPersist(),
           fs.getModificationTime(),
           fs.getAccessTime(),
           fs.getPermission(),
@@ -778,7 +777,7 @@ private FileStatus deprecatedGetFileLinkStatusInternal(final Path f)
      * when available.
      */
     if (!target.isEmpty()) {
-      return new FileStatus(0, false, 0, 0, false, 0, 0, FsPermission.getDefault(),
+      return new FileStatus(0, false, 0, 0, 0, 0, FsPermission.getDefault(),
           "", "", new Path(target), f);
     }
     // f refers to a file or directory that does not exist
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
index 8b32eb8e21f..ee56fe6492c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
@@ -39,7 +39,6 @@
  * %u: User name of owner
  * %y: UTC date as "yyyy-MM-dd HH:mm:ss"
  * %Y: Milliseconds since January 1, 1970 UTC
- * %l: Whether lazyPersist flag is set on the file.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -54,8 +53,7 @@ public static void registerCommands(CommandFactory factory) {
   public static final String DESCRIPTION =
     "Print statistics about the file/directory at <path> " +
     "in the specified format. Format accepts filesize in blocks (%b), group name of owner(%g), " +
-    "filename (%n), block size (%o), replication (%r), user name of owner(%u), modification date (%y, %Y), " +
-    "lazyPersist flag (%l)\n";
+    "filename (%n), block size (%o), replication (%r), user name of owner(%u), modification date (%y, %Y)\n";
 
   protected static final SimpleDateFormat timeFmt;
   static {
@@ -93,7 +91,7 @@ protected void processPath(PathData item) throws IOException {
           break;
         case 'F':
           buf.append(stat.isDirectory()
-              ? "directory" 
+              ? "directory"
               : (stat.isFile() ? "regular file" : "symlink"));
           break;
         case 'g':
@@ -117,9 +115,6 @@ protected void processPath(PathData item) throws IOException {
         case 'Y':
           buf.append(stat.getModificationTime());
           break;
-        case 'l':
-          buf.append(stat.isLazyPersist());
-          break;
         default:
           // this leaves % alone, which causes the potential for
           // future format options to break strings; should use %% to
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index 29d4174b0ce..c6e5fc5f0c2 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -798,7 +798,7 @@
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*\(%y, %Y\), lazyPersist flag \(\%l\)( )*</expected-output>
+          <expected-output>^( |\t)*\(%y, %Y\)( )*</expected-output>
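With the boolean argument gone, FileStatus is back to a single primary constructor, and lazy persistence is no longer part of the generic FileStatus contract; it travels as an HDFS storage policy instead. A minimal sketch of the surviving constructor (the values and path here are illustrative):

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class FileStatusExample {
      public static void main(String[] args) {
        // No lazy-persist boolean between blocksize and modification_time.
        FileStatus st = new FileStatus(1024L, false, 3, 128L << 20,
            0L, 0L, FsPermission.getDefault(), "owner", "group",
            null /* symlink */, new Path("/tmp/example"));
        System.out.println(st.getPath() + " repl=" + st.getReplication());
      }
    }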
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 93d223fef7e..367308d421c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -159,7 +159,6 @@ public static FILE_TYPE getType(FileStatus fileStatus) {
   public static final String XATTR_NAME_JSON = "name";
   public static final String XATTR_VALUE_JSON = "value";
   public static final String XATTRNAMES_JSON = "XAttrNames";
-  public static final String LAZY_PERSIST_JSON = "LazyPersist";
 
   public static final String FILE_CHECKSUM_JSON = "FileChecksum";
   public static final String CHECKSUM_ALGORITHM_JSON = "algorithm";
@@ -947,20 +946,19 @@ private FileStatus createFileStatus(Path parent, JSONObject json) {
     long mTime = (Long) json.get(MODIFICATION_TIME_JSON);
     long blockSize = (Long) json.get(BLOCK_SIZE_JSON);
     short replication = ((Long) json.get(REPLICATION_JSON)).shortValue();
-    boolean isLazyPersist = ((Boolean) json.get(LAZY_PERSIST_JSON)).booleanValue();
     FileStatus fileStatus = null;
 
     switch (type) {
       case FILE:
       case DIRECTORY:
         fileStatus = new FileStatus(len, (type == FILE_TYPE.DIRECTORY),
-                                    replication, blockSize, false, mTime, aTime,
-                                    permission, owner, group, null, path);
+                                    replication, blockSize, mTime, aTime,
+                                    permission, owner, group, path);
         break;
       case SYMLINK:
         Path symLink = null;
         fileStatus = new FileStatus(len, false,
-                                    replication, blockSize, isLazyPersist, mTime, aTime,
+                                    replication, blockSize, mTime, aTime,
                                     permission, owner, group, symLink, path);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index bcc0476c5fd..e7d92f59588 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -125,7 +125,6 @@ public Map toJsonInner(boolean emptyPathSuffix) {
         fileStatus.getModificationTime());
     json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, fileStatus.getBlockSize());
     json.put(HttpFSFileSystem.REPLICATION_JSON, fileStatus.getReplication());
-    json.put(HttpFSFileSystem.LAZY_PERSIST_JSON, fileStatus.isLazyPersist());
     if ( (aclStatus != null) && !(aclStatus.getEntries().isEmpty()) ) {
       json.put(HttpFSFileSystem.ACL_BIT_JSON,true);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
index d83af6cee69..2fa855a9e82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
@@ -86,3 +86,7 @@
 
     HDFS-7153. Add storagePolicy to NN edit log during file creation.
     (Arpit Agarwal)
+
+    HDFS-7159. Use block storage policy to set lazy persist preference.
+    (Arpit Agarwal)
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 2691d11311a..781fb680b29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -54,6 +54,7 @@
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -75,6 +76,7 @@
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.RetryStartFileException;
@@ -172,6 +174,8 @@ public class DFSOutputStream extends FSOutputSummer
   private final AtomicReference<CachingStrategy> cachingStrategy;
   private boolean failPacket = false;
   private FileEncryptionInfo fileEncryptionInfo;
+  private static final BlockStoragePolicySuite blockStoragePolicySuite =
+      BlockStoragePolicySuite.createDefaultSuite();
 
   private static class Packet {
     private static final long HEART_BEAT_SEQNO = -1L;
@@ -386,7 +390,7 @@ private DataStreamer(HdfsFileStatus stat) {
      */
     private DataStreamer(HdfsFileStatus stat, Span span) {
       isAppend = false;
-      isLazyPersistFile = stat.isLazyPersist();
+      isLazyPersistFile = initLazyPersist(stat);
       stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
       traceSpan = span;
     }
@@ -406,7 +410,7 @@ private DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat,
       block = lastBlock.getBlock();
       bytesSent = block.getNumBytes();
       accessToken = lastBlock.getBlockToken();
-      isLazyPersistFile = stat.isLazyPersist();
+      isLazyPersistFile = initLazyPersist(stat);
       long usedInLastBlock = stat.getLen() % blockSize;
       int freeInLastBlock = (int)(blockSize - usedInLastBlock);
@@ -450,6 +454,13 @@ private DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat,
       }
     }
 
+    private boolean initLazyPersist(HdfsFileStatus stat) {
+      final BlockStoragePolicy lpPolicy =
+          blockStoragePolicySuite.getPolicy("LAZY_PERSIST");
+      return lpPolicy != null &&
+          stat.getStoragePolicy() == lpPolicy.getId();
+    }
+
     private void setPipeline(LocatedBlock lb) {
       setPipeline(lb.getLocations(), lb.getStorageTypes(), lb.getStorageIDs());
     }
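In the DFSOutputStream change above, the client no longer reads a dedicated flag off HdfsFileStatus; it derives isLazyPersistFile by comparing the file's storage policy id against the LAZY_PERSIST policy. From the client API nothing changes: lazy persistence is still requested with CreateFlag.LAZY_PERSIST at create time, and the NameNode now records it as a storage policy. A sketch against a running cluster (the path and sizes are illustrative):

    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class LazyPersistCreate {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/tmp/scratch.dat");
        // Request lazy persistence; the NameNode maps this flag to the
        // LAZY_PERSIST storage policy on the new inode.
        FSDataOutputStream out = fs.create(p, FsPermission.getFileDefault(),
            EnumSet.of(CreateFlag.CREATE, CreateFlag.LAZY_PERSIST),
            4096, (short) 1, fs.getDefaultBlockSize(p), null);
        out.write(new byte[] {1, 2, 3});
        out.close();
      }
    }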
isTransient; + } + + public boolean isMovable() { + return !isTransient; + } public static List asList() { return Arrays.asList(VALUES); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java index 8ca83a0bd88..ef13e0ccc60 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java @@ -49,15 +49,29 @@ public class BlockStoragePolicy { private final StorageType[] creationFallbacks; /** The fallback storage type for replication. */ private final StorageType[] replicationFallbacks; + /** + * Whether the policy is inherited during file creation. + * If set then the policy cannot be changed after file creation. + */ + private boolean copyOnCreateFile; @VisibleForTesting public BlockStoragePolicy(byte id, String name, StorageType[] storageTypes, StorageType[] creationFallbacks, StorageType[] replicationFallbacks) { + this(id, name, storageTypes, creationFallbacks, replicationFallbacks, + false); + } + + @VisibleForTesting + public BlockStoragePolicy(byte id, String name, StorageType[] storageTypes, + StorageType[] creationFallbacks, StorageType[] replicationFallbacks, + boolean copyOnCreateFile) { this.id = id; this.name = name; this.storageTypes = storageTypes; this.creationFallbacks = creationFallbacks; this.replicationFallbacks = replicationFallbacks; + this.copyOnCreateFile = copyOnCreateFile; } /** @@ -65,13 +79,22 @@ public BlockStoragePolicy(byte id, String name, StorageType[] storageTypes, */ public List chooseStorageTypes(final short replication) { final List types = new LinkedList(); - int i = 0; - for(; i < replication && i < storageTypes.length; i++) { - types.add(storageTypes[i]); + int i = 0, j = 0; + + // Do not return transient storage types. We will not have accurate + // usage information for transient types. 
+ for (;i < replication && j < storageTypes.length; ++j) { + if (!storageTypes[j].isTransient()) { + types.add(storageTypes[j]); + ++i; + } } + final StorageType last = storageTypes[storageTypes.length - 1]; - for(; i < replication; i++) { - types.add(last); + if (!last.isTransient()) { + for (; i < replication; i++) { + types.add(last); + } } return types; } @@ -241,4 +264,8 @@ private static StorageType getFallback(EnumSet unavailables, } return null; } + + public boolean isCopyOnCreateFile() { + return copyOnCreateFile; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java index 4fa7fd651b4..94d9a92cfa2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java @@ -39,7 +39,6 @@ public class HdfsFileStatus { private final boolean isdir; private final short block_replication; private final long blocksize; - private final boolean isLazyPersist; private final long modification_time; private final long access_time; private final FsPermission permission; @@ -71,15 +70,14 @@ public class HdfsFileStatus { * @param feInfo the file's encryption info */ public HdfsFileStatus(long length, boolean isdir, int block_replication, - long blocksize, boolean isLazyPersist, long modification_time, - long access_time, FsPermission permission, String owner, - String group, byte[] symlink, byte[] path, long fileId, - int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy) { + long blocksize, long modification_time, long access_time, + FsPermission permission, String owner, String group, byte[] symlink, + byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo, + byte storagePolicy) { this.length = length; this.isdir = isdir; this.block_replication = (short)block_replication; this.blocksize = blocksize; - this.isLazyPersist = isLazyPersist; this.modification_time = modification_time; this.access_time = access_time; this.permission = (permission == null) ? @@ -129,13 +127,6 @@ public final long getBlockSize() { return blocksize; } - /** - * @return true if the file is lazyPersist. - */ - final public boolean isLazyPersist() { - return isLazyPersist; - } - /** * Get the replication factor of a file. * @return the replication factor of a file. @@ -270,7 +261,7 @@ public final byte getStoragePolicy() { public final FileStatus makeQualified(URI defaultUri, Path path) { return new FileStatus(getLen(), isDir(), getReplication(), - getBlockSize(), isLazyPersist(), getModificationTime(), + getBlockSize(), getModificationTime(), getAccessTime(), getPermission(), getOwner(), getGroup(), isSymlink() ? 
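The rewritten chooseStorageTypes() above guarantees that transient storage is never handed out as a replication target: RAM_DISK replicas have no durability and no reliable usage accounting, so replicas beyond what the non-transient entries cover fall back to the last non-transient type. A self-contained illustration using the @VisibleForTesting constructor, mirroring the LAZY_PERSIST policy this patch registers in BlockStoragePolicySuite further below:

    import java.util.List;
    import org.apache.hadoop.hdfs.StorageType;
    import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

    public class ChooseTypesExample {
      public static void main(String[] args) {
        BlockStoragePolicy lp = new BlockStoragePolicy((byte) 15, "LAZY_PERSIST",
            new StorageType[]{StorageType.RAM_DISK, StorageType.DISK},
            new StorageType[]{StorageType.DISK},
            new StorageType[]{StorageType.DISK},
            true);
        // RAM_DISK is transient and gets skipped; DISK fills all replicas.
        List<StorageType> types = lp.chooseStorageTypes((short) 3);
        System.out.println(types);  // [DISK, DISK, DISK]
      }
    }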
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
index 644fb6fbb68..7e602bfcdfd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
@@ -55,14 +55,13 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
    * @param feInfo file encryption info
    */
   public HdfsLocatedFileStatus(long length, boolean isdir,
-      int block_replication, long blocksize, boolean isLazyPersist,
-      long modification_time, long access_time, FsPermission permission,
-      String owner, String group, byte[] symlink, byte[] path, long fileId,
-      LocatedBlocks locations, int childrenNum, FileEncryptionInfo feInfo,
-      byte storagePolicy) {
-    super(length, isdir, block_replication, blocksize, isLazyPersist,
-        modification_time, access_time, permission, owner, group, symlink,
-        path, fileId, childrenNum, feInfo, storagePolicy);
+      int block_replication, long blocksize, long modification_time,
+      long access_time, FsPermission permission, String owner, String group,
+      byte[] symlink, byte[] path, long fileId, LocatedBlocks locations,
+      int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy) {
+    super(length, isdir, block_replication, blocksize, modification_time,
+        access_time, permission, owner, group, symlink, path, fileId,
+        childrenNum, feInfo, storagePolicy);
     this.locations = locations;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index 2b2d921d79d..31feb1e53e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@ -60,7 +60,7 @@ public SnapshottableDirectoryStatus(long modification_time, long access_time,
       FsPermission permission, String owner, String group, byte[] localName,
       long inodeId, int childrenNum, int snapshotNumber, int snapshotQuota,
       byte[] parentFullPath) {
-    this.dirStatus = new HdfsFileStatus(0, true, 0, 0, false, modification_time,
+    this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
         access_time, permission, owner, group, null, localName, inodeId,
         childrenNum, null, BlockStoragePolicySuite.ID_UNSPECIFIED);
     this.snapshotNumber = snapshotNumber;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 4692356b7de..6470a1c91ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -1410,7 +1410,6 @@ public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
     return new HdfsLocatedFileStatus(
         fs.getLength(), fs.getFileType().equals(FileType.IS_DIR),
         fs.getBlockReplication(), fs.getBlocksize(),
-        fs.hasIsLazyPersist() ? fs.getIsLazyPersist() : false,
         fs.getModificationTime(), fs.getAccessTime(),
         PBHelper.convert(fs.getPermission()), fs.getOwner(), fs.getGroup(),
         fs.getFileType().equals(FileType.IS_SYMLINK) ?
@@ -1460,7 +1459,6 @@ public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
         setFileType(fType).
         setBlockReplication(fs.getReplication()).
         setBlocksize(fs.getBlockSize()).
-        setIsLazyPersist(fs.isLazyPersist()).
         setModificationTime(fs.getModificationTime()).
         setAccessTime(fs.getAccessTime()).
         setPermission(PBHelper.convert(fs.getPermission())).
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index dbe039c9b39..9ef227400c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -54,12 +54,6 @@ public interface BlockCollection {
    */
   public long getPreferredBlockSize();
 
-  /**
-   * Return true if the file was created with {@Link CreateFlag#LAZY_PERSIST}.
-   * @return
-   */
-  public boolean getLazyPersistFlag();
-
   /**
    * Get block replication for the collection
    * @return block replication value
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index fe85155e41b..640791f85c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -402,6 +402,10 @@ public BlockStoragePolicy getStoragePolicy(final String policyName) {
     return storagePolicySuite.getPolicy(policyName);
   }
 
+  public BlockStoragePolicy getStoragePolicy(final byte policyId) {
+    return storagePolicySuite.getPolicy(policyId);
+  }
+
   public BlockStoragePolicy[] getStoragePolicies() {
     return storagePolicySuite.getAllPolicies();
   }
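The new getStoragePolicy(byte) overload above is a thin delegate to the policy suite, letting NameNode code resolve a file's policy id back to a policy object. The same lookup can be sketched directly against the suite (a minimal sketch, not NameNode internals):

    import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

    public class PolicyLookup {
      public static void main(String[] args) {
        BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
        BlockStoragePolicy byName = suite.getPolicy("LAZY_PERSIST");
        BlockStoragePolicy byId = suite.getPolicy(byName.getId());
        System.out.println(byId.getName() + " id=" + byId.getId()
            + " copyOnCreateFile=" + byId.isCopyOnCreateFile());
      }
    }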
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
index 1d162a0b3c6..caaabad9341 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
@@ -44,6 +44,12 @@ public class BlockStoragePolicySuite {
   public static BlockStoragePolicySuite createDefaultSuite() {
     final BlockStoragePolicy[] policies =
         new BlockStoragePolicy[1 << ID_BIT_LENGTH];
+    final byte lazyPersistId = 15;
+    policies[lazyPersistId] = new BlockStoragePolicy(lazyPersistId, "LAZY_PERSIST",
+        new StorageType[]{StorageType.RAM_DISK, StorageType.DISK},
+        new StorageType[]{StorageType.DISK},
+        new StorageType[]{StorageType.DISK},
+        true);    // Cannot be changed on regular files, but inherited.
     final byte hotId = 12;
     policies[hotId] = new BlockStoragePolicy(hotId, "HOT",
         new StorageType[]{StorageType.DISK}, StorageType.EMPTY_ARRAY,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index be72266ec9c..7abed90eeec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -294,7 +294,7 @@ private void addVolume(Collection<StorageLocation> dataLocations,
     // If IOException raises from FsVolumeImpl() or getVolumeMap(), there is
     // nothing needed to be rolled back to make various data structures, e.g.,
     // storageMap and asyncDiskService, consistent.
-    FsVolumeImpl fsVolume = FsVolumeImplAllocator.createVolume(
+    FsVolumeImpl fsVolume = new FsVolumeImpl(
         this, sd.getStorageUuid(), dir, this.conf, storageType);
     ReplicaMap tempVolumeMap = new ReplicaMap(this);
     fsVolume.getVolumeMap(tempVolumeMap, ramDiskReplicaTracker);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsTransientVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsTransientVolumeImpl.java
deleted file mode 100644
index dafa74f5615..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsTransientVolumeImpl.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.*;
-import java.util.concurrent.ThreadPoolExecutor;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.StorageType;
-
-/**
- * Volume for storing replicas in memory. These can be deleted at any time
- * to make space for new replicas and there is no persistence guarantee.
- *
- * The backing store for these replicas is expected to be RAM_DISK.
- * The backing store may be disk when testing.
- *
- * It uses the {@link FsDatasetImpl} object for synchronization.
- */
-@InterfaceAudience.Private
-@VisibleForTesting
-public class FsTransientVolumeImpl extends FsVolumeImpl {
-
-
-  FsTransientVolumeImpl(FsDatasetImpl dataset, String storageID, File currentDir,
-                        Configuration conf, StorageType storageType)
-      throws IOException {
-    super(dataset, storageID, currentDir, conf, storageType);
-  }
-
-  @Override
-  protected ThreadPoolExecutor initializeCacheExecutor(File parent) {
-    // Can't 'cache' replicas already in RAM.
-    return null;
-  }
-
-  @Override
-  public boolean isTransientStorage() {
-    return true;
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 9e8499c26e1..60fb71d63b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -96,6 +96,10 @@ public class FsVolumeImpl implements FsVolumeSpi {
   }
 
   protected ThreadPoolExecutor initializeCacheExecutor(File parent) {
+    if (storageType.isTransient()) {
+      return null;
+    }
+
     final int maxNumThreads = dataset.datanode.getConf().getInt(
         DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY,
         DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_DEFAULT);
@@ -203,7 +207,7 @@ public String getBasePath() {
 
   @Override
   public boolean isTransientStorage() {
-    return false;
+    return storageType.isTransient();
   }
 
   @Override
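With isTransientStorage() computed from the volume's StorageType, one FsVolumeImpl now covers both RAM and disk volumes: the FsTransientVolumeImpl subclass above and the allocator deleted below become dead weight, and the cache-executor guard moves into the base class. The distinction reduces to the StorageType predicates, as in this small sketch:

    import org.apache.hadoop.hdfs.StorageType;

    public class TransienceCheck {
      public static void main(String[] args) {
        for (StorageType t : StorageType.values()) {
          // A volume's isTransientStorage() now simply mirrors its type.
          System.out.println(t + ": transient=" + t.isTransient()
              + ", movable=" + t.isMovable());
        }
      }
    }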
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplAllocator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplAllocator.java
deleted file mode 100644
index 14d3aaffef3..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplAllocator.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
-
-import java.io.File;
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.StorageType;
-
-/**
- * Generate volumes based on the storageType.
- */
-@InterfaceAudience.Private
-class FsVolumeImplAllocator {
-  static FsVolumeImpl createVolume(FsDatasetImpl fsDataset, String storageUuid,
-      File dir, Configuration conf, StorageType storageType)
-      throws IOException {
-    switch(storageType) {
-      case RAM_DISK:
-        return new FsTransientVolumeImpl(
-            fsDataset, storageUuid, dir, conf, storageType);
-      default:
-        return new FsVolumeImpl(
-            fsDataset, storageUuid, dir, conf, storageType);
-    }
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index ab9e8b38392..ed8523dda2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -60,6 +60,7 @@
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
@@ -277,18 +278,18 @@ void disableQuotaChecks() {
   }
 
   private static INodeFile newINodeFile(long id, PermissionStatus permissions,
-      long mtime, long atime, short replication, long preferredBlockSize,
-      boolean isLazyPersist) {
-    return newINodeFile(id, permissions, mtime, atime, replication, preferredBlockSize,
-        isLazyPersist, (byte)0);
+      long mtime, long atime, short replication, long preferredBlockSize) {
+    return new INodeFile(id, null, permissions, mtime, atime,
+        BlockInfo.EMPTY_ARRAY, replication, preferredBlockSize,
+        (byte) 0);
   }
 
   private static INodeFile newINodeFile(long id, PermissionStatus permissions,
       long mtime, long atime, short replication, long preferredBlockSize,
-      boolean isLazyPersist, byte storagePolicyId) {
+      byte storagePolicyId) {
     return new INodeFile(id, null, permissions, mtime, atime,
         BlockInfo.EMPTY_ARRAY, replication, preferredBlockSize,
-        isLazyPersist, storagePolicyId);
+        storagePolicyId);
   }
 
   /**
@@ -300,15 +301,13 @@ private static INodeFile newINodeFile(long id, PermissionStatus permissions,
    */
   INodeFile addFile(String path, PermissionStatus permissions,
                     short replication, long preferredBlockSize,
-                    boolean isLazyPersist,
                     String clientName, String clientMachine)
     throws FileAlreadyExistsException, QuotaExceededException,
       UnresolvedLinkException, SnapshotAccessControlException, AclException {
 
     long modTime = now();
     INodeFile newNode = newINodeFile(namesystem.allocateNewInodeId(),
-        permissions, modTime, modTime, replication, preferredBlockSize,
-        isLazyPersist);
+        permissions, modTime, modTime, replication, preferredBlockSize);
     newNode.toUnderConstruction(clientName, clientMachine);
 
     boolean added = false;
@@ -338,7 +337,6 @@ INodeFile unprotectedAddFile( long id,
                             long modificationTime,
                             long atime,
                             long preferredBlockSize,
-                            boolean isLazyPersist,
                             boolean underConstruction,
                             String clientName,
                             String clientMachine,
@@ -347,12 +345,12 @@ INodeFile unprotectedAddFile( long id,
     assert hasWriteLock();
     if (underConstruction) {
       newNode = newINodeFile(id, permissions, modificationTime,
-          modificationTime, replication, preferredBlockSize, isLazyPersist, storagePolicyId);
+          modificationTime, replication, preferredBlockSize, storagePolicyId);
       newNode.toUnderConstruction(clientName, clientMachine);
     } else {
       newNode = newINodeFile(id, permissions, modificationTime, atime,
-          replication, preferredBlockSize, isLazyPersist, storagePolicyId);
+          replication, preferredBlockSize, storagePolicyId);
     }
 
     try {
@@ -1040,6 +1038,20 @@ void unprotectedSetStoragePolicy(String src, byte policyId)
     }
     final int snapshotId = iip.getLatestSnapshotId();
     if (inode.isFile()) {
+      BlockStoragePolicy newPolicy = getBlockManager().getStoragePolicy(policyId);
+      if (newPolicy.isCopyOnCreateFile()) {
+        throw new HadoopIllegalArgumentException(
+            "Policy " + newPolicy + " cannot be set after file creation.");
+      }
+
+      BlockStoragePolicy currentPolicy =
+          getBlockManager().getStoragePolicy(inode.getLocalStoragePolicyID());
+
+      if (currentPolicy != null && currentPolicy.isCopyOnCreateFile()) {
+        throw new HadoopIllegalArgumentException(
+            "Existing policy " + currentPolicy.getName() +
+            " cannot be changed after file creation.");
+      }
       inode.asFile().setStoragePolicyID(policyId, snapshotId);
     } else if (inode.isDirectory()) {
       setDirStoragePolicy(inode.asDirectory(), policyId, snapshotId);
@@ -1546,7 +1558,7 @@ HdfsFileStatus getFileInfo(String src, boolean resolveLink,
   private HdfsFileStatus getFileInfo4DotSnapshot(String src)
       throws UnresolvedLinkException {
     if (getINode4DotSnapshot(src) != null) {
-      return new HdfsFileStatus(0, true, 0, 0, false, 0, 0, null, null, null, null,
+      return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
           HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
           BlockStoragePolicySuite.ID_UNSPECIFIED);
     }
@@ -2406,7 +2418,6 @@ HdfsFileStatus createFileStatus(byte[] path, INode node, byte storagePolicy,
     long size = 0;     // length is zero for directories
     short replication = 0;
     long blocksize = 0;
-    boolean isLazyPersist = false;
     final boolean isEncrypted;
 
     final FileEncryptionInfo feInfo = isRawPath ? null :
@@ -2417,7 +2428,6 @@ HdfsFileStatus createFileStatus(byte[] path, INode node, byte storagePolicy,
       size = fileNode.computeFileSize(snapshot);
       replication = fileNode.getFileReplication(snapshot);
       blocksize = fileNode.getPreferredBlockSize();
-      isLazyPersist = fileNode.getLazyPersistFlag();
       isEncrypted = (feInfo != null) ||
           (isRawPath && isInAnEZ(INodesInPath.fromINode(node)));
     } else {
@@ -2432,7 +2442,6 @@ HdfsFileStatus createFileStatus(byte[] path, INode node, byte storagePolicy,
         node.isDirectory(),
         replication,
         blocksize,
-        isLazyPersist,
         node.getModificationTime(snapshot),
         node.getAccessTime(snapshot),
         getPermissionForFileStatus(node, snapshot, isEncrypted),
@@ -2456,7 +2465,6 @@ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path, INode node,
     long size = 0; // length is zero for directories
     short replication = 0;
     long blocksize = 0;
-    boolean isLazyPersist = false;
     LocatedBlocks loc = null;
     final boolean isEncrypted;
     final FileEncryptionInfo feInfo = isRawPath ? null :
@@ -2466,7 +2474,6 @@ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path, INode node,
       size = fileNode.computeFileSize(snapshot);
       replication = fileNode.getFileReplication(snapshot);
       blocksize = fileNode.getPreferredBlockSize();
-      isLazyPersist = fileNode.getLazyPersistFlag();
 
       final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID;
       final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
@@ -2489,7 +2496,7 @@ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path, INode node,
     HdfsLocatedFileStatus status =
         new HdfsLocatedFileStatus(size, node.isDirectory(), replication,
-          blocksize, isLazyPersist, node.getModificationTime(snapshot),
+          blocksize, node.getModificationTime(snapshot),
           node.getAccessTime(snapshot),
           getPermissionForFileStatus(node, snapshot, isEncrypted),
           node.getUserName(snapshot), node.getGroupName(snapshot),
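unprotectedSetStoragePolicy() above now refuses to move a file into or out of a copy-on-create policy such as LAZY_PERSIST. A hedged client-side sketch of the expected failure (the path is illustrative, and over RPC the HadoopIllegalArgumentException may surface wrapped in a RemoteException):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class CannotRepolicize {
      public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        Path p = new Path("/tmp/scratch.dat");  // created with LAZY_PERSIST
        try {
          dfs.setStoragePolicy(p, "HOT");
        } catch (Exception e) {
          // Expected: a copy-on-create policy cannot be changed after
          // file creation.
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }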
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 114e1099d60..e952e7cb9e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -714,7 +714,6 @@ public void logOpenFile(String path, INodeFile newNode, boolean overwrite,
       .setModificationTime(newNode.getModificationTime())
       .setAccessTime(newNode.getAccessTime())
       .setBlockSize(newNode.getPreferredBlockSize())
-      .setLazyPersistFlag(newNode.getLazyPersistFlag())
       .setBlocks(newNode.getBlocks())
       .setPermissionStatus(permissions)
       .setClientName(newNode.getFileUnderConstructionFeature().getClientName())
@@ -747,7 +746,6 @@ public void logCloseFile(String path, INodeFile newNode) {
       .setModificationTime(newNode.getModificationTime())
       .setAccessTime(newNode.getAccessTime())
       .setBlockSize(newNode.getPreferredBlockSize())
-      .setLazyPersistFlag(newNode.getLazyPersistFlag())
       .setBlocks(newNode.getBlocks())
       .setPermissionStatus(newNode.getPermissionStatus());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index a8b76629533..1b2b4ae625d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -365,8 +365,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
             path, addCloseOp.permissions, addCloseOp.aclEntries,
             addCloseOp.xAttrs,
             replication, addCloseOp.mtime, addCloseOp.atime,
-            addCloseOp.blockSize, addCloseOp.isLazyPersist,
-            true, addCloseOp.clientName,
+            addCloseOp.blockSize, true, addCloseOp.clientName,
             addCloseOp.clientMachine, addCloseOp.storagePolicyId);
         fsNamesys.leaseManager.addLease(addCloseOp.clientName, path);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index 107a0917e0c..c9eb9fe1a90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -404,7 +404,6 @@ static abstract class AddCloseOp extends FSEditLogOp implements BlockListUpdatin
     long mtime;
     long atime;
     long blockSize;
-    boolean isLazyPersist;
     Block[] blocks;
     PermissionStatus permissions;
     List<AclEntry> aclEntries;
@@ -416,6 +415,7 @@ static abstract class AddCloseOp extends FSEditLogOp implements BlockListUpdatin
 
     private AddCloseOp(FSEditLogOpCodes opCode) {
       super(opCode);
+      storagePolicyId = BlockStoragePolicySuite.ID_UNSPECIFIED;
       assert(opCode == OP_ADD || opCode == OP_CLOSE);
     }
 
@@ -454,11 +454,6 @@ <T extends AddCloseOp> T setBlockSize(long blockSize) {
       return (T)this;
     }
 
-    <T extends AddCloseOp> T setLazyPersistFlag(boolean isLazyPersist) {
-      this.isLazyPersist = isLazyPersist;
-      return (T)this;
-    }
-
     <T extends AddCloseOp> T setBlocks(Block[] blocks) {
       if (blocks.length > MAX_BLOCKS) {
         throw new RuntimeException("Can't have more than " + MAX_BLOCKS +
@@ -516,7 +511,6 @@ public void writeFields(DataOutputStream out) throws IOException {
       FSImageSerialization.writeLong(mtime, out);
       FSImageSerialization.writeLong(atime, out);
       FSImageSerialization.writeLong(blockSize, out);
-      FSImageSerialization.writeInt((isLazyPersist ? 1 : 0), out);
       new ArrayWritable(Block.class, blocks).write(out);
       permissions.write(out);
 
@@ -586,13 +580,6 @@ void readFields(DataInputStream in, int logVersion)
         this.blockSize = readLong(in);
       }
 
-      if (NameNodeLayoutVersion.supports(
-          NameNodeLayoutVersion.Feature.LAZY_PERSIST_FILES, logVersion)) {
-        this.isLazyPersist = (FSImageSerialization.readInt(in) != 0);
-      } else {
-        this.isLazyPersist = false;
-      }
-
       this.blocks = readBlocks(in, logVersion);
       this.permissions = PermissionStatus.read(in);
 
@@ -658,8 +645,6 @@ public String stringifyMembers() {
       builder.append(atime);
       builder.append(", blockSize=");
      builder.append(blockSize);
-      builder.append(", lazyPersist");
-      builder.append(isLazyPersist);
       builder.append(", blocks=");
       builder.append(Arrays.toString(blocks));
       builder.append(", permissions=");
@@ -700,8 +685,6 @@ protected void toXml(ContentHandler contentHandler) throws SAXException {
           Long.toString(atime));
       XMLUtils.addSaxString(contentHandler, "BLOCKSIZE",
           Long.toString(blockSize));
-      XMLUtils.addSaxString(contentHandler, "LAZY_PERSIST",
-          Boolean.toString(isLazyPersist));
       XMLUtils.addSaxString(contentHandler, "CLIENT_NAME", clientName);
       XMLUtils.addSaxString(contentHandler, "CLIENT_MACHINE", clientMachine);
       XMLUtils.addSaxString(contentHandler, "OVERWRITE",
@@ -728,10 +711,6 @@ void fromXml(Stanza st) throws InvalidXmlException {
       this.atime = Long.parseLong(st.getValue("ATIME"));
       this.blockSize = Long.parseLong(st.getValue("BLOCKSIZE"));
 
-      String lazyPersistString = st.getValueOrNull("LAZY_PERSIST");
-      this.isLazyPersist =
-          lazyPersistString != null && Boolean.parseBoolean(lazyPersistString);
-
       this.clientName = st.getValue("CLIENT_NAME");
       this.clientMachine = st.getValue("CLIENT_MACHINE");
       this.overwrite = Boolean.parseBoolean(st.getValueOrNull("OVERWRITE"));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index f53afe1b9ea..b1e7dfeeb91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -784,8 +784,6 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
       counter.increment();
     }
 
-    // Images in the old format will not have the lazyPersist flag so it is
-    // safe to pass false always.
     final INodeFile file = new INodeFile(inodeId, localName, permissions,
         modificationTime, atime, blocks, replication, blockSize);
     if (underConstruction) {
@@ -887,10 +885,8 @@ public INodeFileAttributes loadINodeFileAttributes(DataInput in)
         in.readShort());
     final long preferredBlockSize = in.readLong();
 
-    // LazyPersist flag will not be present in old image formats and hence
-    // can be safely set to false always.
     return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
-        accessTime, replication, preferredBlockSize, false, (byte) 0, null);
+        accessTime, replication, preferredBlockSize, (byte) 0, null);
   }
 
   public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 81bce86794e..321a14855ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -291,7 +291,6 @@ private INodeFile loadINodeFile(INodeSection.INode n) {
     final INodeFile file = new INodeFile(n.getId(),
         n.getName().toByteArray(), permissions, f.getModificationTime(),
         f.getAccessTime(), blocks, replication, f.getPreferredBlockSize(),
-        f.hasIsLazyPersist() ? f.getIsLazyPersist() : false,
         (byte)f.getStoragePolicyID());
 
     if (f.hasAcl()) {
@@ -404,7 +403,6 @@ public static INodeSection.INodeFile.Builder buildINodeFile(
         .setPermission(buildPermissionStatus(file, state.getStringMap()))
         .setPreferredBlockSize(file.getPreferredBlockSize())
         .setReplication(file.getFileReplication())
-        .setIsLazyPersist(file.getLazyPersistFlag())
         .setStoragePolicyID(file.getLocalStoragePolicyID());
 
     AclFeature f = file.getAclFeature();

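In the FSNamesystem change below, setNewINodeStoragePolicy() stamps the policy onto the inode at create time, either from the LAZY_PERSIST create flag or inherited from an ancestor directory's copy-on-create policy, so a client can read it back immediately. A sketch, assuming the file from the earlier create example and the id 15 that this patch registers for LAZY_PERSIST:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    public class ReadBackPolicy {
      public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        HdfsFileStatus st = dfs.getClient().getFileInfo("/tmp/scratch.dat");
        // Expected to print 15, the LAZY_PERSIST id from BlockStoragePolicySuite.
        System.out.println("storage policy id = " + st.getStoragePolicy());
      }
    }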
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 8cf4d61f465..082e3bfa439 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -362,7 +362,7 @@ private void logAuditEvent(boolean succeeded,
       Path symlink = stat.isSymlink() ? new Path(stat.getSymlink()) : null;
       Path path = dst != null ? new Path(dst) : new Path(src);
       status = new FileStatus(stat.getLen(), stat.isDir(),
-          stat.getReplication(), stat.getBlockSize(), stat.isLazyPersist(),
+          stat.getReplication(), stat.getBlockSize(),
           stat.getModificationTime(),
           stat.getAccessTime(), stat.getPermission(), stat.getOwner(),
           stat.getGroup(), symlink, path);
@@ -2340,6 +2340,7 @@ private void setStoragePolicyInt(String src, final String policyName)
       }
 
       src = FSDirectory.resolvePath(src, pathComponents, dir);
+      INode inode = dir.getINode(src);
 
       // get the corresponding policy and make sure the policy name is valid
       BlockStoragePolicy policy = blockManager.getStoragePolicy(policyName);
@@ -2726,7 +2727,7 @@ private BlocksMapUpdateInfo startFileInternal(FSPermissionChecker pc,
       if (parent != null && mkdirsRecursively(parent.toString(),
               permissions, true, now())) {
         newNode = dir.addFile(src, permissions, replication, blockSize,
-                              isLazyPersist, holder, clientMachine);
+                              holder, clientMachine);
       }
 
       if (newNode == null) {
@@ -2741,6 +2742,8 @@ permissions, true, now())) {
         newNode = dir.getInode(newNode.getId()).asFile();
       }
 
+      setNewINodeStoragePolicy(newNode, iip, isLazyPersist);
+
       // record file record in log, record new generation stamp
       getEditLog().logOpenFile(src, newNode, overwrite, logRetryEntry);
       if (NameNode.stateChangeLog.isDebugEnabled()) {
@@ -2755,6 +2758,37 @@ permissions, true, now())) {
     }
   }
 
+  private void setNewINodeStoragePolicy(INodeFile inode,
+                                        INodesInPath iip,
+                                        boolean isLazyPersist)
+      throws IOException {
+
+    if (isLazyPersist) {
+      BlockStoragePolicy lpPolicy =
+          blockManager.getStoragePolicy("LAZY_PERSIST");
+
+      // Set LAZY_PERSIST storage policy if the flag was passed to
+      // CreateFile.
+      if (lpPolicy == null) {
+        throw new HadoopIllegalArgumentException(
+            "The LAZY_PERSIST storage policy has been disabled " +
+            "by the administrator.");
+      }
+      inode.setStoragePolicyID(lpPolicy.getId(),
+                               iip.getLatestSnapshotId());
+    } else {
+      BlockStoragePolicy effectivePolicy =
+          blockManager.getStoragePolicy(inode.getStoragePolicyID());
+
+      if (effectivePolicy != null &&
+          effectivePolicy.isCopyOnCreateFile()) {
+        // Copy effective policy from ancestor directory to current file.
+        inode.setStoragePolicyID(effectivePolicy.getId(),
+                                 iip.getLatestSnapshotId());
+      }
+    }
+  }
+
   /**
    * Append to an existing file for append.
    *
@@ -2794,8 +2828,11 @@ private LocatedBlock appendFileInternal(FSPermissionChecker pc, String src,
             + src + " for client " + clientMachine);
       }
       INodeFile myFile = INodeFile.valueOf(inode, src, true);
+      final BlockStoragePolicy lpPolicy =
+          blockManager.getStoragePolicy("LAZY_PERSIST");
 
-      if (myFile.getLazyPersistFlag()) {
+      if (lpPolicy != null &&
+          lpPolicy.getId() == myFile.getStoragePolicyID()) {
         throw new UnsupportedOperationException(
             "Cannot append to lazy persist file " + src);
       }
@@ -5145,6 +5182,8 @@ private void clearCorruptLazyPersistFiles()
       throws SafeModeException, AccessControlException,
       UnresolvedLinkException, IOException {
 
+    BlockStoragePolicy lpPolicy = blockManager.getStoragePolicy("LAZY_PERSIST");
+
     List<BlockCollection> filesToDelete = new ArrayList<BlockCollection>();
 
     writeLock();
@@ -5155,7 +5194,7 @@ private void clearCorruptLazyPersistFiles()
       while (it.hasNext()) {
         Block b = it.next();
         BlockInfo blockInfo = blockManager.getStoredBlock(b);
-        if (blockInfo.getBlockCollection().getLazyPersistFlag()) {
+        if (blockInfo.getBlockCollection().getStoragePolicyID() == lpPolicy.getId()) {
           filesToDelete.add(blockInfo.getBlockCollection());
         }
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index d2a2de33f19..dde36c307de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -78,10 +78,9 @@ public static INodeFile valueOf(INode inode, String path, boolean acceptNull)
    */
   static enum HeaderFormat {
     PREFERRED_BLOCK_SIZE(null, 48, 1),
-    REPLICATION(PREFERRED_BLOCK_SIZE.BITS, 11, 1),
+    REPLICATION(PREFERRED_BLOCK_SIZE.BITS, 12, 1),
     STORAGE_POLICY_ID(REPLICATION.BITS, BlockStoragePolicySuite.ID_BIT_LENGTH,
-        0),
-    LAZY_PERSIST(STORAGE_POLICY_ID.BITS, 1, 0);
+        0);
 
     private final LongBitFormat BITS;
 
@@ -97,21 +96,16 @@ static long getPreferredBlockSize(long header) {
       return PREFERRED_BLOCK_SIZE.BITS.retrieve(header);
     }
 
-    static boolean getLazyPersistFlag(long header) {
-      return LAZY_PERSIST.BITS.retrieve(header) == 0 ? false : true;
-    }
-
     static byte getStoragePolicyID(long header) {
       return (byte)STORAGE_POLICY_ID.BITS.retrieve(header);
     }
 
     static long toLong(long preferredBlockSize, short replication,
-        boolean isLazyPersist, byte storagePolicyID) {
+        byte storagePolicyID) {
       long h = 0;
       h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h);
       h = REPLICATION.BITS.combine(replication, h);
       h = STORAGE_POLICY_ID.BITS.combine(storagePolicyID, h);
-      h = LAZY_PERSIST.BITS.combine(isLazyPersist ? 1 : 0, h);
       return h;
     }
 
@@ -125,15 +119,14 @@ static long toLong(long preferredBlockSize, short replication,
       long atime, BlockInfo[] blklist, short replication,
       long preferredBlockSize) {
     this(id, name, permissions, mtime, atime, blklist, replication,
-        preferredBlockSize, false, (byte) 0);
+        preferredBlockSize, (byte) 0);
   }
 
   INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime,
       long atime, BlockInfo[] blklist, short replication,
-      long preferredBlockSize, boolean isLazyPersist, byte storagePolicyID) {
+      long preferredBlockSize, byte storagePolicyID) {
     super(id, name, permissions, mtime, atime);
-    header = HeaderFormat.toLong(preferredBlockSize, replication,
-        isLazyPersist, storagePolicyID);
+    header = HeaderFormat.toLong(preferredBlockSize, replication, storagePolicyID);
     this.blocks = blklist;
   }
 
@@ -381,11 +374,6 @@ public long getPreferredBlockSize() {
     return HeaderFormat.getPreferredBlockSize(header);
   }
 
-  @Override
-  public boolean getLazyPersistFlag() {
-    return HeaderFormat.getLazyPersistFlag(header);
-  }
-
   @Override
   public byte getLocalStoragePolicyID() {
     return HeaderFormat.getStoragePolicyID(header);
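The HeaderFormat change above gives the freed LAZY_PERSIST bit back to REPLICATION (11 bits to 12), keeping the packed 64-bit inode header: 48 bits of preferred block size in the low bits, then 12 bits of replication, then 4 bits of storage policy id. A standalone mirror of that layout in plain bit math (LongBitFormat computes the same offsets from each previous field):

    public class HeaderLayout {
      // [4 bits policy id][12 bits replication][48 bits preferred block size]
      static long toLong(long blockSize, long replication, long policyId) {
        return (policyId << 60) | (replication << 48) | blockSize;
      }

      public static void main(String[] args) {
        long h = toLong(128L << 20, 3, 15);
        System.out.println("policy=" + (h >>> 60)
            + " repl=" + ((h >>> 48) & 0xFFF)
            + " blockSize=" + (h & ((1L << 48) - 1)));
      }
    }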
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
index c9dd66d1340..0f85bab5feb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
@@ -32,8 +32,6 @@ public interface INodeFileAttributes extends INodeAttributes {
   /** @return preferred block size in bytes */
   public long getPreferredBlockSize();
 
-  public boolean getLazyPersistFlag();
-
   /** @return the header as a long. */
   public long getHeaderLong();
 
@@ -48,12 +46,12 @@ public static class SnapshotCopy extends INodeAttributes.SnapshotCopy
     public SnapshotCopy(byte[] name, PermissionStatus permissions,
         AclFeature aclFeature, long modificationTime, long accessTime,
-        short replication, long preferredBlockSize, boolean isLazyPersist,
+        short replication, long preferredBlockSize,
         byte storagePolicyID, XAttrFeature xAttrsFeature) {
       super(name, permissions, aclFeature, modificationTime, accessTime,
           xAttrsFeature);
       header = HeaderFormat.toLong(preferredBlockSize, replication,
-          isLazyPersist, storagePolicyID);
+          storagePolicyID);
     }
 
     public SnapshotCopy(INodeFile file) {
@@ -71,9 +69,6 @@ public long getPreferredBlockSize() {
       return HeaderFormat.getPreferredBlockSize(header);
     }
 
-    @Override
-    public boolean getLazyPersistFlag() { return HeaderFormat.getLazyPersistFlag(header); }
-
     @Override
     public byte getLocalStoragePolicyID() {
       return HeaderFormat.getStoragePolicyID(header);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
index 323b239ad4f..512913b3acc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
@@ -69,9 +69,7 @@ public static enum Feature implements LayoutFeature {
     CREATE_OVERWRITE(-58, "Use single editlog record for " +
       "creating file with overwrite"),
     XATTRS_NAMESPACE_EXT(-59, "Increase number of xattr namespaces"),
-    BLOCK_STORAGE_POLICY(-60, "Block Storage policy"),
-    LAZY_PERSIST_FILES(-60, "Support for optional lazy persistence of " +
-        " files with reduced durability guarantees");
+    BLOCK_STORAGE_POLICY(-60, "Block Storage policy");
 
     private final FeatureInfo info;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
index 0312f6e1293..ff332253c80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
@@ -221,7 +221,6 @@ private void loadFileDiffList(InputStream in, INodeFile file, int size)
             .toByteArray(), permission, acl, fileInPb.getModificationTime(),
             fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
             fileInPb.getPreferredBlockSize(),
-            fileInPb.hasIsLazyPersist() ? fileInPb.getIsLazyPersist() : false,
             (byte)fileInPb.getStoragePolicyID(), xAttrs);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
index 7ad1c597027..bab83a132f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
@@ -391,7 +391,6 @@ private long getINodeId(String strPath) {
           f.getPermission(), stringTable);
       map.put("accessTime", f.getAccessTime());
       map.put("blockSize", f.getPreferredBlockSize());
-      map.put("lazyPersist", f.getIsLazyPersist());
       map.put("group", p.getGroupName());
       map.put("length", getFileSize(f));
       map.put("modificationTime", f.getModificationTime());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
index 744fc754b91..99617b805ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
@@ -247,10 +247,6 @@ private void dumpINodeFile(INodeSection.INodeFile f) {
         .o("perferredBlockSize", f.getPreferredBlockSize())
         .o("permission", dumpPermission(f.getPermission()));
 
-    if (f.hasIsLazyPersist()) {
-      o("lazyPersist", f.getIsLazyPersist());
-    }
-
     if (f.getBlocksCount() > 0) {
       out.print("<blocks>");
       for (BlockProto b : f.getBlocksList()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index e67cc6b9541..0a2ae2652db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -269,7 +269,7 @@ public static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includes
         (byte) (long) (Long) m.get("storagePolicy") :
         BlockStoragePolicySuite.ID_UNSPECIFIED;
     return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
-        blockSize, isLazyPersist, mTime, aTime, permission, owner, group,
+        blockSize, mTime, aTime, permission, owner, group,
         symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum,
         null, storagePolicy);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
index 85edfaba868..588f6c86122 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
@@ -139,7 +139,6 @@ message INodeSection {
     optional AclFeatureProto acl = 8;
     optional XAttrFeatureProto xAttrs = 9;
     optional uint32 storagePolicyID = 10;
-    optional bool isLazyPersist = 11 [default = false];
   }
 
   message INodeDirectory {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index a0f0b09d861..10af3b8b61b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -310,7 +310,6 @@ message HdfsFileStatusProto {
   optional FileEncryptionInfoProto fileEncryptionInfo = 15;
 
   optional uint32 storagePolicy = 16 [default = 0]; // block storage policy id
-  optional bool isLazyPersist = 17 [default = false];
 }
 
 /**
-310,7 +310,6 @@ message HdfsFileStatusProto { optional FileEncryptionInfoProto fileEncryptionInfo = 15; optional uint32 storagePolicy = 16 [default = 0]; // block storage policy id - optional bool isLazyPersist = 17 [default = false]; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index 67b83de1ba4..9c9111c7a5b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -254,12 +254,12 @@ public Object answer(InvocationOnMock invocation) anyLong(), any(String[].class))).thenAnswer(answer); Mockito.doReturn( - new HdfsFileStatus(0, false, 1, 1024, false, 0, 0, new FsPermission( + new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( (short) 777), "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0)).when(mockNN).getFileInfo(anyString()); Mockito.doReturn( - new HdfsFileStatus(0, false, 1, 1024, false, 0, 0, new FsPermission( + new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( (short) 777), "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0)) .when(mockNN) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index f34f76086af..df1864c7e76 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -716,7 +716,7 @@ public void testVersionAndSuiteNegotiation() throws Exception { private static void mockCreate(ClientProtocol mcp, CipherSuite suite, CryptoProtocolVersion version) throws Exception { Mockito.doReturn( - new HdfsFileStatus(0, false, 1, 1024, false, 0, 0, new FsPermission( + new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( (short) 777), "owner", "group", new byte[0], new byte[0], 1010, 0, new FileEncryptionInfo(suite, version, new byte[suite.getAlgorithmBlockSize()], diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java index e88b64e53e8..5d93db4e47a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java @@ -342,12 +342,12 @@ public void testFactory() throws Exception { } Mockito.doReturn( - new HdfsFileStatus(0, false, 1, 1024, false, 0, 0, new FsPermission( + new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( (short) 777), "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0)).when(mcp).getFileInfo(anyString()); Mockito .doReturn( - new HdfsFileStatus(0, false, 1, 1024, false, 0, 0, new FsPermission( + new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( (short) 777), "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0)) .when(mcp) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java index 
95404b31b45..928d0d0e5c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java @@ -56,6 +56,7 @@ import static org.apache.hadoop.hdfs.StorageType.DEFAULT; import static org.apache.hadoop.hdfs.StorageType.RAM_DISK; import static org.hamcrest.core.Is.is; +import static org.hamcrest.core.IsNot.not; import static org.junit.Assert.assertThat; import static org.junit.Assert.fail; @@ -68,6 +69,8 @@ public class TestLazyPersistFiles { ((Log4JLogger) FsDatasetImpl.LOG).getLogger().setLevel(Level.ALL); } + private static final byte LAZY_PERSIST_POLICY_ID = (byte) 15; + private static final int THREADPOOL_SIZE = 10; private static final short REPL_FACTOR = 1; @@ -100,19 +103,20 @@ public void shutDownCluster() throws IOException { } @Test (timeout=300000) - public void testFlagNotSetByDefault() throws IOException { + public void testPolicyNotSetByDefault() throws IOException { startUpCluster(false, -1); final String METHOD_NAME = GenericTestUtils.getMethodName(); Path path = new Path("/" + METHOD_NAME + ".dat"); makeTestFile(path, 0, false); - // Stat the file and check that the lazyPersist flag is returned back. + // Stat the file and check that the LAZY_PERSIST policy is not + // returned back. HdfsFileStatus status = client.getFileInfo(path.toString()); - assertThat(status.isLazyPersist(), is(false)); + assertThat(status.getStoragePolicy(), not(LAZY_PERSIST_POLICY_ID)); } @Test (timeout=300000) - public void testFlagPropagation() throws IOException { + public void testPolicyPropagation() throws IOException { startUpCluster(false, -1); final String METHOD_NAME = GenericTestUtils.getMethodName(); Path path = new Path("/" + METHOD_NAME + ".dat"); @@ -120,11 +124,11 @@ public void testFlagPropagation() throws IOException { makeTestFile(path, 0, true); // Stat the file and check that the lazyPersist flag is returned back. HdfsFileStatus status = client.getFileInfo(path.toString()); - assertThat(status.isLazyPersist(), is(true)); + assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID)); } @Test (timeout=300000) - public void testFlagPersistenceInEditLog() throws IOException { + public void testPolicyPersistenceInEditLog() throws IOException { startUpCluster(false, -1); final String METHOD_NAME = GenericTestUtils.getMethodName(); Path path = new Path("/" + METHOD_NAME + ".dat"); @@ -134,11 +138,11 @@ public void testFlagPersistenceInEditLog() throws IOException { // Stat the file and check that the lazyPersist flag is returned back. HdfsFileStatus status = client.getFileInfo(path.toString()); - assertThat(status.isLazyPersist(), is(true)); + assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID)); } @Test (timeout=300000) - public void testFlagPersistenceInFsImage() throws IOException { + public void testPolicyPersistenceInFsImage() throws IOException { startUpCluster(false, -1); final String METHOD_NAME = GenericTestUtils.getMethodName(); Path path = new Path("/" + METHOD_NAME + ".dat"); @@ -152,7 +156,7 @@ public void testFlagPersistenceInFsImage() throws IOException { // Stat the file and check that the lazyPersist flag is returned back. 
     HdfsFileStatus status = client.getFileInfo(path.toString());
-    assertThat(status.isLazyPersist(), is(true));
+    assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
   }
 
   @Test (timeout=300000)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java
index 4270f8cd255..b6ac2870f58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java
@@ -255,7 +255,7 @@ private void startUpCluster(final int numDataNodes,
 
       for (FsVolumeSpi volume : volumes) {
         if (volume.getStorageType() == RAM_DISK) {
-          ((FsTransientVolumeImpl) volume).setCapacityForTesting(ramDiskStorageLimit);
+          ((FsVolumeImpl) volume).setCapacityForTesting(ramDiskStorageLimit);
         }
       }
     }
@@ -263,13 +263,6 @@
     LOG.info("Cluster startup complete");
   }
 
-  private void startUpCluster(final int numDataNodes,
-                              final StorageType[] storageTypes,
-                              final long ramDiskStorageLimit)
-    throws IOException {
-    startUpCluster(numDataNodes, storageTypes, ramDiskStorageLimit, false);
-  }
-
   private void makeTestFile(Path path, long length, final boolean isLazyPersist)
       throws IOException {
 
@@ -324,14 +317,6 @@ private void makeRandomTestFile(Path path, long length, final boolean isLazyPers
         BLOCK_SIZE, REPL_FACTOR, seed, true);
   }
 
-  private boolean verifyReadRandomFile(
-    Path path, int fileLength, int seed) throws IOException {
-    byte contents[] = DFSTestUtil.readFileBuffer(fs, path);
-    byte expected[] = DFSTestUtil.
-        calculateFileContentsFromSeed(seed, fileLength);
-    return Arrays.equals(contents, expected);
-  }
-
   private void triggerBlockReport()
       throws IOException, InterruptedException {
     // Trigger block report to NN
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 7a0ca639c18..e16df16306e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -1018,7 +1018,7 @@ public void testFsckFileNotFound() throws Exception {
     byte storagePolicy = 0;
 
     HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
-        blockSize, false, modTime, accessTime, perms, owner, group, symlink,
+        blockSize, modTime, accessTime, perms, owner, group, symlink,
         path, fileId, numChildren, null, storagePolicy);
 
     Result res = new Result(conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index df42a59c2d0..4221ad5342a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -87,7 +87,7 @@ INodeFile createINodeFile(short replication, long preferredBlockSize) {
 
   private static INodeFile createINodeFile(byte storagePolicyID) {
     return new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
-        null, (short)3, 1024L, false, storagePolicyID);
+        null, (short)3, 1024L, storagePolicyID);
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index 4e9691f1b5b..3eba7db9c2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -62,7 +62,7 @@ public void testHdfsFileStatus() {
     final long now = Time.now();
     final String parent = "/dir";
     final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
-        false, now, now + 10, new FsPermission((short) 0644), "user", "group",
+        now, now + 10, new FsPermission((short) 0644), "user", "group",
         DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
         INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
     final FileStatus fstatus = toFileStatus(status, parent);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index b8fba037e39..087c3ab48aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -4554,42 +4554,6 @@
-    <test>
-      <description>put: The LazyPersist flag is set when requested</description>
-      <test-commands>
-        <command>-fs NAMENODE -mkdir /dirLp</command>
-        <command>-fs NAMENODE -put -l CLITEST_DATA/data15bytes /dirLp/data15bytes</command>
-        <command>-fs NAMENODE -stat %l /dirLp/data15bytes</command>
-      </test-commands>
-      <cleanup-commands>
-        <command>-fs NAMENODE -rm -r /dirLp/</command>
-      </cleanup-commands>
-      <comparators>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^true</expected-output>
-        </comparator>
-      </comparators>
-    </test>
-
-    <test>
-      <description>put: The LazyPersist flag is not set by default</description>
-      <test-commands>
-        <command>-fs NAMENODE -mkdir /dirLp</command>
-        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /dirLp/data15bytes</command>
-        <command>-fs NAMENODE -stat %l /dirLp/data15bytes</command>
-      </test-commands>
-      <cleanup-commands>
-        <command>-fs NAMENODE -rm -r /dirLp/</command>
-      </cleanup-commands>
-      <comparators>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^false</expected-output>
-        </comparator>
-      </comparators>
-    </test>
-
     <test>
       <description>copyFromLocal: copying file into a file (absolute path)</description>