HDFS-12681. Fold HdfsLocatedFileStatus into HdfsFileStatus.

Chris Douglas 2017-11-03 14:30:57 -07:00
parent 4d2dce40bb
commit b85603e3f8
12 changed files with 151 additions and 217 deletions

View File

@@ -135,7 +135,16 @@ public class LocatedFileStatus extends FileStatus {
public BlockLocation[] getBlockLocations() {
return locations;
}
/**
* Hook for subclasses to lazily set block locations. The {@link #locations}
* field should be null before this is called.
* @param locations Block locations for this instance.
*/
protected void setBlockLocations(BlockLocation[] locations) {
this.locations = locations;
}
/**
* Compare this FileStatus to another FileStatus
* @param o the FileStatus to be compared.

View File

@@ -48,7 +48,6 @@ import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.AccessControlException;
@@ -188,8 +187,7 @@ public class Hdfs extends AbstractFileSystem {
@Override
public LocatedFileStatus next() throws IOException {
return ((HdfsLocatedFileStatus)getNext()).makeQualifiedLocated(
getUri(), p);
return getNext().makeQualifiedLocated(getUri(), p);
}
};
}

View File

@@ -85,7 +85,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsPathHandle;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
@@ -1212,8 +1211,7 @@ public class DistributedFileSystem extends FileSystem {
T next;
HdfsFileStatus fileStat = thisListing.getPartialListing()[i++];
if (needLocation) {
next = (T)((HdfsLocatedFileStatus)fileStat)
.makeQualifiedLocated(getUri(), p);
next = (T)fileStat.makeQualifiedLocated(getUri(), p);
} else {
next = (T)fileStat.makeQualified(getUri(), p);
}

View File

@@ -26,15 +26,17 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtilClient;
/** Interface that represents the over the wire information for a file.
/**
* HDFS metadata for an entity in the filesystem.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class HdfsFileStatus extends FileStatus {
public final class HdfsFileStatus extends LocatedFileStatus {
private static final long serialVersionUID = 0x126eb82a;
@@ -49,11 +51,12 @@ public class HdfsFileStatus extends FileStatus {
private final int childrenNum;
private final byte storagePolicy;
// BlockLocation[] is the user-facing type
private transient LocatedBlocks hdfsloc;
public static final byte[] EMPTY_NAME = new byte[0];
/**
* Set of features potentially active on an instance.
*/
/** Set of features potentially active on an instance. */
public enum Flags {
HAS_ACL,
HAS_CRYPT,
@@ -81,18 +84,19 @@ public class HdfsFileStatus extends FileStatus {
* @param storagePolicy ID which specifies storage policy
* @param ecPolicy the erasure coding policy
*/
protected HdfsFileStatus(long length, boolean isdir, int replication,
private HdfsFileStatus(long length, boolean isdir, int replication,
long blocksize, long mtime, long atime,
FsPermission permission, EnumSet<Flags> flags,
String owner, String group,
byte[] symlink, byte[] path, long fileId,
int childrenNum, FileEncryptionInfo feInfo,
byte storagePolicy, ErasureCodingPolicy ecPolicy) {
byte storagePolicy, ErasureCodingPolicy ecPolicy,
LocatedBlocks hdfsloc) {
super(length, isdir, replication, blocksize, mtime,
atime, convert(isdir, symlink != null, permission, flags),
owner, group, null, null,
flags.contains(Flags.HAS_ACL), flags.contains(Flags.HAS_CRYPT),
flags.contains(Flags.HAS_EC));
flags.contains(Flags.HAS_EC), null);
this.flags = flags;
this.uSymlink = symlink;
this.uPath = path;
@@ -101,6 +105,7 @@ public class HdfsFileStatus extends FileStatus {
this.feInfo = feInfo;
this.storagePolicy = storagePolicy;
this.ecPolicy = ecPolicy;
this.hdfsloc = hdfsloc;
}
/**
@@ -152,7 +157,7 @@
* Check if the local name is empty.
* @return true if the name is empty
*/
public final boolean isEmptyLocalName() {
public boolean isEmptyLocalName() {
return uPath.length == 0;
}
@@ -160,7 +165,7 @@
* Get the string representation of the local name.
* @return the local name in string
*/
public final String getLocalName() {
public String getLocalName() {
return DFSUtilClient.bytes2String(uPath);
}
@@ -168,7 +173,7 @@
* Get the Java UTF8 representation of the local name.
* @return the local name in java UTF8
*/
public final byte[] getLocalNameInBytes() {
public byte[] getLocalNameInBytes() {
return uPath;
}
@@ -177,7 +182,7 @@
* @param parent the parent path
* @return the full path in string
*/
public final String getFullName(final String parent) {
public String getFullName(String parent) {
if (isEmptyLocalName()) {
return parent;
}
@@ -195,7 +200,7 @@
* @param parent the parent path
* @return the full path
*/
public final Path getFullPath(final Path parent) {
public Path getFullPath(Path parent) {
if (isEmptyLocalName()) {
return parent;
}
@@ -219,15 +224,15 @@
/**
* Opaque referent for the symlink, to be resolved at the client.
*/
public final byte[] getSymlinkInBytes() {
public byte[] getSymlinkInBytes() {
return uSymlink;
}
public final long getFileId() {
public long getFileId() {
return fileId;
}
public final FileEncryptionInfo getFileEncryptionInfo() {
public FileEncryptionInfo getFileEncryptionInfo() {
return feInfo;
}
@@ -239,12 +244,12 @@
return ecPolicy;
}
public final int getChildrenNum() {
public int getChildrenNum() {
return childrenNum;
}
/** @return the storage policy id */
public final byte getStoragePolicy() {
public byte getStoragePolicy() {
return storagePolicy;
}
@@ -257,6 +262,10 @@
return flags.contains(Flags.SNAPSHOT_ENABLED);
}
public LocatedBlocks getLocatedBlocks() {
return hdfsloc;
}
@Override
public boolean equals(Object o) {
// satisfy findbugs
@@ -277,11 +286,30 @@
* @param parent Parent path of this element.
* @return Reference to this instance.
*/
public final FileStatus makeQualified(URI defaultUri, Path parent) {
public FileStatus makeQualified(URI defaultUri, Path parent) {
// fully-qualify path
setPath(getFullPath(parent).makeQualified(defaultUri, null));
return this; // API compatibility
}
/**
* This function is used to transform the underlying HDFS LocatedBlocks to
* BlockLocations. This method must be invoked before
* {@link #getBlockLocations()}.
*
* The returned BlockLocations have different formats for replicated and
* erasure-coded files.
* Please refer to
* {@link org.apache.hadoop.fs.FileSystem#getFileBlockLocations
* (FileStatus, long, long)}
* for examples.
*/
public LocatedFileStatus makeQualifiedLocated(URI defaultUri,
Path path) {
makeQualified(defaultUri, path);
setBlockLocations(
DFSUtilClient.locatedBlocks2Locations(getLocatedBlocks()));
return this;
}
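A short sketch of the call order the Javadoc above mandates: qualify first, then read the lazily populated array (the helper class is illustrative, not part of this commit):

import java.net.URI;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

// makeQualifiedLocated() converts the transient LocatedBlocks into
// BlockLocation[]; calling getBlockLocations() first would return null.
final class LocationReader {
  static BlockLocation[] read(HdfsFileStatus status, URI fsUri, Path parent) {
    status.makeQualifiedLocated(fsUri, parent);
    return status.getBlockLocations();
  }
}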
/**
@@ -311,6 +339,7 @@
private byte storagePolicy =
HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
private ErasureCodingPolicy ecPolicy = null;
private LocatedBlocks locations = null;
/**
* Set the length of the entity (default = 0).
@@ -489,13 +518,24 @@
return this;
}
/**
* Set the block locations for this entity (default = null).
* @param locations HDFS locations
* (see {@link #makeQualifiedLocated(URI, Path)})
* @return This Builder instance
*/
public Builder locations(LocatedBlocks locations) {
this.locations = locations;
return this;
}
/**
* @return An {@link HdfsFileStatus} instance from these parameters.
*/
public HdfsFileStatus build() {
return new HdfsFileStatus(length, isdir, replication, blocksize,
mtime, atime, permission, flags, owner, group, symlink, path, fileId,
childrenNum, feInfo, storagePolicy, ecPolicy);
childrenNum, feInfo, storagePolicy, ecPolicy, locations);
}
}
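With the subtype gone, the Builder is the single construction path for statuses with and without locations; a minimal sketch (other fields keep their Builder defaults):

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

final class BuildExample {
  // Illustrative: the only difference from a location-less status is the
  // locations(...) setter; passing null yields a status without locations.
  static HdfsFileStatus withLocations(byte[] path, LocatedBlocks blocks) {
    return new HdfsFileStatus.Builder()
        .path(path)
        .locations(blocks)
        .build();
  }
}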

View File

@@ -1,110 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.net.URI;
import java.util.EnumSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtilClient;
/**
* Interface that represents the over the wire information
* including block locations for a file.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class HdfsLocatedFileStatus extends HdfsFileStatus {
private static final long serialVersionUID = 0x23c73328;
/**
* Left transient, because {@link #makeQualifiedLocated(URI,Path)}
* is the user-facing type.
*/
private transient LocatedBlocks locations;
/**
* Constructor
*
* @param length size
* @param isdir if this is directory
* @param block_replication the file's replication factor
* @param blocksize the file's block size
* @param modification_time most recent modification time
* @param access_time most recent access time
* @param permission permission
* @param owner owner
* @param group group
* @param symlink symbolic link
* @param path local path name in java UTF8 format
* @param fileId the file id
* @param locations block locations
* @param feInfo file encryption info
*/
public HdfsLocatedFileStatus(long length, boolean isdir,
int block_replication, long blocksize, long modification_time,
long access_time, FsPermission permission, EnumSet<Flags> flags,
String owner, String group, byte[] symlink, byte[] path, long fileId,
LocatedBlocks locations, int childrenNum, FileEncryptionInfo feInfo,
byte storagePolicy, ErasureCodingPolicy ecPolicy) {
super(length, isdir, block_replication, blocksize, modification_time,
access_time, permission, flags, owner, group, symlink, path, fileId,
childrenNum, feInfo, storagePolicy, ecPolicy);
this.locations = locations;
}
public LocatedBlocks getBlockLocations() {
return locations;
}
/**
* This function is used to transform the underlying HDFS LocatedBlocks to
* BlockLocations.
*
* The returned BlockLocations have different formats for replicated and
* erasure-coded files.
* Please refer to
* {@link org.apache.hadoop.fs.FileSystem#getFileBlockLocations
* (FileStatus, long, long)}
* for examples.
*/
public final LocatedFileStatus makeQualifiedLocated(URI defaultUri,
Path path) {
makeQualified(defaultUri, path);
return new LocatedFileStatus(this,
DFSUtilClient.locatedBlocks2Locations(getBlockLocations()));
}
@Override
public boolean equals(Object o) {
// satisfy findbugs
return super.equals(o);
}
@Override
public int hashCode() {
// satisfy findbugs
return super.hashCode();
}
}

View File

@@ -90,7 +90,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsPathHandle;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -1585,23 +1584,36 @@ public class PBHelperClient {
EnumSet<HdfsFileStatus.Flags> flags = fs.hasFlags()
? convertFlags(fs.getFlags())
: convertFlags(fs.getPermission());
return new HdfsLocatedFileStatus(
fs.getLength(), fs.getFileType().equals(FileType.IS_DIR),
fs.getBlockReplication(), fs.getBlocksize(),
fs.getModificationTime(), fs.getAccessTime(),
convert(fs.getPermission()),
flags,
fs.getOwner(), fs.getGroup(),
fs.getFileType().equals(FileType.IS_SYMLINK) ?
fs.getSymlink().toByteArray() : null,
fs.getPath().toByteArray(),
fs.hasFileId()? fs.getFileId(): HdfsConstants.GRANDFATHER_INODE_ID,
fs.hasLocations() ? convert(fs.getLocations()) : null,
fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
: HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
fs.hasEcPolicy() ? convertErasureCodingPolicy(fs.getEcPolicy()) : null);
return new HdfsFileStatus.Builder()
.length(fs.getLength())
.isdir(fs.getFileType().equals(FileType.IS_DIR))
.replication(fs.getBlockReplication())
.blocksize(fs.getBlocksize())
.mtime(fs.getModificationTime())
.atime(fs.getAccessTime())
.perm(convert(fs.getPermission()))
.flags(flags)
.owner(fs.getOwner())
.group(fs.getGroup())
.symlink(FileType.IS_SYMLINK.equals(fs.getFileType())
? fs.getSymlink().toByteArray()
: null)
.path(fs.getPath().toByteArray())
.fileId(fs.hasFileId()
? fs.getFileId()
: HdfsConstants.GRANDFATHER_INODE_ID)
.locations(fs.hasLocations() ? convert(fs.getLocations()) : null)
.children(fs.hasChildrenNum() ? fs.getChildrenNum() : -1)
.feInfo(fs.hasFileEncryptionInfo()
? convert(fs.getFileEncryptionInfo())
: null)
.storagePolicy(fs.hasStoragePolicy()
? (byte) fs.getStoragePolicy()
: HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED)
.ecPolicy(fs.hasEcPolicy()
? convertErasureCodingPolicy(fs.getEcPolicy())
: null)
.build();
}
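Because locations now live on HdfsFileStatus itself, proto conversion can round-trip them without an instanceof check; a hedged sketch assuming the existing public convert overloads:

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;

// Illustrative round trip: getLocatedBlocks() is non-null on the result
// whenever the original status carried locations.
final class StatusRoundTrip {
  static HdfsFileStatus roundTrip(HdfsFileStatus fs) {
    HdfsFileStatusProto proto = PBHelperClient.convert(fs);
    return PBHelperClient.convert(proto);
  }
}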
private static EnumSet<HdfsFileStatus.Flags> convertFlags(int flags) {
@@ -1864,10 +1876,10 @@ public class PBHelperClient {
if (dl == null)
return null;
List<HdfsFileStatusProto> partList = dl.getPartialListingList();
return new DirectoryListing(partList.isEmpty() ?
new HdfsLocatedFileStatus[0] :
convert(partList.toArray(new HdfsFileStatusProto[partList.size()])),
dl.getRemainingEntries());
return new DirectoryListing(partList.isEmpty()
? new HdfsFileStatus[0]
: convert(partList.toArray(new HdfsFileStatusProto[partList.size()])),
dl.getRemainingEntries());
}
public static HdfsFileStatus[] convert(HdfsFileStatusProto[] fs) {
@@ -2161,12 +2173,9 @@ public class PBHelperClient {
if (fs.getFileEncryptionInfo() != null) {
builder.setFileEncryptionInfo(convert(fs.getFileEncryptionInfo()));
}
if (fs instanceof HdfsLocatedFileStatus) {
final HdfsLocatedFileStatus lfs = (HdfsLocatedFileStatus) fs;
LocatedBlocks locations = lfs.getBlockLocations();
if (locations != null) {
builder.setLocations(convert(locations));
}
LocatedBlocks locations = fs.getLocatedBlocks();
if (locations != null) {
builder.setLocations(convert(locations));
}
if(fs.getErasureCodingPolicy() != null) {
builder.setEcPolicy(convertErasureCodingPolicy(

View File

@@ -260,11 +260,10 @@
<Method name="visitFile" />
<Bug pattern="NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE" />
</Match>
<!-- HdfsFileStatus is user-facing, but HdfsLocatedFileStatus is not.
Defensible compatibility choices over time create odd corners. -->
<!-- BlockLocations are user-facing, but LocatedBlocks are not. -->
<Match>
<Class name="org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus" />
<Field name="locations" />
<Class name="org.apache.hadoop.hdfs.protocol.HdfsFileStatus" />
<Field name="hdfsloc" />
<Bug pattern="SE_TRANSIENT_FIELD_NOT_RESTORED" />
</Match>
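The exclusion reflects a deliberate choice: hdfsloc is transient and simply absent after Java serialization, so a deserialized status reports no located blocks. A hedged illustration (HdfsFileStatus is Serializable via FileStatus, per the serialVersionUID above):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

// After a serialize/deserialize cycle the transient field is dropped,
// which is exactly what the findbugs exclusion documents.
final class TransientDemo {
  static HdfsFileStatus copy(HdfsFileStatus in) throws Exception {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
      oos.writeObject(in);
    }
    try (ObjectInputStream ois = new ObjectInputStream(
        new ByteArrayInputStream(bos.toByteArray()))) {
      return (HdfsFileStatus) ois.readObject(); // getLocatedBlocks() == null
    }
  }
}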
<Match>

View File

@@ -364,7 +364,7 @@ public class Mover {
if (!isSnapshotPathInCurrent(fullPath)) {
// the full path is a snapshot path but it is also included in the
// current directory tree, thus ignore it.
processFile(fullPath, (HdfsLocatedFileStatus) status, result);
processFile(fullPath, status, result);
}
} catch (IOException e) {
LOG.warn("Failed to check the status of " + parent
@@ -374,7 +374,7 @@
}
/** @return true if it is necessary to run another round of migration */
private void processFile(String fullPath, HdfsLocatedFileStatus status,
private void processFile(String fullPath, HdfsFileStatus status,
Result result) {
byte policyId = status.getStoragePolicy();
if (policyId == HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
@@ -395,7 +395,7 @@
status.getReplication());
final ErasureCodingPolicy ecPolicy = status.getErasureCodingPolicy();
final LocatedBlocks locatedBlocks = status.getBlockLocations();
final LocatedBlocks locatedBlocks = status.getLocatedBlocks();
final boolean lastBlkComplete = locatedBlocks.isLastBlockComplete();
List<LocatedBlock> lbs = locatedBlocks.getLocatedBlocks();
for (int i = 0; i < lbs.size(); i++) {

View File

@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -256,14 +255,13 @@
listing[i] =
createFileStatus(fsd, iip, child, childStoragePolicy, needLocation);
listingCnt++;
if (listing[i] instanceof HdfsLocatedFileStatus) {
// Once we hit lsLimit locations, stop.
// This helps to prevent excessively large response payloads.
// Approximate #locations with locatedBlockCount() * repl_factor
LocatedBlocks blks =
((HdfsLocatedFileStatus)listing[i]).getBlockLocations();
locationBudget -= (blks == null) ? 0 :
blks.locatedBlockCount() * listing[i].getReplication();
LocatedBlocks blks = listing[i].getLocatedBlocks();
if (blks != null) {
// Once we hit lsLimit locations, stop.
// This helps to prevent excessively large response payloads.
// Approximate #locations with locatedBlockCount() * repl_factor
locationBudget -=
blks.locatedBlockCount() * listing[i].getReplication();
}
}
// truncate return array if necessary
@@ -486,31 +484,26 @@
String owner, String group, byte[] symlink, byte[] path, long fileId,
int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy,
ErasureCodingPolicy ecPolicy, LocatedBlocks locations) {
if (locations == null) {
return new HdfsFileStatus.Builder()
.length(length)
.isdir(isdir)
.replication(replication)
.blocksize(blocksize)
.mtime(mtime)
.atime(atime)
.perm(permission)
.flags(flags)
.owner(owner)
.group(group)
.symlink(symlink)
.path(path)
.fileId(fileId)
.children(childrenNum)
.feInfo(feInfo)
.storagePolicy(storagePolicy)
.ecPolicy(ecPolicy)
.build();
} else {
return new HdfsLocatedFileStatus(length, isdir, replication, blocksize,
mtime, atime, permission, flags, owner, group, symlink, path,
fileId, locations, childrenNum, feInfo, storagePolicy, ecPolicy);
}
return new HdfsFileStatus.Builder()
.length(length)
.isdir(isdir)
.replication(replication)
.blocksize(blocksize)
.mtime(mtime)
.atime(atime)
.perm(permission)
.flags(flags)
.owner(owner)
.group(group)
.symlink(symlink)
.path(path)
.fileId(fileId)
.children(childrenNum)
.feInfo(feInfo)
.storagePolicy(storagePolicy)
.ecPolicy(ecPolicy)
.locations(locations)
.build();
}
private static ContentSummary getContentSummaryInt(FSDirectory fsd,

View File

@@ -1092,11 +1092,11 @@ public class TestBlockStoragePolicy {
return types;
}
private void checkLocatedBlocks(HdfsLocatedFileStatus status, int blockNum,
private void checkLocatedBlocks(HdfsFileStatus status, int blockNum,
int replicaNum, StorageType... types) {
List<StorageType> typeList = Lists.newArrayList();
Collections.addAll(typeList, types);
LocatedBlocks lbs = status.getBlockLocations();
LocatedBlocks lbs = status.getLocatedBlocks();
Assert.assertEquals(blockNum, lbs.getLocatedBlocks().size());
for (LocatedBlock lb : lbs.getLocatedBlocks()) {
Assert.assertEquals(replicaNum, lb.getStorageTypes().length);
@@ -1127,7 +1127,7 @@
HdfsFileStatus[] status = fs.getClient().listPaths(foo.toString(),
HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
checkDirectoryListing(status, policyId);
HdfsLocatedFileStatus fooStatus = (HdfsLocatedFileStatus) status[0];
HdfsFileStatus fooStatus = status[0];
checkLocatedBlocks(fooStatus, 1, 3, before);
// change the replication factor to 5
@@ -1140,7 +1140,7 @@
status = fs.getClient().listPaths(foo.toString(),
HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
checkDirectoryListing(status, policyId);
fooStatus = (HdfsLocatedFileStatus) status[0];
fooStatus = status[0];
checkLocatedBlocks(fooStatus, 1, numDataNodes, after);
// change the replication factor back to 3
@@ -1157,7 +1157,7 @@
status = fs.getClient().listPaths(foo.toString(),
HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
checkDirectoryListing(status, policyId);
fooStatus = (HdfsLocatedFileStatus) status[0];
fooStatus = status[0];
checkLocatedBlocks(fooStatus, 1, REPLICATION, before);
} finally {
cluster.shutdown();

View File

@@ -274,7 +274,7 @@ public class TestDFSOutputStream {
when(client.getTracer()).thenReturn(FsTracer.get(new Configuration()));
client.clientRunning = true;
DataStreamer stream = new DataStreamer(
mock(HdfsFileStatus.class),
new HdfsFileStatus.Builder().build(),
mock(ExtendedBlock.class),
client,
"foo", null, null, null, null, null, null);

View File

@@ -44,7 +44,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
@@ -312,18 +311,17 @@
private void verifyFile(final Path parent, final HdfsFileStatus status,
final Byte expectedPolicyId) throws Exception {
HdfsLocatedFileStatus fileStatus = (HdfsLocatedFileStatus) status;
byte policyId = fileStatus.getStoragePolicy();
byte policyId = status.getStoragePolicy();
BlockStoragePolicy policy = policies.getPolicy(policyId);
if (expectedPolicyId != null) {
Assert.assertEquals((byte)expectedPolicyId, policy.getId());
}
final List<StorageType> types = policy.chooseStorageTypes(
status.getReplication());
for(LocatedBlock lb : fileStatus.getBlockLocations().getLocatedBlocks()) {
for(LocatedBlock lb : status.getLocatedBlocks().getLocatedBlocks()) {
final Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types,
lb.getStorageTypes());
Assert.assertTrue(fileStatus.getFullName(parent.toString())
Assert.assertTrue(status.getFullName(parent.toString())
+ " with policy " + policy + " has non-empty overlap: " + diff
+ ", the corresponding block is " + lb.getBlock().getLocalBlock(),
diff.removeOverlap(true));