HDFS-12882. Support full open(PathHandle) contract in HDFS

Chris Douglas 2017-12-11 20:14:15 -08:00
parent 55fc2d6485
commit 693169ef34
29 changed files with 387 additions and 157 deletions
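The commit extends the PathHandle contract so that a handle minted by DistributedFileSystem can be re-opened later with its constraints re-checked by the NameNode. A minimal client-side sketch of the flow exercised by the contract tests below; the path, buffer size, and variable names are illustrative, and getPathHandle is assumed to be the public FileSystem entry point that delegates to the createPathHandle override changed in this commit:

import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options.HandleOpt;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;

public class PathHandleSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus stat = fs.getFileStatus(new Path("/tmp/example")); // illustrative path

    // exact(): resolution fails if the file is either moved or modified.
    // The tests below also exercise content(), path(), and reference().
    PathHandle fd = fs.getPathHandle(stat, HandleOpt.exact());

    // The handle is an opaque, serializable token; it can be persisted and
    // handed to another process.
    ByteBuffer wire = fd.bytes();

    // Re-open by handle; the NameNode re-checks the encoded constraints.
    try (FSDataInputStream in = fs.open(fd, 4096)) {
      byte[] buf = new byte[4096];
      int n = in.read(buf);
      System.out.println("read " + n + " bytes");
    }
  }
}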

View File

@@ -218,14 +218,18 @@ public void testOpenFileByExact() throws Throwable {
     Path path2 = path("testopenfilebyexact2");
     byte[] file1 = dataset(TEST_FILE_LEN, 43, 255);
     createFile(getFileSystem(), path1, false, file1);
-    FileStatus stat = getFileSystem().getFileStatus(path1);
-    assertNotNull(stat);
-    assertEquals(path1, stat.getPath());
+    FileStatus stat1 = getFileSystem().getFileStatus(path1);
+    assertNotNull(stat1);
+    assertEquals(path1, stat1.getPath());
     ContractTestUtils.rename(getFileSystem(), path1, path2);
+    FileStatus stat2 = getFileSystem().getFileStatus(path2);
+    assertNotNull(stat2);
+    assertEquals(path2, stat2.getPath());
     // create identical file at same location, orig still exists at path2
     createFile(getFileSystem(), path1, false, file1);
-    PathHandle fd = getHandleOrSkip(stat, HandleOpt.exact());
+    PathHandle fd1 = getHandleOrSkip(stat1, HandleOpt.exact());
+    PathHandle fd2 = getHandleOrSkip(stat2, HandleOpt.exact());
     // verify path1, path2 contents identical
     verifyFileContents(getFileSystem(), path1, file1);
@@ -235,11 +239,15 @@ public void testOpenFileByExact() throws Throwable {
       // the original entity exists, it has not been modified, and an
       // identical file exists at the old path. The handle would also
       // fail to resolve if path1 had been modified
-      instream = getFileSystem().open(fd, 1 << 15);
+      instream = getFileSystem().open(fd1);
       fail("Expected an exception");
     } catch (IOException e) {
      // expected
     }
+    // verify unchanged resolves
+    instream = getFileSystem().open(fd2);
+    verifyRead(instream, file1, 0, TEST_FILE_LEN);
   }

   /**
@@ -265,7 +273,7 @@ public void testOpenFileByContent() throws Throwable {
     // obtain handle to entity from #getFileStatus call
     PathHandle fd = getHandleOrSkip(stat, HandleOpt.content());
-    try (FSDataInputStream in = getFileSystem().open(fd, 1 << 15)) {
+    try (FSDataInputStream in = getFileSystem().open(fd)) {
       // verify read of consistent content at new location
       verifyRead(in, file1, 0, TEST_FILE_LEN);
     }
@@ -280,7 +288,7 @@ public void testOpenFileByContent() throws Throwable {
     try {
       // handle should not resolve when content changed
-      instream = getFileSystem().open(fd, 1 << 15);
+      instream = getFileSystem().open(fd);
       fail("Failed to detect change to content");
     } catch (IOException e) {
       // expected
@@ -302,25 +310,40 @@ public void testOpenFileByPath() throws Throwable {
     byte[] file1 = dataset(TEST_FILE_LEN, 43, 255);
     createFile(getFileSystem(), path1, false, file1);
-    FileStatus stat = getFileSystem().getFileStatus(path1);
-    assertNotNull(stat);
-    assertEquals(path1, stat.getPath());
+    FileStatus stat1 = getFileSystem().getFileStatus(path1);
+    assertNotNull(stat1);
+    assertEquals(path1, stat1.getPath());
     ContractTestUtils.rename(getFileSystem(), path1, path2);
+    FileStatus stat2 = getFileSystem().getFileStatus(path2);
+    assertNotNull(stat2);
+    assertEquals(path2, stat2.getPath());
     // create identical file at same location, orig still exists at path2
     createFile(getFileSystem(), path1, false, file1);
-    PathHandle fd = getHandleOrSkip(stat, HandleOpt.path());
+    PathHandle fd1 = getHandleOrSkip(stat1, HandleOpt.path());
+    PathHandle fd2 = getHandleOrSkip(stat2, HandleOpt.path());
     // verify path1, path2 contents identical
     verifyFileContents(getFileSystem(), path1, file1);
     verifyFileContents(getFileSystem(), path2, file1);
     try {
       // verify attempt to resolve the handle fails
-      instream = getFileSystem().open(fd, 1 << 15);
+      instream = getFileSystem().open(fd1);
       fail("Expected an exception");
     } catch (IOException e) {
       // expected
     }
+    // verify content change OK
+    byte[] file2a = dataset(TEST_FILE_LEN, 44, 255);
+    ContractTestUtils.appendFile(getFileSystem(), path2, file2a);
+    byte[] file2x = Arrays.copyOf(file1, file1.length + file2a.length);
+    System.arraycopy(file2a, 0, file2x, file1.length, file2a.length);
+    // verify path2 contains contents of orig + appended bytes
+    verifyFileContents(getFileSystem(), path2, file2x);
+    // verify open by fd succeeds
+    instream = getFileSystem().open(fd2);
+    verifyRead(instream, file2x, 0, 2 * TEST_FILE_LEN);
   }

   /**
@@ -357,8 +380,8 @@ public void testOpenFileByReference() throws Throwable {
     verifyFileContents(getFileSystem(), path1, file2);
     // verify fd contains contents of file1 + appended bytes
-    instream = getFileSystem().open(fd, 1 << 15);
-    verifyRead(instream, file1x, 0, TEST_FILE_LEN);
+    instream = getFileSystem().open(fd);
+    verifyRead(instream, file1x, 0, 2 * TEST_FILE_LEN);
   }

   /**
@@ -388,7 +411,7 @@ public void testOpenFileBySerializedReference() throws Throwable {
     ByteBuffer sb = fd.bytes();
     PathHandle fdb = new RawPathHandle(sb);
-    instream = getFileSystem().open(fdb, 1 << 15);
+    instream = getFileSystem().open(fdb);
     // verify stat contains contents of file1
     verifyRead(instream, file1, 0, TEST_FILE_LEN);
     // verify path2 contains contents of file1

View File

@@ -125,6 +125,8 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsPathHandle;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -1015,16 +1017,46 @@ public DFSInputStream open(String src, int buffersize, boolean verifyChecksum)
     // Get block info from namenode
     try (TraceScope ignored = newPathTraceScope("newDFSInputStream", src)) {
       LocatedBlocks locatedBlocks = getLocatedBlocks(src, 0);
-      if (locatedBlocks != null) {
-        ErasureCodingPolicy ecPolicy = locatedBlocks.getErasureCodingPolicy();
-        if (ecPolicy != null) {
-          return new DFSStripedInputStream(this, src, verifyChecksum, ecPolicy,
-              locatedBlocks);
-        }
-        return new DFSInputStream(this, src, verifyChecksum, locatedBlocks);
-      } else {
-        throw new IOException("Cannot open filename " + src);
-      }
+      return openInternal(locatedBlocks, src, verifyChecksum);
     }
   }

+  /**
+   * Create an input stream from the {@link HdfsPathHandle} if the
+   * constraints encoded from {@link
+   * DistributedFileSystem#createPathHandle(FileStatus, Options.HandleOpt...)}
+   * are satisfied. Note that HDFS does not ensure that these constraints
+   * remain invariant for the life of the stream. It only checks that they
+   * still held when the stream was opened.
+   * @param fd Handle to an entity in HDFS, with constraints
+   * @param buffersize ignored
+   * @param verifyChecksum Verify checksums before returning data to client
+   * @return Data from the referent of the {@link HdfsPathHandle}.
+   * @throws IOException On I/O error
+   */
+  public DFSInputStream open(HdfsPathHandle fd, int buffersize,
+      boolean verifyChecksum) throws IOException {
+    checkOpen();
+    String src = fd.getPath();
+    try (TraceScope ignored = newPathTraceScope("newDFSInputStream", src)) {
+      HdfsLocatedFileStatus s = getLocatedFileInfo(src, true);
+      fd.verify(s); // check invariants in path handle
+      LocatedBlocks locatedBlocks = s.getLocatedBlocks();
+      return openInternal(locatedBlocks, src, verifyChecksum);
+    }
+  }
+
+  private DFSInputStream openInternal(LocatedBlocks locatedBlocks, String src,
+      boolean verifyChecksum) throws IOException {
+    if (locatedBlocks != null) {
+      ErasureCodingPolicy ecPolicy = locatedBlocks.getErasureCodingPolicy();
+      if (ecPolicy != null) {
+        return new DFSStripedInputStream(this, src, verifyChecksum, ecPolicy,
+            locatedBlocks);
+      }
+      return new DFSInputStream(this, src, verifyChecksum, locatedBlocks);
+    } else {
+      throw new IOException("Cannot open filename " + src);
+    }
+  }
@@ -1647,6 +1679,30 @@ public HdfsFileStatus getFileInfo(String src) throws IOException {
     }
   }

+  /**
+   * Get the file info for a specific file or directory.
+   * @param src The string representation of the path to the file
+   * @param needBlockToken Include block tokens in {@link LocatedBlocks}.
+   *     When block tokens are included, this call is a superset of
+   *     {@link #getBlockLocations(String, long)}.
+   * @return object containing information regarding the file
+   *         or null if file not found
+   *
+   * @see DFSClient#open(HdfsPathHandle, int, boolean)
+   * @see ClientProtocol#getFileInfo(String) for description of
+   *     exceptions
+   */
+  public HdfsLocatedFileStatus getLocatedFileInfo(String src,
+      boolean needBlockToken) throws IOException {
+    checkOpen();
+    try (TraceScope ignored = newPathTraceScope("getLocatedFileInfo", src)) {
+      return namenode.getLocatedFileInfo(src, needBlockToken);
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+          FileNotFoundException.class,
+          UnresolvedPathException.class);
+    }
+  }
+
   /**
    * Close status of a file
    * @return true if file is already closed
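As the javadoc above states, the handle constraints are verified only at open time. A self-contained sketch of what that means for a caller; the NameNode URI and the hand-built handle are placeholders for illustration, since in practice the handle comes from DistributedFileSystem#createPathHandle:

import java.net.URI;
import java.util.Optional;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.protocol.HdfsPathHandle;

public class OpenByHandleSketch {
  public static void main(String[] args) throws Exception {
    DFSClient client = new DFSClient(new URI("hdfs://localhost:8020"), // placeholder
        new Configuration());
    HdfsPathHandle fd = new HdfsPathHandle("/tmp/example",              // placeholder
        Optional.empty(), Optional.empty());

    // Constraints (path/inodeId/mtime) are checked exactly once, inside open();
    // a rename or append that happens afterwards does not invalidate the
    // already-opened stream.
    try (DFSInputStream in = client.open(fd, 4096, true)) {
      byte[] buf = new byte[8192];
      int n = in.read(buf); // reads from the blocks resolved at open time
      System.out.println("read " + n + " bytes");
    }
  }
}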

View File

@@ -115,7 +115,7 @@
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
-import java.util.stream.Collectors;
+import java.util.Optional;

 /****************************************************************
  * Implementation of the abstract FileSystem for the DFS system.
@@ -340,11 +340,14 @@ public FSDataInputStream next(final FileSystem fs, final Path p)
   @Override
   public FSDataInputStream open(PathHandle fd, int bufferSize)
       throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.OPEN);
     if (!(fd instanceof HdfsPathHandle)) {
       fd = new HdfsPathHandle(fd.bytes());
     }
     HdfsPathHandle id = (HdfsPathHandle) fd;
-    return open(DFSUtilClient.makePathFromFileId(id.getInodeId()), bufferSize);
+    final DFSInputStream dfsis = dfs.open(id, bufferSize, verifyChecksum);
+    return dfs.createWrappedInputStream(dfsis);
   }

   /**
@@ -358,7 +361,7 @@ public FSDataInputStream open(PathHandle fd, int bufferSize)
    * @return A handle to the file.
    */
   @Override
-  protected PathHandle createPathHandle(FileStatus st, HandleOpt... opts) {
+  protected HdfsPathHandle createPathHandle(FileStatus st, HandleOpt... opts) {
     if (!(st instanceof HdfsFileStatus)) {
       throw new IllegalArgumentException("Invalid FileStatus "
           + st.getClass().getSimpleName());
@@ -373,12 +376,21 @@ protected PathHandle createPathHandle(FileStatus st, HandleOpt... opts) {
         .orElse(HandleOpt.changed(false));
     HandleOpt.Location loc = HandleOpt.getOpt(HandleOpt.Location.class, opts)
         .orElse(HandleOpt.moved(false));
-    if (!data.allowChange() || !loc.allowChange()) {
-      throw new UnsupportedOperationException("Unsupported opts "
-          + Arrays.stream(opts)
-          .map(HandleOpt::toString).collect(Collectors.joining(",")));
+
+    HdfsFileStatus hst = (HdfsFileStatus) st;
+    final Path p;
+    final Optional<Long> inodeId;
+    if (loc.allowChange()) {
+      p = DFSUtilClient.makePathFromFileId(hst.getFileId());
+      inodeId = Optional.empty();
+    } else {
+      p = hst.getPath();
+      inodeId = Optional.of(hst.getFileId());
     }
-    return new HdfsPathHandle((HdfsFileStatus)st);
+    final Optional<Long> mtime = !data.allowChange()
+        ? Optional.of(hst.getModificationTime())
+        : Optional.empty();
+    return new HdfsPathHandle(getPathName(p), inodeId, mtime);
   }

   @Override
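Reading off the branch above, the four standard HandleOpt combinations map onto the handle fields as sketched below. The method is illustrative only; it assumes a DistributedFileSystem fs and a FileStatus st are in scope, and that FileSystem#getPathHandle is the public wrapper around the protected createPathHandle shown in this hunk. The /.reserved/.inodes path form is what DFSUtilClient.makePathFromFileId produces.

static void mintHandles(DistributedFileSystem fs, FileStatus st)
    throws IOException {
  // reference(): moves and content changes both allowed
  //   -> path is /.reserved/.inodes/<fileId>; no inodeId or mtime pinned.
  PathHandle byReference = fs.getPathHandle(st, HandleOpt.reference());
  // path(): moves rejected, content changes allowed
  //   -> original path kept, inodeId pinned, no mtime.
  PathHandle byPath = fs.getPathHandle(st, HandleOpt.path());
  // content(): content changes rejected, moves allowed
  //   -> /.reserved/.inodes/<fileId> plus pinned mtime.
  PathHandle byContent = fs.getPathHandle(st, HandleOpt.content());
  // exact(): any move or content change invalidates the handle
  //   -> original path, inodeId and mtime all pinned.
  PathHandle exact = fs.getPathHandle(st, HandleOpt.exact());
}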

View File

@@ -1024,6 +1024,21 @@ CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
   @Idempotent
   HdfsFileStatus getFileLinkInfo(String src) throws IOException;

+  /**
+   * Get the file info for a specific file or directory with
+   * {@link LocatedBlocks}.
+   * @param src The string representation of the path to the file
+   * @param needBlockToken Generate block tokens for {@link LocatedBlocks}
+   * @return object containing information regarding the file
+   *         or null if file not found
+   * @throws org.apache.hadoop.security.AccessControlException permission denied
+   * @throws java.io.FileNotFoundException file <code>src</code> is not found
+   * @throws IOException If an I/O error occurred
+   */
+  @Idempotent
+  HdfsLocatedFileStatus getLocatedFileInfo(String src, boolean needBlockToken)
+      throws IOException;
+
   /**
    * Get {@link ContentSummary} rooted at the specified directory.
    * @param path The string representation of the path

View File

@@ -19,6 +19,7 @@
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.Optional;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -34,16 +35,17 @@
 @InterfaceStability.Unstable
 public final class HdfsPathHandle implements PathHandle {

-  private static final long serialVersionUID = 0xc5308795428L;
+  private static final long serialVersionUID = 0xc53087a5428L;

-  private final long inodeId;
+  private final String path;
+  private final Long mtime;
+  private final Long inodeId;

-  public HdfsPathHandle(HdfsFileStatus hstat) {
-    this(hstat.getFileId());
-  }
-
-  public HdfsPathHandle(long inodeId) {
-    this.inodeId = inodeId;
+  public HdfsPathHandle(String path,
+      Optional<Long> inodeId, Optional<Long> mtime) {
+    this.path = path;
+    this.mtime = mtime.orElse(null);
+    this.inodeId = inodeId.orElse(null);
   }

   public HdfsPathHandle(ByteBuffer bytes) throws IOException {
@@ -52,20 +54,39 @@ public HdfsPathHandle(ByteBuffer bytes) throws IOException {
     }
     HdfsPathHandleProto p =
         HdfsPathHandleProto.parseFrom(ByteString.copyFrom(bytes));
-    inodeId = p.getInodeId();
+    path = p.getPath();
+    mtime = p.hasMtime()
+        ? p.getMtime()
+        : null;
+    inodeId = p.hasInodeId()
+        ? p.getInodeId()
+        : null;
   }

-  public long getInodeId() {
-    return inodeId;
+  public String getPath() {
+    return path;
+  }
+
+  public void verify(HdfsLocatedFileStatus stat) throws IOException {
+    if (mtime != null && mtime != stat.getModificationTime()) {
+      throw new IOException("Content changed");
+    }
+    if (inodeId != null && inodeId != stat.getFileId()) {
+      throw new IOException("Wrong file");
+    }
   }

   @Override
   public ByteBuffer bytes() {
-    return HdfsPathHandleProto.newBuilder()
-        .setInodeId(getInodeId())
-        .build()
-        .toByteString()
-        .asReadOnlyByteBuffer();
+    HdfsPathHandleProto.Builder b = HdfsPathHandleProto.newBuilder();
+    b.setPath(path);
+    if (inodeId != null) {
+      b.setInodeId(inodeId);
+    }
+    if (mtime != null) {
+      b.setMtime(mtime);
+    }
+    return b.build().toByteString().asReadOnlyByteBuffer();
   }

   @Override
@@ -78,19 +99,25 @@ public boolean equals(Object other) {
       return false;
     }
     HdfsPathHandle o = (HdfsPathHandle)other;
-    return getInodeId() == o.getInodeId();
+    return getPath().equals(o.getPath());
   }

   @Override
   public int hashCode() {
-    return Long.hashCode(inodeId);
+    return path.hashCode();
   }

   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
     sb.append("{ ");
-    sb.append("inodeId : ").append(Long.toString(getInodeId()));
+    sb.append("\"path\" : \"").append(path).append("\"");
+    if (inodeId != null) {
+      sb.append(",\"inodeId\" : ").append(inodeId);
+    }
+    if (mtime != null) {
+      sb.append(",\"mtime\" : ").append(mtime);
+    }
     sb.append(" }");
     return sb.toString();
   }
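A round-trip sketch of the new wire format and of what verify checks at open time. All values are made up; only the constructors and methods shown in the hunk above are used.

import java.nio.ByteBuffer;
import java.util.Optional;
import org.apache.hadoop.hdfs.protocol.HdfsPathHandle;

public class HandleRoundTripSketch {
  public static void main(String[] args) throws Exception {
    // Pin both the inode and the modification time (the "exact" case).
    HdfsPathHandle fd = new HdfsPathHandle("/user/alice/data.csv", // illustrative
        Optional.of(16387L),            // inodeId
        Optional.of(1513036455000L));   // mtime

    // bytes() emits HdfsPathHandleProto{path, inodeId, mtime}; the buffer can
    // be persisted and parsed back later, possibly by a different client.
    ByteBuffer wire = fd.bytes();
    HdfsPathHandle parsed = new HdfsPathHandle(wire);

    // At open time DFSClient calls parsed.verify(status): "Wrong file" if the
    // inodeId no longer matches, "Content changed" if the mtime moved.
    System.out.println(parsed); // { "path" : "/user/alice/data.csv", ... }
  }
}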

View File

@@ -71,6 +71,7 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -129,6 +130,8 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto;
@@ -872,7 +875,8 @@ public void metaSave(String filename) throws IOException {
   @Override
   public HdfsFileStatus getFileInfo(String src) throws IOException {
     GetFileInfoRequestProto req = GetFileInfoRequestProto.newBuilder()
-        .setSrc(src).build();
+        .setSrc(src)
+        .build();
     try {
       GetFileInfoResponseProto res = rpcProxy.getFileInfo(null, req);
       return res.hasFs() ? PBHelperClient.convert(res.getFs()) : null;
@@ -881,6 +885,25 @@ public HdfsFileStatus getFileInfo(String src) throws IOException {
     }
   }

+  @Override
+  public HdfsLocatedFileStatus getLocatedFileInfo(String src,
+      boolean needBlockToken) throws IOException {
+    GetLocatedFileInfoRequestProto req =
+        GetLocatedFileInfoRequestProto.newBuilder()
+            .setSrc(src)
+            .setNeedBlockToken(needBlockToken)
+            .build();
+    try {
+      GetLocatedFileInfoResponseProto res =
+          rpcProxy.getLocatedFileInfo(null, req);
+      return (HdfsLocatedFileStatus) (res.hasFs()
+          ? PBHelperClient.convert(res.getFs())
+          : null);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
   @Override
   public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
     GetFileLinkInfoRequestProto req = GetFileLinkInfoRequestProto.newBuilder()

View File

@@ -91,7 +91,6 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
-import org.apache.hadoop.hdfs.protocol.HdfsPathHandle;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
@@ -164,7 +163,6 @@
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto;
@@ -1624,19 +1622,6 @@ public static FsPermissionProto convert(FsPermission p) {
     return FsPermissionProto.newBuilder().setPerm(p.toShort()).build();
   }

-  public static HdfsPathHandle convert(HdfsPathHandleProto fd) {
-    if (null == fd) {
-      return null;
-    }
-    return new HdfsPathHandle(fd.getInodeId());
-  }
-
-  public static HdfsPathHandleProto convert(HdfsPathHandle fd) {
-    return HdfsPathHandleProto.newBuilder()
-        .setInodeId(fd.getInodeId())
-        .build();
-  }
-
   public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
     if (fs == null) {
       return null;

View File

@@ -495,6 +495,15 @@ message GetFileInfoResponseProto {
   optional HdfsFileStatusProto fs = 1;
 }

+message GetLocatedFileInfoRequestProto {
+  optional string src = 1;
+  optional bool needBlockToken = 2 [default = false];
+}
+
+message GetLocatedFileInfoResponseProto {
+  optional HdfsFileStatusProto fs = 1;
+}
+
 message IsFileClosedRequestProto {
   required string src = 1;
 }
@@ -868,6 +877,8 @@ service ClientNamenodeProtocol {
       returns(ListCorruptFileBlocksResponseProto);
   rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto);
   rpc getFileInfo(GetFileInfoRequestProto) returns(GetFileInfoResponseProto);
+  rpc getLocatedFileInfo(GetLocatedFileInfoRequestProto)
+      returns(GetLocatedFileInfoResponseProto);
   rpc addCacheDirective(AddCacheDirectiveRequestProto)
       returns (AddCacheDirectiveResponseProto);
   rpc modifyCacheDirective(ModifyCacheDirectiveRequestProto)

View File

@@ -401,6 +401,8 @@ message AddErasureCodingPolicyResponseProto {
  */
 message HdfsPathHandleProto {
   optional uint64 inodeId = 1;
+  optional uint64 mtime = 2;
+  optional string path = 3;
 }

 /**

View File

@@ -136,6 +136,8 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto;
@@ -344,6 +346,10 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   private static final GetFileInfoResponseProto VOID_GETFILEINFO_RESPONSE =
       GetFileInfoResponseProto.newBuilder().build();

+  private static final GetLocatedFileInfoResponseProto
+      VOID_GETLOCATEDFILEINFO_RESPONSE =
+          GetLocatedFileInfoResponseProto.newBuilder().build();
+
   private static final GetFileLinkInfoResponseProto VOID_GETFILELINKINFO_RESPONSE =
       GetFileLinkInfoResponseProto.newBuilder().build();
@@ -952,7 +958,23 @@ public GetFileInfoResponseProto getFileInfo(RpcController controller,
   }

   @Override
+  public GetLocatedFileInfoResponseProto getLocatedFileInfo(
+      RpcController controller, GetLocatedFileInfoRequestProto req)
+      throws ServiceException {
+    try {
+      HdfsFileStatus result = server.getLocatedFileInfo(req.getSrc(),
+          req.getNeedBlockToken());
+      if (result != null) {
+        return GetLocatedFileInfoResponseProto.newBuilder().setFs(
+            PBHelperClient.convert(result)).build();
+      }
+      return VOID_GETLOCATEDFILEINFO_RESPONSE;
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
   public GetFileLinkInfoResponseProto getFileLinkInfo(RpcController controller,
       GetFileLinkInfoRequestProto req) throws ServiceException {
     try {

View File

@@ -85,6 +85,7 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -1071,6 +1072,18 @@ public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
         locations, method, HdfsFileStatus.class, null);
   }

+  @Override
+  public HdfsLocatedFileStatus getLocatedFileInfo(String src,
+      boolean needBlockToken) throws IOException {
+    checkOperation(OperationCategory.READ);
+    final List<RemoteLocation> locations = getLocationsForPath(src, false);
+    RemoteMethod method = new RemoteMethod("getLocatedFileInfo",
+        new Class<?>[] {String.class, boolean.class}, new RemoteParam(),
+        Boolean.valueOf(needBlockToken));
+    return (HdfsLocatedFileStatus) rpcClient.invokeSequential(
+        locations, method, HdfsFileStatus.class, null);
+  }
+
   @Override // ClientProtocol
   public long[] getStats() throws IOException {
     checkOperation(OperationCategory.UNCHECKED);

View File

@@ -148,7 +148,8 @@ static LastBlockWithStatus appendFile(final FSNamesystem fsn,
       fsd.writeUnlock();
     }
-    HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, iip);
+    HdfsFileStatus stat =
+        FSDirStatAndListingOp.getFileInfo(fsd, iip, false, false);
     if (lb != null) {
       NameNode.stateChangeLog.debug(
           "DIR* NameSystem.appendFile: file {} for {} at {} block {} block"

View File

@@ -23,6 +23,7 @@
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
@@ -213,7 +214,9 @@ static Collection<String> getSnapshotFiles(FSDirectory fsd,
         snapname += Path.SEPARATOR;
       }
       snapname += file.substring(file.indexOf(dirName) + dirName.length());
-      if (fsd.getFSNamesystem().getFileInfo(snapname, true) != null) {
+      HdfsFileStatus stat =
+          fsd.getFSNamesystem().getFileInfo(snapname, true, false, false);
+      if (stat != null) {
         snaps.add(snapname);
       }
     }

View File

@@ -90,11 +90,13 @@ static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg,
    * @param resolveLink whether to throw UnresolvedLinkException
    *        if src refers to a symlink
    *
+   * @param needLocation Include {@link LocatedBlocks} in result.
+   * @param needBlockToken Include block tokens in {@link LocatedBlocks}.
    * @return object containing information regarding the file
    *         or null if file not found
    */
-  static HdfsFileStatus getFileInfo(
-      FSDirectory fsd, String srcArg, boolean resolveLink)
+  static HdfsFileStatus getFileInfo(FSDirectory fsd, String srcArg,
+      boolean resolveLink, boolean needLocation, boolean needBlockToken)
       throws IOException {
     DirOp dirOp = resolveLink ? DirOp.READ : DirOp.READ_LINK;
     FSPermissionChecker pc = fsd.getPermissionChecker();
@@ -111,7 +113,7 @@ static HdfsFileStatus getFileInfo(
     } else {
       iip = fsd.resolvePath(pc, srcArg, dirOp);
     }
-    return getFileInfo(fsd, iip);
+    return getFileInfo(fsd, iip, needLocation, needBlockToken);
   }

   /**
@@ -234,7 +236,7 @@ private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
       // target INode
       return new DirectoryListing(
           new HdfsFileStatus[]{ createFileStatus(
-              fsd, iip, null, parentStoragePolicy, needLocation)
+              fsd, iip, null, parentStoragePolicy, needLocation, false)
           }, 0);
     }
@@ -253,8 +255,8 @@ private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
           ? getStoragePolicyID(child.getLocalStoragePolicyID(),
              parentStoragePolicy)
           : parentStoragePolicy;
-      listing[i] =
-          createFileStatus(fsd, iip, child, childStoragePolicy, needLocation);
+      listing[i] = createFileStatus(fsd, iip, child, childStoragePolicy,
+          needLocation, false);
       listingCnt++;
       if (listing[i] instanceof HdfsLocatedFileStatus) {
         // Once we hit lsLimit locations, stop.
@@ -305,7 +307,7 @@ private static DirectoryListing getSnapshotsListing(
     for (int i = 0; i < numOfListing; i++) {
       Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
       listing[i] = createFileStatus(fsd, iip, sRoot,
-          HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false);
+          HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false, false);
     }
     return new DirectoryListing(
         listing, snapshots.size() - skipSize - numOfListing);
@@ -324,11 +326,14 @@ private static DirectoryListing getReservedListing(FSDirectory fsd) {
    * @param fsd FSDirectory
    * @param iip The path to the file, the file is included
    * @param includeStoragePolicy whether to include storage policy
+   * @param needLocation Include {@link LocatedBlocks} in response
+   * @param needBlockToken Generate block tokens for {@link LocatedBlocks}
    * @return object containing information regarding the file
    *         or null if file not found
    */
-  static HdfsFileStatus getFileInfo(FSDirectory fsd,
-      INodesInPath iip, boolean includeStoragePolicy) throws IOException {
+  static HdfsFileStatus getFileInfo(FSDirectory fsd, INodesInPath iip,
+      boolean includeStoragePolicy, boolean needLocation,
+      boolean needBlockToken) throws IOException {
     fsd.readLock();
     try {
       final INode node = iip.getLastINode();
@@ -338,14 +343,15 @@ static HdfsFileStatus getFileInfo(FSDirectory fsd,
       byte policy = (includeStoragePolicy && !node.isSymlink())
           ? node.getStoragePolicyID()
           : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
-      return createFileStatus(fsd, iip, null, policy, false);
+      return createFileStatus(fsd, iip, null, policy, needLocation,
+          needBlockToken);
     } finally {
       fsd.readUnlock();
     }
   }

-  static HdfsFileStatus getFileInfo(FSDirectory fsd, INodesInPath iip)
-      throws IOException {
+  static HdfsFileStatus getFileInfo(FSDirectory fsd, INodesInPath iip,
+      boolean needLocation, boolean needBlockToken) throws IOException {
     fsd.readLock();
     try {
       HdfsFileStatus status = null;
@@ -356,7 +362,7 @@ static HdfsFileStatus getFileInfo(FSDirectory fsd, INodesInPath iip)
         status = FSDirectory.DOT_SNAPSHOT_DIR_STATUS;
        }
      } else {
-      status = getFileInfo(fsd, iip, true);
+      status = getFileInfo(fsd, iip, true, needLocation, needBlockToken);
      }
      return status;
    } finally {
@@ -373,7 +379,7 @@ static HdfsFileStatus getFileInfo(FSDirectory fsd, INodesInPath iip)
   static HdfsFileStatus createFileStatusForEditLog(
       FSDirectory fsd, INodesInPath iip) throws IOException {
     return createFileStatus(fsd, iip,
-        null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false);
+        null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false, false);
   }

   /**
@@ -384,12 +390,13 @@ static HdfsFileStatus createFileStatusForEditLog(
    * @param child for a directory listing of the iip, else null
    * @param storagePolicy for the path or closest ancestor
    * @param needLocation if block locations need to be included or not
+   * @param needBlockToken
    * @return a file status
    * @throws java.io.IOException if any error occurs
    */
   private static HdfsFileStatus createFileStatus(
       FSDirectory fsd, INodesInPath iip, INode child, byte storagePolicy,
-      boolean needLocation) throws IOException {
+      boolean needLocation, boolean needBlockToken) throws IOException {
     assert fsd.hasReadLock();
     // only directory listing sets the status name.
     byte[] name = HdfsFileStatus.EMPTY_NAME;
@@ -429,8 +436,8 @@ private static HdfsFileStatus createFileStatus(
       final long fileSize = !inSnapshot && isUc
           ? fileNode.computeFileSizeNotIncludingLastUcBlock() : size;
       loc = fsd.getBlockManager().createLocatedBlocks(
-          fileNode.getBlocks(snapshot), fileSize, isUc, 0L, size, false,
-          inSnapshot, feInfo, ecPolicy);
+          fileNode.getBlocks(snapshot), fileSize, isUc, 0L, size,
+          needBlockToken, inSnapshot, feInfo, ecPolicy);
       if (loc == null) {
         loc = new LocatedBlocks();
       }

View File

@@ -408,7 +408,7 @@ static HdfsFileStatus startFile(
       NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: added " +
           src + " inode " + newNode.getId() + " " + holder);
     }
-    return FSDirStatAndListingOp.getFileInfo(fsd, iip);
+    return FSDirStatAndListingOp.getFileInfo(fsd, iip, false, false);
   }

   static INodeFile addFileForEditLog(

View File

@@ -2990,6 +2990,8 @@ void removeLeasesAndINodes(List<Long> removedUCFiles,
    * @param resolveLink whether to throw UnresolvedLinkException
    *        if src refers to a symlink
    *
+   * @param needLocation Include {@link LocatedBlocks} in result.
+   * @param needBlockToken Include block tokens in {@link LocatedBlocks}
    * @throws AccessControlException if access is denied
    * @throws UnresolvedLinkException if a symlink is encountered.
    *
@@ -2997,15 +2999,19 @@ void removeLeasesAndINodes(List<Long> removedUCFiles,
    *         or null if file not found
    * @throws StandbyException
    */
-  HdfsFileStatus getFileInfo(final String src, boolean resolveLink)
-      throws IOException {
-    final String operationName = "getfileinfo";
+  HdfsFileStatus getFileInfo(final String src, boolean resolveLink,
+      boolean needLocation, boolean needBlockToken) throws IOException {
+    // if the client requests block tokens, then it can read data blocks
+    // and should appear in the audit log as if getBlockLocations had been
+    // called
+    final String operationName = needBlockToken ? "open" : "getfileinfo";
     checkOperation(OperationCategory.READ);
     HdfsFileStatus stat = null;
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      stat = FSDirStatAndListingOp.getFileInfo(dir, src, resolveLink);
+      stat = FSDirStatAndListingOp.getFileInfo(
+          dir, src, resolveLink, needLocation, needBlockToken);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -6158,7 +6164,7 @@ List<String> listCorruptFileBlocksWithSnapshot(String path,
     }
     for (CorruptFileBlockInfo c : corruptFileBlocks) {
-      if (getFileInfo(c.path, true) != null) {
+      if (getFileInfo(c.path, true, false, false) != null) {
         list.add(c.toString());
       }
       final Collection<String> snaps = FSDirSnapshotOp

View File

@@ -104,6 +104,7 @@
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
+import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
@@ -1138,12 +1139,25 @@ public DirectoryListing getListing(String src, byte[] startAfter,
   }

   @Override // ClientProtocol
   public HdfsFileStatus getFileInfo(String src) throws IOException {
     checkNNStartup();
     metrics.incrFileInfoOps();
-    return namesystem.getFileInfo(src, true);
+    return namesystem.getFileInfo(src, true, false, false);
   }

+  @Override // ClientProtocol
+  public HdfsLocatedFileStatus getLocatedFileInfo(String src,
+      boolean needBlockToken) throws IOException {
+    checkNNStartup();
+    if (needBlockToken) {
+      metrics.incrGetBlockLocations();
+    } else {
+      metrics.incrFileInfoOps();
+    }
+    return (HdfsLocatedFileStatus)
+        namesystem.getFileInfo(src, true, true, needBlockToken);
+  }
+
   @Override // ClientProtocol
   public boolean isFileClosed(String src) throws IOException{
     checkNNStartup();
@@ -1154,7 +1168,7 @@ public boolean isFileClosed(String src) throws IOException{
   public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
     checkNNStartup();
     metrics.incrFileInfoOps();
-    return namesystem.getFileInfo(src, false);
+    return namesystem.getFileInfo(src, false, false, false);
   }

   @Override // ClientProtocol
@@ -1429,7 +1443,7 @@ public String getLinkTarget(String path) throws IOException {
     metrics.incrGetLinkTargetOps();
     HdfsFileStatus stat = null;
     try {
-      stat = namesystem.getFileInfo(path, false);
+      stat = namesystem.getFileInfo(path, false, false, false);
     } catch (UnresolvedPathException e) {
       return e.getResolvedPath().toString();
     } catch (UnresolvedLinkException e) {

View File

@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;

 import com.google.common.base.Supplier;
-import org.apache.commons.lang.UnhandledException;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

 import static org.apache.hadoop.fs.CreateFlag.CREATE;

View File

@@ -72,12 +72,13 @@ public static LocatedBlocks getBlockLocations(NameNode namenode,
   }

   public static HdfsFileStatus getFileInfo(NameNode namenode, String src,
-      boolean resolveLink) throws AccessControlException, UnresolvedLinkException,
-      StandbyException, IOException {
+      boolean resolveLink, boolean needLocation, boolean needBlockToken)
+      throws AccessControlException, UnresolvedLinkException, StandbyException,
+      IOException {
     namenode.getNamesystem().readLock();
     try {
       return FSDirStatAndListingOp.getFileInfo(namenode.getNamesystem()
-          .getFSDirectory(), src, resolveLink);
+          .getFSDirectory(), src, resolveLink, needLocation, needBlockToken);
     } finally {
       namenode.getNamesystem().readUnlock();
     }

View File

@@ -274,7 +274,8 @@ public void testBackupNodeTailsEdits() throws Exception {
     backup = startBackupNode(conf, StartupOption.BACKUP, 1);
     testBNInSync(cluster, backup, 4);
-    assertNotNull(backup.getNamesystem().getFileInfo("/edit-while-bn-down", false));
+    assertNotNull(backup.getNamesystem()
+        .getFileInfo("/edit-while-bn-down", false, false, false));

     // Trigger an unclean shutdown of the backup node. Backup node will not
     // unregister from the active when this is done simulating a node crash.
@@ -314,7 +315,8 @@ private void testBNInSync(MiniDFSCluster cluster, final BackupNode backup,
       public Boolean get() {
         LOG.info("Checking for " + src + " on BN");
         try {
-          boolean hasFile = backup.getNamesystem().getFileInfo(src, false) != null;
+          boolean hasFile = backup.getNamesystem()
+              .getFileInfo(src, false, false, false) != null;
           boolean txnIdMatch =
               backup.getRpcServer().getTransactionID() ==
               nn.getRpcServer().getTransactionID();
@@ -465,7 +467,7 @@ void testCheckpoint(StartupOption op) throws Exception {
       assertTrue("file3 does not exist on BackupNode",
           op != StartupOption.BACKUP ||
           backup.getNamesystem().getFileInfo(
-              file3.toUri().getPath(), false) != null);
+              file3.toUri().getPath(), false, false, false) != null);
     } catch(IOException e) {
       LOG.error("Error in TestBackupNode:", e);

View File

@@ -288,7 +288,8 @@ public void testPreTxidEditLogWithEdits() throws Exception {
       long numEdits = testLoad(HADOOP20_SOME_EDITS, namesystem);
       assertEquals(3, numEdits);
       // Sanity check the edit
-      HdfsFileStatus fileInfo = namesystem.getFileInfo("/myfile", false);
+      HdfsFileStatus fileInfo =
+          namesystem.getFileInfo("/myfile", false, false, false);
       assertEquals("supergroup", fileInfo.getGroup());
       assertEquals(3, fileInfo.getReplication());
     } finally {

View File

@@ -163,7 +163,8 @@ public void teardown() {
   }

   private FileEncryptionInfo getFileEncryptionInfo(Path path) throws Exception {
-    return fsn.getFileInfo(path.toString(), false).getFileEncryptionInfo();
+    return fsn.getFileInfo(path.toString(), false, false, false)
+        .getFileEncryptionInfo();
   }

   @Test
@@ -1954,4 +1955,4 @@ protected void rollKey(final String keyName) throws Exception {
     // after NN restart consistent.
     dfsAdmin.getKeyProvider().flush();
   }
 }

View File

@@ -745,7 +745,7 @@ private void doAnEdit(FSNamesystem fsn, int id) throws IOException {
   private void checkEditExists(FSNamesystem fsn, int id) throws IOException {
     // Make sure the image loaded including our edit.
-    assertNotNull(fsn.getFileInfo("/test" + id, false));
+    assertNotNull(fsn.getFileInfo("/test" + id, false, false, false));
   }

   private Configuration getConf() throws IOException {

View File

@@ -124,7 +124,7 @@ public void testTailer() throws IOException, InterruptedException,
       for (int i = 0; i < DIRS_TO_MAKE / 2; i++) {
         assertTrue(NameNodeAdapter.getFileInfo(nn2,
-            getDirPath(i), false).isDirectory());
+            getDirPath(i), false, false, false).isDirectory());
       }

       for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
@@ -137,7 +137,7 @@ public void testTailer() throws IOException, InterruptedException,
       for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
         assertTrue(NameNodeAdapter.getFileInfo(nn2,
-            getDirPath(i), false).isDirectory());
+            getDirPath(i), false, false, false).isDirectory());
       }
     } finally {
       cluster.shutdown();

View File

@@ -38,9 +38,10 @@
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
+import static org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getFileInfo;
+
 import org.junit.Test;

 import com.google.common.base.Joiner;
@@ -110,7 +111,8 @@ public void testStartup() throws Exception {
       // the current log segment, and on the next roll, it would have to
       // either replay starting in the middle of the segment (not allowed)
       // or double-replay the edits (incorrect).
-      assertNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1), "/test", true));
+      assertNull(getFileInfo(cluster.getNameNode(1), "/test",
+          true, false, false));

       cluster.getNameNode(0).getRpcServer().mkdirs("/test2",
           FsPermission.createImmutable((short)0755), true);
@@ -122,8 +124,10 @@ public void testStartup() throws Exception {
       // NN1 should have both the edits that came before its restart, and the edits that
       // came after its restart.
-      assertNotNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1), "/test", true));
-      assertNotNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1), "/test2", true));
+      assertNotNull(getFileInfo(cluster.getNameNode(1), "/test",
+          true, false, false));
+      assertNotNull(getFileInfo(cluster.getNameNode(1), "/test2",
+          true, false, false));
     } finally {
       cluster.shutdown();
     }
@@ -165,7 +169,8 @@ private void testFailoverFinalizesAndReadsInProgress(
     // In the transition to active, it should have read the log -- and
     // hence see one of the dirs we made in the fake log.
     String testPath = "/dir" + NUM_DIRS_IN_LOG;
-    assertNotNull(cluster.getNameNode(0).getRpcServer().getFileInfo(testPath));
+    assertNotNull(cluster.getNameNode(0).getRpcServer()
+        .getFileInfo(testPath));

     // It also should have finalized that log in the shared directory and started
     // writing to a new one at the next txid.

View File

@@ -213,13 +213,13 @@ public void testFailuretoReadEdits() throws Exception {
       // Null because it was deleted.
       assertNull(NameNodeAdapter.getFileInfo(nn1,
-          TEST_DIR1, false));
+          TEST_DIR1, false, false, false));
       // Should have been successfully created.
       assertTrue(NameNodeAdapter.getFileInfo(nn1,
-          TEST_DIR2, false).isDirectory());
+          TEST_DIR2, false, false, false).isDirectory());
       // Null because it hasn't been created yet.
       assertNull(NameNodeAdapter.getFileInfo(nn1,
-          TEST_DIR3, false));
+          TEST_DIR3, false, false, false));

       // Now let the standby read ALL the edits.
       answer.setThrowExceptionOnRead(false);
@@ -227,13 +227,13 @@ public void testFailuretoReadEdits() throws Exception {
       // Null because it was deleted.
       assertNull(NameNodeAdapter.getFileInfo(nn1,
-          TEST_DIR1, false));
+          TEST_DIR1, false, false, false));
       // Should have been successfully created.
       assertTrue(NameNodeAdapter.getFileInfo(nn1,
-          TEST_DIR2, false).isDirectory());
+          TEST_DIR2, false, false, false).isDirectory());
       // Should now have been successfully created.
       assertTrue(NameNodeAdapter.getFileInfo(nn1,
-          TEST_DIR3, false).isDirectory());
+          TEST_DIR3, false, false, false).isDirectory());
     }

   /**

View File

@ -128,7 +128,7 @@ private void assertCanStartHaNameNodes(String pathSuffix)
HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0), HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
cluster.getNameNode(1)); cluster.getNameNode(1));
assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1), assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
newPath.toString(), false).isDirectory()); newPath.toString(), false, false, false).isDirectory());
} finally { } finally {
if (fs != null) { if (fs != null) {
fs.close(); fs.close();

View File

@ -35,8 +35,9 @@
import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster; import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
import org.apache.hadoop.hdfs.server.namenode.NNStorage; import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import static org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getFileInfo;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -116,8 +117,8 @@ public void testDefault() throws Exception {
cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits(); cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits();
// StandbyNameNode should not finish tailing in-progress logs // StandbyNameNode should not finish tailing in-progress logs
assertNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1), assertNull(getFileInfo(cluster.getNameNode(1),
"/test", true)); "/test", true, false, false));
// Restarting the standby should not finalize any edits files // Restarting the standby should not finalize any edits files
// in the shared directory when it starts up! // in the shared directory when it starts up!
@ -132,8 +133,8 @@ public void testDefault() throws Exception {
// the current log segment, and on the next roll, it would have to // the current log segment, and on the next roll, it would have to
// either replay starting in the middle of the segment (not allowed) // either replay starting in the middle of the segment (not allowed)
// or double-replay the edits (incorrect). // or double-replay the edits (incorrect).
assertNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1), assertNull(getFileInfo(cluster.getNameNode(1),
"/test", true)); "/test", true, false, false));
cluster.getNameNode(0).getRpcServer().mkdirs("/test2", cluster.getNameNode(0).getRpcServer().mkdirs("/test2",
FsPermission.createImmutable((short) 0755), true); FsPermission.createImmutable((short) 0755), true);
@ -145,10 +146,10 @@ public void testDefault() throws Exception {
// NN1 should have both the edits that came before its restart, // NN1 should have both the edits that came before its restart,
// and the edits that came after its restart. // and the edits that came after its restart.
assertNotNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1), assertNotNull(getFileInfo(cluster.getNameNode(1),
"/test", true)); "/test", true, false, false));
assertNotNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1), assertNotNull(getFileInfo(cluster.getNameNode(1),
"/test2", true)); "/test2", true, false, false));
} finally { } finally {
if (qjmhaCluster != null) { if (qjmhaCluster != null) {
qjmhaCluster.shutdown(); qjmhaCluster.shutdown();
@ -182,8 +183,8 @@ public void testSetup() throws Exception {
// After waiting for 5 seconds, StandbyNameNode should finish tailing // After waiting for 5 seconds, StandbyNameNode should finish tailing
// in-progress logs // in-progress logs
assertNotNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1), assertNotNull(getFileInfo(cluster.getNameNode(1),
"/test", true)); "/test", true, false, false));
// Restarting the standby should not finalize any edits files // Restarting the standby should not finalize any edits files
// in the shared directory when it starts up! // in the shared directory when it starts up!
@ -194,8 +195,8 @@ public void testSetup() throws Exception {
assertNoEditFiles(cluster.getNameDirs(1)); assertNoEditFiles(cluster.getNameDirs(1));
// Because we're using the in-progress tailer, this should not be null // Because we're using the in-progress tailer, this should not be null
assertNotNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1), assertNotNull(getFileInfo(cluster.getNameNode(1),
"/test", true)); "/test", true, false, false));
cluster.getNameNode(0).getRpcServer().mkdirs("/test2", cluster.getNameNode(0).getRpcServer().mkdirs("/test2",
FsPermission.createImmutable((short) 0755), true); FsPermission.createImmutable((short) 0755), true);
@ -207,10 +208,10 @@ public void testSetup() throws Exception {
// NN1 should have both the edits that came before its restart, // NN1 should have both the edits that came before its restart,
// and the edits that came after its restart. // and the edits that came after its restart.
assertNotNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1), assertNotNull(getFileInfo(cluster.getNameNode(1),
"/test", true)); "/test", true, false, false));
assertNotNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1), assertNotNull(getFileInfo(cluster.getNameNode(1),
"/test2", true)); "/test2", true, false, false));
} }
@Test @Test
@ -229,7 +230,7 @@ public void testHalfStartInProgressTail() throws Exception {
nn1.getNamesystem().getEditLogTailer().doTailEdits(); nn1.getNamesystem().getEditLogTailer().doTailEdits();
// StandbyNameNode should tail the in-progress edit // StandbyNameNode should tail the in-progress edit
assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test", true)); assertNotNull(getFileInfo(nn1, "/test", true, false, false));
// Create a new edit and finalize it // Create a new edit and finalize it
cluster.getNameNode(0).getRpcServer().mkdirs("/test2", cluster.getNameNode(0).getRpcServer().mkdirs("/test2",
@ -237,7 +238,7 @@ public void testHalfStartInProgressTail() throws Exception {
nn0.getRpcServer().rollEditLog(); nn0.getRpcServer().rollEditLog();
// StandbyNameNode shouldn't tail the edit since we do not call the method // StandbyNameNode shouldn't tail the edit since we do not call the method
assertNull(NameNodeAdapter.getFileInfo(nn1, "/test2", true)); assertNull(getFileInfo(nn1, "/test2", true, false, false));
// Create a new in-progress edit and let SBNN do the tail // Create a new in-progress edit and let SBNN do the tail
cluster.getNameNode(0).getRpcServer().mkdirs("/test3", cluster.getNameNode(0).getRpcServer().mkdirs("/test3",
@ -245,9 +246,9 @@ public void testHalfStartInProgressTail() throws Exception {
nn1.getNamesystem().getEditLogTailer().doTailEdits(); nn1.getNamesystem().getEditLogTailer().doTailEdits();
// StandbyNameNode should tail the finalized edit and the new in-progress // StandbyNameNode should tail the finalized edit and the new in-progress
assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test", true)); assertNotNull(getFileInfo(nn1, "/test", true, false, false));
assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test2", true)); assertNotNull(getFileInfo(nn1, "/test2", true, false, false));
assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test3", true)); assertNotNull(getFileInfo(nn1, "/test3", true, false, false));
} }
@Test @Test
@ -270,16 +271,16 @@ public void testInitStartInProgressTail() throws Exception {
cluster.getNameNode(0).getRpcServer().mkdirs("/test3", cluster.getNameNode(0).getRpcServer().mkdirs("/test3",
FsPermission.createImmutable((short) 0755), true); FsPermission.createImmutable((short) 0755), true);
assertNull(NameNodeAdapter.getFileInfo(nn1, "/test", true)); assertNull(getFileInfo(nn1, "/test", true, false, false));
assertNull(NameNodeAdapter.getFileInfo(nn1, "/test2", true)); assertNull(getFileInfo(nn1, "/test2", true, false, false));
assertNull(NameNodeAdapter.getFileInfo(nn1, "/test3", true)); assertNull(getFileInfo(nn1, "/test3", true, false, false));
nn1.getNamesystem().getEditLogTailer().doTailEdits(); nn1.getNamesystem().getEditLogTailer().doTailEdits();
// StandbyNameNode should tail the finalized edit and the new in-progress // StandbyNameNode should tail the finalized edit and the new in-progress
assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test", true)); assertNotNull(getFileInfo(nn1, "/test", true, false, false));
assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test2", true)); assertNotNull(getFileInfo(nn1, "/test2", true, false, false));
assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test3", true)); assertNotNull(getFileInfo(nn1, "/test3", true, false, false));
} }
@Test @Test
@ -296,17 +297,17 @@ public void testNewStartInProgressTail() throws Exception {
FsPermission.createImmutable((short) 0755), true); FsPermission.createImmutable((short) 0755), true);
nn1.getNamesystem().getEditLogTailer().doTailEdits(); nn1.getNamesystem().getEditLogTailer().doTailEdits();
nn0.getRpcServer().rollEditLog(); nn0.getRpcServer().rollEditLog();
assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test", true)); assertNotNull(getFileInfo(nn1, "/test", true, false, false));
assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test2", true)); assertNotNull(getFileInfo(nn1, "/test2", true, false, false));
cluster.getNameNode(0).getRpcServer().mkdirs("/test3", cluster.getNameNode(0).getRpcServer().mkdirs("/test3",
FsPermission.createImmutable((short) 0755), true); FsPermission.createImmutable((short) 0755), true);
nn1.getNamesystem().getEditLogTailer().doTailEdits(); nn1.getNamesystem().getEditLogTailer().doTailEdits();
// StandbyNameNode should tail the finalized edit and the new in-progress // StandbyNameNode should tail the finalized edit and the new in-progress
assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test", true)); assertNotNull(getFileInfo(nn1, "/test", true, false, false));
assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test2", true)); assertNotNull(getFileInfo(nn1, "/test2", true, false, false));
assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test3", true)); assertNotNull(getFileInfo(nn1, "/test3", true, false, false));
} }
@Test @Test
@ -325,7 +326,7 @@ public void testNonUniformConfig() throws Exception {
cluster.getNameNode(0).getRpcServer().rollEdits(); cluster.getNameNode(0).getRpcServer().rollEdits();
cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits(); cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits();
assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test", true)); assertNotNull(getFileInfo(nn1, "/test", true, false, false));
} }
/** /**
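
The TestStandbyInProgressTail hunks above all exercise the same sequence: create a directory through the active NameNode's RPC server, optionally roll the edit log, then drive the standby's tailer by hand and probe visibility with getFileInfo. A condensed sketch of that sequence, assuming an HA MiniDFSCluster with nn0 active, nn1 standby, in-progress tailing enabled, and the same imports (including the static getFileInfo import) used in this file; the path "/probe" is illustrative:

    // Create an entry through the active NameNode; it lands in the
    // in-progress edit log segment.
    nn0.getRpcServer().mkdirs("/probe",
        FsPermission.createImmutable((short) 0755), true);

    // The standby has not applied the edit until its tailer runs.
    assertNull(getFileInfo(nn1, "/probe", true, false, false));

    // Drive the tailer manually; with in-progress tailing enabled the edit
    // becomes visible without rolling the segment first.
    nn1.getNamesystem().getEditLogTailer().doTailEdits();
    assertNotNull(getFileInfo(nn1, "/probe", true, false, false));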

View File

@ -108,7 +108,7 @@
<property> <property>
<name>fs.contract.supports-content-check</name> <name>fs.contract.supports-content-check</name>
<value>false</value> <value>true</value>
</property> </property>
</configuration> </configuration>
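
The final hunk flips fs.contract.supports-content-check to true in the HDFS contract options, signaling that the filesystem can detect content changes behind a path handle; the exact test that consumes the option is not shown in this diff. A minimal sketch of how a contract test typically gates on such an option, assuming it runs inside an AbstractFSContractTestBase subclass and uses the standard AbstractFSContract#isSupported lookup (which prefixes the feature name with "fs.contract.") and ContractTestUtils.skip; the skip message is illustrative:

    if (!getContract().isSupported("supports-content-check", false)) {
      ContractTestUtils.skip(
          "Filesystem does not support content-checked path handles");
    }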