HDFS-5962. Mtime and atime are not persisted for symbolic links. Contributed by Akira Ajisaka.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1570252 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Kihwal Lee 2014-02-20 16:09:20 +00:00
parent 077adb25b7
commit b23f6cc1f2
6 changed files with 65 additions and 7 deletions

View File

@ -537,6 +537,9 @@ Release 2.4.0 - UNRELEASED
HDFS-5979. Typo and logger fix for fsimage PB code. (wang)
HDFS-5962. Mtime and atime are not persisted for symbolic links. (Akira
Ajisaka via kihwal)
BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)

View File

@ -279,7 +279,8 @@ public final class FSImageFormatPBINode {
parent.getLoaderContext().getStringTable()); parent.getLoaderContext().getStringTable());
INodeSymlink sym = new INodeSymlink(n.getId(), n.getName().toByteArray(), INodeSymlink sym = new INodeSymlink(n.getId(), n.getName().toByteArray(),
permissions, 0, 0, s.getTarget().toStringUtf8()); permissions, s.getModificationTime(), s.getAccessTime(),
s.getTarget().toStringUtf8());
return sym; return sym;
} }
@ -482,7 +483,9 @@ public final class FSImageFormatPBINode {
INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink
.newBuilder() .newBuilder()
.setPermission(buildPermissionStatus(n, state.getStringMap())) .setPermission(buildPermissionStatus(n, state.getStringMap()))
.setTarget(ByteString.copyFrom(n.getSymlink())); .setTarget(ByteString.copyFrom(n.getSymlink()))
.setModificationTime(n.getModificationTime())
.setAccessTime(n.getAccessTime());
INodeSection.INode r = buildINodeCommon(n) INodeSection.INode r = buildINodeCommon(n)
.setType(INodeSection.INode.Type.SYMLINK).setSymlink(b).build(); .setType(INodeSection.INode.Type.SYMLINK).setSymlink(b).build();

View File

@ -170,8 +170,9 @@ final class LsrPBImage {
PermissionStatus p = FSImageFormatPBINode.Loader.loadPermission( PermissionStatus p = FSImageFormatPBINode.Loader.loadPermission(
d.getPermission(), stringTable); d.getPermission(), stringTable);
out.print(String.format("-%s - %8s %10s %10s %10d %s%s -> %s\n", p out.print(String.format("-%s - %8s %10s %10s %10d %s%s -> %s\n", p
.getPermission().toString(), p.getUserName(), p.getGroupName(), 0, 0, .getPermission().toString(), p.getUserName(), p.getGroupName(), d
parent, inode.getName().toStringUtf8(), d.getTarget().toStringUtf8())); .getModificationTime(), 0, parent, inode.getName().toStringUtf8(),
d.getTarget().toStringUtf8()));
} }
break; break;
default: default:

View File

@ -289,8 +289,9 @@ public final class PBImageXmlWriter {
} }
private void dumpINodeSymlink(INodeSymlink s) { private void dumpINodeSymlink(INodeSymlink s) {
o("permission", dumpPermission(s.getPermission())).o("target", o("permission", dumpPermission(s.getPermission()))
s.getTarget().toStringUtf8()); .o("target", s.getTarget().toStringUtf8())
.o("mtime", s.getModificationTime()).o("atime", s.getAccessTime());
} }
private void dumpNameSection(InputStream in) throws IOException { private void dumpNameSection(InputStream in) throws IOException {

View File

@ -130,6 +130,8 @@ message INodeSection {
message INodeSymlink { message INodeSymlink {
optional fixed64 permission = 1; optional fixed64 permission = 1;
optional bytes target = 2; optional bytes target = 2;
optional uint64 modificationTime = 3;
optional uint64 accessTime = 4;
} }
message INode { message INode {
@ -300,4 +302,4 @@ message CacheManagerSection {
required uint32 numDirectives = 3; required uint32 numDirectives = 3;
// repeated CachePoolInfoProto pools // repeated CachePoolInfoProto pools
// repeated CacheDirectiveInfoProto directives // repeated CacheDirectiveInfoProto directives
} }

View File

@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
@ -135,4 +136,51 @@ public class TestFSImage {
} }
} }
} }
/**
 * Regression test for HDFS-5962: verify that modification and access times
 * for files, directories, and symlinks survive an fsimage save/reload cycle.
 */
@Test(timeout=60000)
public void testLoadMtimeAtime() throws Exception {
  Configuration config = new Configuration();
  MiniDFSCluster miniCluster = null;
  try {
    // Bring up a single-datanode cluster and create one file, one
    // directory, and one symlink under the user's home directory.
    miniCluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    miniCluster.waitActive();
    DistributedFileSystem fs = miniCluster.getFileSystem();
    String homeDir = fs.getHomeDirectory().toUri().getPath().toString();
    Path filePath = new Path(homeDir, "file");
    Path dirPath = new Path(homeDir, "/dir");
    Path linkPath = new Path(homeDir, "/link");
    fs.createNewFile(filePath);
    fs.mkdirs(dirPath);
    fs.createSymlink(filePath, linkPath, false);

    // Record the timestamps as they exist before the fsimage round trip.
    // getFileLinkStatus is used for the symlink so the link itself (not
    // its target) is inspected.
    FileStatus fileStatus = fs.getFileStatus(filePath);
    long expectedFileMtime = fileStatus.getModificationTime();
    long expectedFileAtime = fileStatus.getAccessTime();
    long expectedDirMtime = fs.getFileStatus(dirPath).getModificationTime();
    FileStatus linkStatus = fs.getFileLinkStatus(linkPath);
    long expectedLinkMtime = linkStatus.getModificationTime();
    long expectedLinkAtime = linkStatus.getAccessTime();

    // Force a checkpoint (safe mode is required for saveNamespace), then
    // restart on the saved image without reformatting.
    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
    miniCluster.shutdown();
    miniCluster = new MiniDFSCluster.Builder(config).format(false)
        .numDataNodes(1).build();
    miniCluster.waitActive();
    fs = miniCluster.getFileSystem();

    // Every timestamp must have been persisted and reloaded intact —
    // including the symlink's, which is what HDFS-5962 fixed.
    assertEquals(expectedFileMtime, fs.getFileStatus(filePath).getModificationTime());
    assertEquals(expectedFileAtime, fs.getFileStatus(filePath).getAccessTime());
    assertEquals(expectedDirMtime, fs.getFileStatus(dirPath).getModificationTime());
    assertEquals(expectedLinkMtime, fs.getFileLinkStatus(linkPath).getModificationTime());
    assertEquals(expectedLinkAtime, fs.getFileLinkStatus(linkPath).getAccessTime());
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
} }