svn merge -c 1570252 merging from trunk to branch-2 to fix: HDFS-5962. Mtime and atime are not persisted for symbolic links.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1570255 13f79535-47bb-0310-9956-ffa450edef68
Kihwal Lee 2014-02-20 16:16:32 +00:00
parent 7feb5bc8aa
commit 266a2aadf6
6 changed files with 66 additions and 8 deletions


@@ -208,6 +208,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-5979. Typo and logger fix for fsimage PB code. (wang)
 
+    HDFS-5962. Mtime and atime are not persisted for symbolic links. (Akira
+    Ajisaka via kihwal)
+
   BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
 
     HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)


@@ -227,8 +227,10 @@ public final class FSImageFormatPBINode {
       INodeSection.INodeSymlink s = n.getSymlink();
       final PermissionStatus permissions = loadPermission(s.getPermission(),
           parent.getLoaderContext().getStringTable());
-      return new INodeSymlink(n.getId(), n.getName().toByteArray(), permissions,
-          0, 0, s.getTarget().toStringUtf8());
+      INodeSymlink sym = new INodeSymlink(n.getId(), n.getName().toByteArray(),
+          permissions, s.getModificationTime(), s.getAccessTime(),
+          s.getTarget().toStringUtf8());
+      return sym;
     }
 
     private void loadRootINode(INodeSection.INode p) {
@@ -408,7 +410,9 @@ public final class FSImageFormatPBINode {
       INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink
           .newBuilder()
           .setPermission(buildPermissionStatus(n, parent.getSaverContext().getStringMap()))
-          .setTarget(ByteString.copyFrom(n.getSymlink()));
+          .setTarget(ByteString.copyFrom(n.getSymlink()))
+          .setModificationTime(n.getModificationTime())
+          .setAccessTime(n.getAccessTime());
       INodeSection.INode r = buildINodeCommon(n)
           .setType(INodeSection.INode.Type.SYMLINK).setSymlink(b).build();
       r.writeDelimitedTo(out);


@@ -170,8 +170,9 @@ final class LsrPBImage {
       PermissionStatus p = FSImageFormatPBINode.Loader.loadPermission(
           d.getPermission(), stringTable);
       out.print(String.format("-%s - %8s %10s %10s %10d %s%s -> %s\n", p
-          .getPermission().toString(), p.getUserName(), p.getGroupName(), 0, 0,
-          parent, inode.getName().toStringUtf8(), d.getTarget().toStringUtf8()));
+          .getPermission().toString(), p.getUserName(), p.getGroupName(), d
+          .getModificationTime(), 0, parent, inode.getName().toStringUtf8(),
+          d.getTarget().toStringUtf8()));
     }
       break;
     default:
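
Note: with the modification time now passed into the format string, a symlink line in the lsr-style listing shows the persisted mtime where a hard-coded 0 used to appear. A hypothetical output line (values and spacing invented for illustration, not taken from the patch):

-rwxrwxrwx -     hdfs supergroup 1392913572063          0 /user/hdfs/link -> /user/hdfs/file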


@@ -289,8 +289,9 @@ public final class PBImageXmlWriter {
   }
 
   private void dumpINodeSymlink(INodeSymlink s) {
-    o("permission", dumpPermission(s.getPermission())).o("target",
-        s.getTarget().toStringUtf8());
+    o("permission", dumpPermission(s.getPermission()))
+        .o("target", s.getTarget().toStringUtf8())
+        .o("mtime", s.getModificationTime()).o("atime", s.getAccessTime());
   }
 
   private void dumpNameSection(InputStream in) throws IOException {


@@ -110,6 +110,8 @@ message INodeSection {
   message INodeSymlink {
     optional fixed64 permission = 1;
     optional bytes target = 2;
+    optional uint64 modificationTime = 3;
+    optional uint64 accessTime = 4;
   }
 
   message INode {
@@ -281,4 +283,3 @@ message CacheManagerSection {
   // repeated CachePoolInfoProto pools
   // repeated CacheDirectiveInfoProto directives
 }
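
Because the two new INodeSymlink fields are optional, fsimages written before this change still load; protobuf simply falls back to the field default of 0 when the fields are absent. A minimal sketch of that behavior (illustration only, not part of the patch; accessor names follow the standard protobuf-Java codegen already used elsewhere in this diff):

// n is an INodeSection.INode of type SYMLINK, as in the loader change above.
INodeSection.INodeSymlink s = n.getSymlink();
boolean persisted = s.hasModificationTime(); // false for images written before HDFS-5962
long mtime = s.getModificationTime();        // 0 when the field is absent
long atime = s.getAccessTime();              // 0 when the field is absent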


@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
@@ -135,4 +136,51 @@ public class TestFSImage {
       }
     }
   }
+
+  /**
+   * Ensure mtime and atime can be loaded from fsimage.
+   */
+  @Test(timeout=60000)
+  public void testLoadMtimeAtime() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      DistributedFileSystem hdfs = cluster.getFileSystem();
+      String userDir = hdfs.getHomeDirectory().toUri().getPath().toString();
+      Path file = new Path(userDir, "file");
+      Path dir = new Path(userDir, "/dir");
+      Path link = new Path(userDir, "/link");
+      hdfs.createNewFile(file);
+      hdfs.mkdirs(dir);
+      hdfs.createSymlink(file, link, false);
+      long mtimeFile = hdfs.getFileStatus(file).getModificationTime();
+      long atimeFile = hdfs.getFileStatus(file).getAccessTime();
+      long mtimeDir = hdfs.getFileStatus(dir).getModificationTime();
+      long mtimeLink = hdfs.getFileLinkStatus(link).getModificationTime();
+      long atimeLink = hdfs.getFileLinkStatus(link).getAccessTime();
+      // save namespace and restart cluster
+      hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+      hdfs.saveNamespace();
+      hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+      cluster.shutdown();
+      cluster = new MiniDFSCluster.Builder(conf).format(false)
+          .numDataNodes(1).build();
+      cluster.waitActive();
+      hdfs = cluster.getFileSystem();
+      assertEquals(mtimeFile, hdfs.getFileStatus(file).getModificationTime());
+      assertEquals(atimeFile, hdfs.getFileStatus(file).getAccessTime());
+      assertEquals(mtimeDir, hdfs.getFileStatus(dir).getModificationTime());
+      assertEquals(mtimeLink, hdfs.getFileLinkStatus(link).getModificationTime());
+      assertEquals(atimeLink, hdfs.getFileLinkStatus(link).getAccessTime());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
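
The new test can be run on its own with Maven Surefire's test selector, e.g. mvn test -Dtest=TestFSImage from the hadoop-hdfs module (a typical invocation, not taken from this commit).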