HDFS-9721. Allow Delimited PB OIV tool to run upon fsimage that contains INodeReference. (Xiao Chen via lei)
(cherry picked from commit 9d494f0c0e)
parent 94950c15c2
commit dfbd0d4df4
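Context: this change lets the Delimited processor of the protobuf-based offline image viewer (OIV) handle fsimages containing INodeReference entries (created when files or directories captured in a snapshot are renamed), instead of failing a Preconditions check. A minimal sketch of driving the processor programmatically, assuming the `OfflineImageViewerPB.run(String[])` entry point that the tests use; the file paths are placeholders:

    // Sketch: run the Delimited OIV processor against a saved fsimage.
    // Equivalent to the CLI: hdfs oiv -p Delimited -i <fsimage> -o <out>
    import org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB;

    public class RunDelimitedOiv {
      public static void main(String[] args) throws Exception {
        int status = OfflineImageViewerPB.run(new String[] {
            "-p", "Delimited",
            "-i", "/tmp/fsimage_0000000000000000042",  // placeholder path
            "-o", "/tmp/fsimage.txt"                   // placeholder path
        });
        System.exit(status);
      }
    }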
CHANGES.txt
@@ -894,6 +894,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9638. Improve DistCp Help and documentation.
     (Wei-Chiu Chuang via Yongjun Zhang)
 
+    HDFS-9721. Allow Delimited PB OIV tool to run upon fsimage that contains
+    INodeReference. (Xiao Chen via lei)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
FSImageLoader.java
@@ -204,7 +204,7 @@ class FSImageLoader {
     return dirs;
   }
 
-  private static ImmutableList<Long> loadINodeReferenceSection(InputStream in)
+  static ImmutableList<Long> loadINodeReferenceSection(InputStream in)
       throws IOException {
     LOG.info("Loading inode references");
     ImmutableList.Builder<Long> builder = ImmutableList.builder();
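The visibility change (private to package-private) lets PBImageTextWriter reuse FSImageLoader's reference loader. The method body falls outside this hunk; a hedged sketch of what it presumably does, inferred from how refIdList is indexed later (the k-th entry holds the referred inode id of reference k):

    // Assumed loader body (not shown in this hunk): read delimited
    // INodeReference messages until EOF, recording each referredId
    // in section order so list position k == reference id k.
    while (true) {
      FsImageProto.INodeReferenceSection.INodeReference e =
          FsImageProto.INodeReferenceSection.INodeReference
              .parseDelimitedFrom(in);
      if (e == null) {
        break;  // end of the INODE_REFERENCE section
      }
      builder.add(e.getReferredId());
    }
    return builder.build();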
IgnoreSnapshotException.java (new file)
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import java.io.IOException;
+
+/**
+ * Signals that a snapshot is ignored.
+ */
+public class IgnoreSnapshotException extends IOException {
+  public IgnoreSnapshotException() {
+  }
+}
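Note the design choice here: making IgnoreSnapshotException a subclass of IOException means getParentPath, which already declares throws IOException, needs no signature change, while outputINodes (further down) can still separate the benign "this inode is only reachable through a snapshot, skip it" case from genuine I/O failures with an instanceof check.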
PBImageDelimitedTextWriter.java
@@ -84,6 +84,7 @@ public class PBImageDelimitedTextWriter extends PBImageTextWriter {
         inodeName.isEmpty() ? "/" : inodeName);
     buffer.append(path.toString());
     PermissionStatus p = null;
+    boolean isDir = false;
 
     switch (inode.getType()) {
     case FILE:
@@ -109,6 +110,7 @@ public class PBImageDelimitedTextWriter extends PBImageTextWriter {
       append(buffer, 0);  // Num bytes.
       append(buffer, dir.getNsQuota());
       append(buffer, dir.getDsQuota());
+      isDir = true;
       break;
     case SYMLINK:
       INodeSymlink s = inode.getSymlink();
@@ -126,9 +128,28 @@ public class PBImageDelimitedTextWriter extends PBImageTextWriter {
       break;
     }
     assert p != null;
-    append(buffer, p.getPermission().toString());
+    String dirString = isDir ? "d" : "-";
+    append(buffer, dirString + p.getPermission().toString());
     append(buffer, p.getUserName());
     append(buffer, p.getGroupName());
     return buffer.toString();
   }
+
+  @Override
+  public String getHeader() {
+    StringBuffer buffer = new StringBuffer();
+    buffer.append("Path");
+    append(buffer, "Replication");
+    append(buffer, "ModificationTime");
+    append(buffer, "AccessTime");
+    append(buffer, "PreferredBlockSize");
+    append(buffer, "BlocksCount");
+    append(buffer, "FileSize");
+    append(buffer, "NSQUOTA");
+    append(buffer, "DSQUOTA");
+    append(buffer, "Permission");
+    append(buffer, "UserName");
+    append(buffer, "GroupName");
+    return buffer.toString();
+  }
 }
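The net effect of the isDir flag is that the Permission column now carries the same leading type character as `hdfs dfs -ls`. An illustrative sketch; the types are real Hadoop classes, the user/group/mode values are made up:

    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.fs.permission.PermissionStatus;

    public class PermissionColumnDemo {
      public static void main(String[] args) {
        PermissionStatus p = new PermissionStatus(
            "hdfs", "supergroup", new FsPermission((short) 0755));
        boolean isDir = true;  // set in the DIRECTORY branch of the switch
        String dirString = isDir ? "d" : "-";
        // Before this change the column read "rwxr-xr-x"; now it reads
        // "drwxr-xr-x" for directories, "-rwxr-xr-x" for everything else.
        System.out.println(dirString + p.getPermission().toString());
      }
    }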
PBImageTextWriter.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
 import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -195,13 +196,17 @@ abstract class PBImageTextWriter implements Closeable {
       dirMap.put(p.getId(), dir);
     }
 
+    @Override
     public String getParentPath(long inode) throws IOException {
       if (inode == INodeId.ROOT_INODE_ID) {
         return "";
       }
       Dir parent = dirChildMap.get(inode);
-      Preconditions.checkState(parent != null,
-          "Can not find parent directory for INode: %s", inode);
+      if (parent == null) {
+        // The inode is an INodeReference, which is generated from snapshot.
+        // For delimited oiv tool, no need to print out metadata in snapshots.
+        PBImageTextWriter.ignoreSnapshotName(inode);
+      }
       return parent.getPath();
     }
 
@@ -353,16 +358,22 @@ abstract class PBImageTextWriter implements Closeable {
       return "/";
     }
     byte[] bytes = dirChildMap.get(toBytes(inode));
-    Preconditions.checkState(bytes != null && bytes.length == 8,
-        "Can not find parent directory for inode %s, "
-            + "fsimage might be corrupted", inode);
+    if (bytes == null) {
+      // The inode is an INodeReference, which is generated from snapshot.
+      // For delimited oiv tool, no need to print out metadata in snapshots.
+      PBImageTextWriter.ignoreSnapshotName(inode);
+    }
+    if (bytes.length != 8) {
+      throw new IOException(
+          "bytes array length error. Actual length is " + bytes.length);
+    }
     long parent = toLong(bytes);
     if (!dirPathCache.containsKey(parent)) {
       bytes = dirMap.get(toBytes(parent));
-      if (parent != INodeId.ROOT_INODE_ID) {
-        Preconditions.checkState(bytes != null,
-            "Can not find parent directory for inode %s, "
-                + ", the fsimage might be corrupted.", parent);
+      if (parent != INodeId.ROOT_INODE_ID && bytes == null) {
+        // The parent is an INodeReference, which is generated from snapshot.
+        // For delimited oiv tool, no need to print out metadata in snapshots.
+        PBImageTextWriter.ignoreSnapshotName(parent);
       }
       String parentName = toString(bytes);
       String parentPath =
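Both getParentPath implementations now follow the same pattern: a missing child-to-parent mapping is taken to mean the inode is only reachable through an INodeReference (i.e. it lives in a snapshot), and the lookup bails out by throwing. Worth spelling out, since a static analyzer would flag it: ignoreSnapshotName unconditionally throws IgnoreSnapshotException, so the dereferences after the null checks (parent.getPath(), bytes.length) are reached only on the non-null path. In sketch form:

    if (parent == null) {
      // Always throws IgnoreSnapshotException; acts as a typed early exit.
      PBImageTextWriter.ignoreSnapshotName(inode);
    }
    return parent.getPath();  // reachable only when parent != null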
@@ -401,6 +412,7 @@ abstract class PBImageTextWriter implements Closeable {
 
   @Override
   public void close() throws IOException {
+    out.flush();
     IOUtils.cleanup(null, metadataMap);
   }
 
@@ -411,6 +423,11 @@ abstract class PBImageTextWriter implements Closeable {
    */
   abstract protected String getEntry(String parent, INode inode);
 
+  /**
+   * Get text output for the header line.
+   */
+  abstract protected String getHeader();
+
   public void visit(RandomAccessFile file) throws IOException {
     Configuration conf = new Configuration();
     if (!FSImageUtil.checkFileFormat(file)) {
@@ -442,6 +459,7 @@ abstract class PBImageTextWriter implements Closeable {
       }
     });
 
+    ImmutableList<Long> refIdList = null;
     for (FileSummary.Section section : sections) {
       fin.getChannel().position(section.getOffset());
       is = FSImageUtil.wrapInputStreamForCompression(conf,
@@ -449,15 +467,22 @@ abstract class PBImageTextWriter implements Closeable {
               fin, section.getLength())));
       switch (SectionName.fromString(section.getName())) {
       case STRING_TABLE:
+        LOG.info("Loading string table");
         stringTable = FSImageLoader.loadStringTable(is);
         break;
+      case INODE_REFERENCE:
+        // Load INodeReference so that all INodes can be processed.
+        // Snapshots are not handled and will just be ignored for now.
+        LOG.info("Loading inode references");
+        refIdList = FSImageLoader.loadINodeReferenceSection(is);
+        break;
       default:
         break;
       }
     }
 
     loadDirectories(fin, sections, summary, conf);
-    loadINodeDirSection(fin, sections, summary, conf);
+    loadINodeDirSection(fin, sections, summary, conf, refIdList);
     metadataMap.sync();
     output(conf, summary, fin, sections);
   }
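The new INODE_REFERENCE case wires the two halves together: FSImageLoader.loadINodeReferenceSection produces refIdList, where index k holds the referred inode id of the k-th reference in the section, and buildNamespace (below) uses DirEntry refChildren values as indexes into that list. Because this first pass over the sections completes before loadINodeDirSection is invoked, refIdList is fully populated by the time buildNamespace consumes it; it stays null only for images with no INODE_REFERENCE section, in which case every DirEntry should have refChildrenCount == 0 and the list is never dereferenced.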
@@ -468,6 +493,7 @@ abstract class PBImageTextWriter implements Closeable {
       throws IOException {
     InputStream is;
     long startTime = Time.monotonicNow();
+    out.println(getHeader());
     for (FileSummary.Section section : sections) {
       if (SectionName.fromString(section.getName()) == SectionName.INODE) {
         fin.getChannel().position(section.getOffset());
@@ -508,7 +534,7 @@ abstract class PBImageTextWriter implements Closeable {
 
   private void loadINodeDirSection(
       FileInputStream fin, List<FileSummary.Section> sections,
-      FileSummary summary, Configuration conf)
+      FileSummary summary, Configuration conf, List<Long> refIdList)
       throws IOException {
     LOG.info("Loading INode directory section.");
     long startTime = Time.monotonicNow();
@@ -519,7 +545,7 @@ abstract class PBImageTextWriter implements Closeable {
         InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
             summary.getCodec(), new BufferedInputStream(
                 new LimitInputStream(fin, section.getLength())));
-        buildNamespace(is);
+        buildNamespace(is, refIdList);
       }
     }
     long timeTaken = Time.monotonicNow() - startTime;
@@ -549,7 +575,8 @@ abstract class PBImageTextWriter implements Closeable {
   /**
    * Scan the INodeDirectory section to construct the namespace.
    */
-  private void buildNamespace(InputStream in) throws IOException {
+  private void buildNamespace(InputStream in, List<Long> refIdList)
+      throws IOException {
     int count = 0;
     while (true) {
       FsImageProto.INodeDirectorySection.DirEntry e =
@@ -562,12 +589,15 @@ abstract class PBImageTextWriter implements Closeable {
         LOG.debug("Scanned {} directories.", count);
       }
       long parentId = e.getParent();
-      // Referred INode is not support for now.
       for (int i = 0; i < e.getChildrenCount(); i++) {
         long childId = e.getChildren(i);
         metadataMap.putDirChild(parentId, childId);
       }
-      Preconditions.checkState(e.getRefChildrenCount() == 0);
+      for (int i = e.getChildrenCount();
+           i < e.getChildrenCount() + e.getRefChildrenCount(); i++) {
+        int refId = e.getRefChildren(i - e.getChildrenCount());
+        metadataMap.putDirChild(parentId, refIdList.get(refId));
+      }
     }
     LOG.info("Scanned {} INode directories to build namespace.", count);
   }
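The added loop runs i over [childrenCount, childrenCount + refChildrenCount) purely to keep one running index; it may read more simply as a direct loop over the refChildren field:

    // Equivalent, simpler form of the added loop (illustration only):
    for (int j = 0; j < e.getRefChildrenCount(); j++) {
      int refId = e.getRefChildren(j);  // index into the reference section
      metadataMap.putDirChild(parentId, refIdList.get(refId));
    }

Either way the point is the same: instead of asserting that no referred children exist (the old Preconditions.checkState), referred children are resolved through refIdList and linked to their parent like ordinary children, so getParentPath can find them later.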
@@ -575,15 +605,41 @@ abstract class PBImageTextWriter implements Closeable {
   private void outputINodes(InputStream in) throws IOException {
     INodeSection s = INodeSection.parseDelimitedFrom(in);
     LOG.info("Found {} INodes in the INode section", s.getNumInodes());
+    long ignored = 0;
+    long ignoredSnapshots = 0;
     for (int i = 0; i < s.getNumInodes(); ++i) {
       INode p = INode.parseDelimitedFrom(in);
-      String parentPath = metadataMap.getParentPath(p.getId());
-      out.println(getEntry(parentPath, p));
+      try {
+        String parentPath = metadataMap.getParentPath(p.getId());
+        out.println(getEntry(parentPath, p));
+      } catch (IOException ioe) {
+        ignored++;
+        if (!(ioe instanceof IgnoreSnapshotException)) {
+          LOG.warn("Exception caught, ignoring node:{}", p.getId(), ioe);
+        } else {
+          ignoredSnapshots++;
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Exception caught, ignoring node:{}.", p.getId(), ioe);
+          }
+        }
+      }
+
       if (LOG.isDebugEnabled() && i % 100000 == 0) {
         LOG.debug("Outputted {} INodes.", i);
       }
     }
+    if (ignored > 0) {
+      LOG.warn("Ignored {} nodes, including {} in snapshots. Please turn on"
+          + " debug log for details", ignored, ignoredSnapshots);
+    }
     LOG.info("Outputted {} INodes.", s.getNumInodes());
   }
+
+  static void ignoreSnapshotName(long inode) throws IOException {
+    // Ignore snapshots - we want the output similar to -ls -R.
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("No snapshot name found for inode {}", inode);
+    }
+    throw new IgnoreSnapshotException();
+  }
 }
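The counters make the new failure handling observable: ignored counts every inode dropped from the output, ignoredSnapshots the subset dropped because they were only reachable through snapshots (IgnoreSnapshotException); any other IOException is logged at WARN with the offending inode id. Note the SLF4J idiom in the warn/debug calls: with one {} placeholder and two trailing arguments, the final ioe is treated as a Throwable and printed as a stack trace rather than substituted into the message.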
TestOfflineImageViewer.java
@@ -87,6 +87,7 @@ public class TestOfflineImageViewer {
 
   // namespace as written to dfs, to be compared with viewer's output
   final static HashMap<String, FileStatus> writtenFiles = Maps.newHashMap();
+  static int dirCount = 0;
 
   @Rule
   public TemporaryFolder folder = new TemporaryFolder();
@@ -113,7 +114,7 @@ public class TestOfflineImageViewer {
       DistributedFileSystem hdfs = cluster.getFileSystem();
 
       // Create a reasonable namespace
-      for (int i = 0; i < NUM_DIRS; i++) {
+      for (int i = 0; i < NUM_DIRS; i++, dirCount++) {
         Path dir = new Path("/dir" + i);
         hdfs.mkdirs(dir);
         writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
@@ -131,11 +132,13 @@ public class TestOfflineImageViewer {
       // Create an empty directory
       Path emptydir = new Path("/emptydir");
       hdfs.mkdirs(emptydir);
+      dirCount++;
       writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));
 
       //Create a directory whose name should be escaped in XML
       Path invalidXMLDir = new Path("/dirContainingInvalidXMLChar\u0000here");
       hdfs.mkdirs(invalidXMLDir);
+      dirCount++;
 
       // Get delegation tokens so we log the delegation token op
       Token<?>[] delegationTokens = hdfs
@@ -144,15 +147,24 @@ public class TestOfflineImageViewer {
         LOG.debug("got token " + t);
       }
 
-      final Path snapshot = new Path("/snapshot");
-      hdfs.mkdirs(snapshot);
-      hdfs.allowSnapshot(snapshot);
-      hdfs.mkdirs(new Path("/snapshot/1"));
-      hdfs.delete(snapshot, true);
+      // Create INodeReference
+      final Path src = new Path("/src");
+      hdfs.mkdirs(src);
+      dirCount++;
+      writtenFiles.put(src.toString(), hdfs.getFileStatus(src));
+      final Path orig = new Path("/src/orig");
+      hdfs.mkdirs(orig);
+      hdfs.allowSnapshot(src);
+      hdfs.createSnapshot(src, "snapshot");
+      final Path dst = new Path("/dst");
+      hdfs.rename(orig, dst);
+      dirCount++;
+      writtenFiles.put(dst.toString(), hdfs.getFileStatus(dst));
 
       // Set XAttrs so the fsimage contains XAttr ops
       final Path xattr = new Path("/xattr");
       hdfs.mkdirs(xattr);
+      dirCount++;
       hdfs.setXAttr(xattr, "user.a1", new byte[]{ 0x31, 0x32, 0x33 });
       hdfs.setXAttr(xattr, "user.a2", new byte[]{ 0x37, 0x38, 0x39 });
       // OIV should be able to handle empty value XAttrs
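The replaced fixture (create a snapshottable directory, snapshot a child, delete the whole tree) never left an INodeReference in the fsimage; the new one does, because renaming a directory out of a snapshotted subtree forces the NameNode to keep one INode reachable under two names. Condensed to its essence (the same sequence as above, minus the test bookkeeping):

    // /src is snapshotted while it still contains /src/orig; the rename
    // then moves orig out, so "/dst" and "/src/.snapshot/snapshot/orig"
    // must both resolve to the same INode via INodeReference records.
    hdfs.mkdirs(new Path("/src/orig"));
    hdfs.allowSnapshot(new Path("/src"));
    hdfs.createSnapshot(new Path("/src"), "snapshot");
    hdfs.rename(new Path("/src/orig"), new Path("/dst"));

Saving a checkpoint of this namespace yields an fsimage whose INodeDirectorySection carries refChildren entries, which is exactly what the old Delimited writer choked on.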
@@ -232,8 +244,8 @@ public class TestOfflineImageViewer {
     matcher = p.matcher(outputString);
     assertTrue(matcher.find() && matcher.groupCount() == 1);
     int totalDirs = Integer.parseInt(matcher.group(1));
-    // totalDirs includes root directory, empty directory, and xattr directory
-    assertEquals(NUM_DIRS + 4, totalDirs);
+    // totalDirs includes root directory
+    assertEquals(dirCount + 1, totalDirs);
 
     FileStatus maxFile = Collections.max(writtenFiles.values(),
         new Comparator<FileStatus>() {
@@ -285,7 +297,7 @@ public class TestOfflineImageViewer {
 
     // verify the number of directories
     FileStatus[] statuses = webhdfs.listStatus(new Path("/"));
-    assertEquals(NUM_DIRS + 3, statuses.length); // contains empty and xattr directory
+    assertEquals(dirCount, statuses.length);
 
     // verify the number of files in the directory
     statuses = webhdfs.listStatus(new Path("/dir0"));
@@ -393,12 +405,16 @@ public class TestOfflineImageViewer {
         BufferedReader reader =
             new BufferedReader(new InputStreamReader(input))) {
       String line;
+      boolean header = true;
       while ((line = reader.readLine()) != null) {
         System.out.println(line);
         String[] fields = line.split(DELIMITER);
         assertEquals(12, fields.length);
-        fileNames.add(fields[0]);
+        if (!header) {
+          fileNames.add(fields[0]);
+        }
+        header = false;
       }
     }
 
     // writtenFiles does not contain root directory and "invalid XML char" dir.
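The header flag accounts for the new first line of Delimited output: since PBImageTextWriter.visit now prints getHeader() before any inode entries, the first line of the file is the column header (Path, Replication, ..., GroupName) rather than a path. The reader therefore skips it when collecting file names while still asserting that it has the same 12-field shape as the data rows.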