HDFS-4574. Move Diff to the util package.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1454988 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-03-11 03:45:52 +00:00
parent d32fb8a62b
commit 43f8d0b9c9
13 changed files with 39 additions and 45 deletions

CHANGES.HDFS-2802.txt

@@ -190,3 +190,5 @@ Branch-2802 Snapshot (Unreleased)
   (Jing Zhao via szetszwo)
 
   HDFS-4579. Annotate snapshot tests. (Arpit Agarwal via suresh)
+
+  HDFS-4574. Move Diff to the util package. (szetszwo)

FSDirectory.java

@@ -1558,7 +1558,7 @@ public class FSDirectory implements Closeable {
   private static INode[] getRelativePathINodes(INode inode, INode ancestor) {
     // calculate the depth of this inode from the ancestor
     int depth = 0;
-    for (INode i = inode; i != null && !i.equals(ancestor); i = i.parent) {
+    for (INode i = inode; i != null && !i.equals(ancestor); i = i.getParent()) {
       depth++;
     }
     INode[] inodes = new INode[depth];
@@ -1571,7 +1571,7 @@ public class FSDirectory implements Closeable {
         return null;
       }
       inodes[depth-i-1] = inode;
-      inode = inode.parent;
+      inode = inode.getParent();
     }
     return inodes;
   }
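getRelativePathINodes makes two passes over the parent chain: the first loop counts the depth up to the ancestor, the second fills the array from the end so the result reads root-to-leaf. A self-contained sketch of the same two-pass pattern, using a hypothetical Node type rather than the HDFS classes:

    import java.util.Arrays;

    public class PathWalkDemo {
      // Hypothetical minimal node type; the real code walks INode parents.
      static final class Node {
        final String name;
        final Node parent;
        Node(String name, Node parent) { this.name = name; this.parent = parent; }
      }

      /** Pass 1: count the depth. Pass 2: fill the array from the end upward. */
      static String[] relativePath(Node node, Node ancestor) {
        int depth = 0;
        for (Node n = node; n != null && n != ancestor; n = n.parent) {
          depth++;
        }
        String[] names = new String[depth];
        for (Node n = node; depth > 0; n = n.parent) {
          names[--depth] = n.name;
        }
        return names;
      }

      public static void main(String[] args) {
        Node root = new Node("", null);
        Node d = new Node("d", root);
        Node f = new Node("f", d);
        System.out.println(Arrays.toString(relativePath(f, root)));  // [d, f]
      }
    }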

INode.java

@@ -34,8 +34,10 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.INode.Content.CountsMap.Key;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff;
+import org.apache.hadoop.hdfs.util.Diff;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -109,7 +111,7 @@ public abstract class INode implements Diff.Element<byte[]> {
    * should not modify it.
    */
   private long permission = 0L;
-  INodeDirectory parent = null;
+  private INodeDirectory parent = null;
   private long modificationTime = 0L;
   private long accessTime = 0L;
@@ -509,16 +511,6 @@ public abstract class INode implements Diff.Element<byte[]> {
     return name == null? null: DFSUtil.bytes2String(name);
   }
 
-  String getLocalParentDir() {
-    INode inode = isRoot() ? this : getParent();
-    String parentDir = "";
-    if (inode != null) {
-      parentDir = inode.getFullPathName();
-    }
-    return (parentDir != null) ? parentDir : "";
-  }
-
   /**
    * @return null if the local name is null;
    *         otherwise, return the local name byte array.
@@ -532,13 +524,6 @@ public abstract class INode implements Diff.Element<byte[]> {
     return getLocalNameBytes();
   }
 
-  /**
-   * Set local file name
-   */
-  public void setLocalName(String name) {
-    setLocalName(DFSUtil.string2Bytes(name));
-  }
-
   /**
    * Set local file name
    */
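The field change in the second hunk above (package-private parent becoming private, with getParent()/setParent() accessors) is the thread running through most of this commit: every direct read or write of the field elsewhere in the tree is rewritten to go through an accessor, leaving a single place to hook parent updates later. A minimal sketch of the before/after, with illustrative names only:

    // Illustrative sketch of the encapsulation; not the real INode.
    public abstract class Node {
      private Node parent = null;  // was package-private and assigned directly

      public final Node getParent() {
        return parent;
      }

      public final void setParent(Node parent) {
        this.parent = parent;
      }
    }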

INodeDirectory.java

@@ -82,7 +82,7 @@ public class INodeDirectory extends INode {
     this.children = other.children;
     if (adopt && this.children != null) {
       for (INode child : children) {
-        child.parent = this;
+        child.setParent(this);
       }
     }
   }
@@ -529,7 +529,7 @@ public class INodeDirectory extends INode {
     if (children == null) {
       children = new ArrayList<INode>(DEFAULT_FILES_PER_DIRECTORY);
     }
-    node.parent = this;
+    node.setParent(this);
     children.add(-insertionPoint - 1, node);
 
     if (node.getGroupName() == null) {
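addChild leans on the standard binary-search convention (as in java.util.Collections.binarySearch): when the key is absent, the return value is -(insertionPoint) - 1, so -insertionPoint - 1 recovers the slot where the new child keeps the list sorted. A small self-contained demonstration:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class InsertionPointDemo {
      public static void main(String[] args) {
        List<String> names = new ArrayList<>(Arrays.asList("a", "c", "d"));
        int i = Collections.binarySearch(names, "b");
        // Absent key: i == -(insertionPoint) - 1, here -2.
        if (i < 0) {
          names.add(-i - 1, "b");  // insert at index 1; list stays sorted
        }
        System.out.println(names);  // [a, b, c, d]
      }
    }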

INodeFile.java

@@ -157,7 +157,7 @@ public class INodeFile extends INode implements BlockCollection {
   public INodeFile recordModification(final Snapshot latest)
       throws NSQuotaExceededException {
     return isInLatestSnapshot(latest)?
-        parent.replaceChild4INodeFileWithSnapshot(this)
+        getParent().replaceChild4INodeFileWithSnapshot(this)
             .recordModification(latest)
         : this;
   }

INodeFileUnderConstruction.java

@@ -133,7 +133,7 @@ public class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollection {
   public INodeFileUnderConstruction recordModification(final Snapshot latest)
       throws NSQuotaExceededException {
     return isInLatestSnapshot(latest)?
-        parent.replaceChild4INodeFileUcWithSnapshot(this)
+        getParent().replaceChild4INodeFileUcWithSnapshot(this)
             .recordModification(latest)
         : this;
   }

INodeSymlink.java

@@ -47,7 +47,7 @@ public class INodeSymlink extends INode {
   @Override
   INode recordModification(Snapshot latest) throws NSQuotaExceededException {
     return isInLatestSnapshot(latest)?
-        parent.saveChild2Snapshot(this, latest, new INodeSymlink(this))
+        getParent().saveChild2Snapshot(this, latest, new INodeSymlink(this))
         : this;
   }

NamenodeJspHelper.java

@@ -776,6 +776,15 @@ class NamenodeJspHelper {
     }
   }
 
+  private static String getLocalParentDir(INode inode) {
+    final INode parent = inode.isRoot() ? inode : inode.getParent();
+    String parentDir = "";
+    if (parent != null) {
+      parentDir = parent.getFullPathName();
+    }
+    return (parentDir != null) ? parentDir : "";
+  }
+
   // utility class used in block_info_xml.jsp
   static class XMLBlockInfo {
     final Block block;
@@ -817,7 +826,7 @@ class NamenodeJspHelper {
       doc.endTag();
 
       doc.startTag("local_directory");
-      doc.pcdata(inode.getLocalParentDir());
+      doc.pcdata(getLocalParentDir(inode));
       doc.endTag();
 
       doc.startTag("user_name");

INodeDirectorySnapshottable.java

@@ -227,7 +227,8 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
       throw new SnapshotException("The snapshot " + oldName
           + " does not exist for directory " + path);
     } else {
-      int indexOfNew = searchSnapshot(DFSUtil.string2Bytes(newName));
+      final byte[] newNameBytes = DFSUtil.string2Bytes(newName);
+      int indexOfNew = searchSnapshot(newNameBytes);
       if (indexOfNew > 0) {
         throw new SnapshotException("The snapshot " + newName
             + " already exists for directory " + path);
@@ -235,7 +236,7 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
       // remove the one with old name from snapshotsByNames
       Snapshot snapshot = snapshotsByNames.remove(indexOfOld);
       final INodeDirectory ssRoot = snapshot.getRoot();
-      ssRoot.setLocalName(newName);
+      ssRoot.setLocalName(newNameBytes);
       indexOfNew = -indexOfNew - 1;
       if (indexOfNew <= indexOfOld) {
         snapshotsByNames.add(indexOfNew, snapshot);
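A subtlety in this rename path: indexOfNew was computed by searching the list while the old entry was still present, which is why it is compared against indexOfOld after the remove(); any insertion point past the removed slot is off by one once the tail shifts left. A self-contained sketch of that adjustment (illustrative only; the real method also rejects an already-existing new name, as shown above):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class SortedRenameDemo {
      /** Rename oldName to newName in a sorted list, keeping it sorted. */
      static void rename(List<String> sorted, String oldName, String newName) {
        int indexOfOld = Collections.binarySearch(sorted, oldName);
        int indexOfNew = Collections.binarySearch(sorted, newName);  // pre-removal
        sorted.remove(indexOfOld);
        int insert = -indexOfNew - 1;
        if (insert > indexOfOld) {
          insert--;  // the removal shifted everything after indexOfOld left by one
        }
        sorted.add(insert, newName);
      }

      public static void main(String[] args) {
        List<String> snapshots = new ArrayList<>(Arrays.asList("a", "c", "d"));
        rename(snapshots, "c", "b");
        System.out.println(snapshots);  // [a, b, d]
      }
    }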

INodeDirectoryWithSnapshot.java

@@ -30,13 +30,13 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.apache.hadoop.hdfs.server.namenode.Quota;
 import org.apache.hadoop.hdfs.server.namenode.INode.Content.CountsMap.Key;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.Container;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.UndoInfo;
+import org.apache.hadoop.hdfs.server.namenode.Quota;
+import org.apache.hadoop.hdfs.util.Diff;
+import org.apache.hadoop.hdfs.util.Diff.Container;
+import org.apache.hadoop.hdfs.util.Diff.UndoInfo;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.base.Preconditions;

Diff.java

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode.snapshot.diff;
+package org.apache.hadoop.hdfs.util;
 
 import java.util.ArrayList;
 import java.util.Collections;

TestINodeFile.java

@@ -139,7 +139,7 @@ public class TestINodeFile {
     replication = 3;
     preferredBlockSize = 128*1024*1024;
     INodeFile inf = createINodeFile(replication, preferredBlockSize);
-    inf.setLocalName("f");
+    inf.setLocalName(DFSUtil.string2Bytes("f"));
 
     INodeDirectory root = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
         INodeDirectory.ROOT_NAME, perm, 0L);
@@ -147,19 +147,15 @@ public class TestINodeFile {
         DFSUtil.string2Bytes("d"), perm, 0L);
 
     assertEquals("f", inf.getFullPathName());
-    assertEquals("", inf.getLocalParentDir());
 
     dir.addChild(inf);
     assertEquals("d"+Path.SEPARATOR+"f", inf.getFullPathName());
-    assertEquals("d", inf.getLocalParentDir());
 
     root.addChild(dir);
     assertEquals(Path.SEPARATOR+"d"+Path.SEPARATOR+"f", inf.getFullPathName());
     assertEquals(Path.SEPARATOR+"d", dir.getFullPathName());
-
     assertEquals(Path.SEPARATOR, root.getFullPathName());
-    assertEquals(Path.SEPARATOR, root.getLocalParentDir());
   }
 
   /**
@@ -229,7 +225,7 @@ public class TestINodeFile {
     for (int i = 0; i < nCount; i++) {
       iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
           preferredBlockSize);
-      iNodes[i].setLocalName(fileNamePrefix + Integer.toString(i));
+      iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
       BlockInfo newblock = new BlockInfo(replication);
       iNodes[i].addBlock(newblock);
     }

TestDiff.java

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode.snapshot.diff;
+package org.apache.hadoop.hdfs.util;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -26,8 +26,9 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.Container;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.UndoInfo;
+import org.apache.hadoop.hdfs.util.Diff;
+import org.apache.hadoop.hdfs.util.Diff.Container;
+import org.apache.hadoop.hdfs.util.Diff.UndoInfo;
 import org.junit.Assert;
 import org.junit.Test;
@@ -45,7 +46,7 @@ public class TestDiff {
   }
 
   /** Test directory diff. */
-  @Test
+  @Test(timeout=60000)
   public void testDiff() throws Exception {
     for(int startSize = 0; startSize <= 1000; startSize = nextStep(startSize)) {
       for(int m = 0; m <= 10000; m = nextStep(m)) {
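The switch from @Test to @Test(timeout=60000) is the other small behavioral change in this file: JUnit 4 fails a test that exceeds the given number of milliseconds, a sensible guard for the nested sweep above (start sizes up to 1000, modification counts up to 10000). A minimal standalone example of the attribute:

    import org.junit.Test;

    public class TimeoutExample {
      /** JUnit 4 fails this test if it runs longer than 60 seconds. */
      @Test(timeout = 60000)
      public void completesQuickly() throws Exception {
        Thread.sleep(10);  // trivially within the limit
      }
    }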