HDFS-4574. Move Diff to the util package.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1454988 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-03-11 03:45:52 +00:00
parent d32fb8a62b
commit 43f8d0b9c9
13 changed files with 39 additions and 45 deletions

View File

@@ -190,3 +190,5 @@ Branch-2802 Snapshot (Unreleased)
 (Jing Zhao via szetszwo)

 HDFS-4579. Annotate snapshot tests. (Arpit Agarwal via suresh)
+
+HDFS-4574. Move Diff to the util package. (szetszwo)

View File

@@ -1558,7 +1558,7 @@ static String getFullPathName(INode[] inodes, int pos) {
   private static INode[] getRelativePathINodes(INode inode, INode ancestor) {
     // calculate the depth of this inode from the ancestor
     int depth = 0;
-    for (INode i = inode; i != null && !i.equals(ancestor); i = i.parent) {
+    for (INode i = inode; i != null && !i.equals(ancestor); i = i.getParent()) {
       depth++;
     }
     INode[] inodes = new INode[depth];
@@ -1571,7 +1571,7 @@ private static INode[] getRelativePathINodes(INode inode, INode ancestor) {
         return null;
       }
       inodes[depth-i-1] = inode;
-      inode = inode.parent;
+      inode = inode.getParent();
     }
     return inodes;
   }
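
Both hunks swap direct reads of the package-private parent field for the getParent() accessor; the two-pass traversal itself is unchanged. A minimal standalone sketch of that walk, using a hypothetical Node class in place of the real INode hierarchy:

    // Hypothetical stand-in for INode; only what the walk needs.
    class Node {
      private final Node parent;
      Node(Node parent) { this.parent = parent; }
      Node getParent() { return parent; }

      /** Nodes strictly below {@code ancestor}, ordered top-down to {@code node}. */
      static Node[] relativePath(Node node, Node ancestor) {
        int depth = 0;  // first pass: measure how deep node sits below ancestor
        for (Node n = node; n != null && !n.equals(ancestor); n = n.getParent()) {
          depth++;
        }
        Node[] path = new Node[depth];  // second pass: fill from the leaf backwards
        for (int i = 0; i < depth; i++) {
          path[depth - i - 1] = node;
          node = node.getParent();
        }
        return path;
      }
    }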

View File

@ -34,8 +34,10 @@
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.INode.Content.CountsMap.Key;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff;
import org.apache.hadoop.hdfs.util.Diff;
import org.apache.hadoop.util.StringUtils;
import com.google.common.annotations.VisibleForTesting;
@@ -109,7 +111,7 @@ static long toLong(PermissionStatus ps) {
    * should not modify it.
    */
   private long permission = 0L;
-  INodeDirectory parent = null;
+  private INodeDirectory parent = null;
   private long modificationTime = 0L;
   private long accessTime = 0L;

@@ -509,16 +511,6 @@ public String getLocalName() {
     return name == null? null: DFSUtil.bytes2String(name);
   }

-
-  String getLocalParentDir() {
-    INode inode = isRoot() ? this : getParent();
-    String parentDir = "";
-    if (inode != null) {
-      parentDir = inode.getFullPathName();
-    }
-    return (parentDir != null) ? parentDir : "";
-  }
-
   /**
    * @return null if the local name is null;
    *         otherwise, return the local name byte array.
@@ -532,13 +524,6 @@ public byte[] getKey() {
     return getLocalNameBytes();
   }

-  /**
-   * Set local file name
-   */
-  public void setLocalName(String name) {
-    setLocalName(DFSUtil.string2Bytes(name));
-  }
-
   /**
    * Set local file name
    */
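
Taken together, these hunks tighten the INode API: the parent field goes private, the String overload of setLocalName disappears in favor of the byte[] one, and getLocalParentDir() moves out of INode (it reappears as a private helper in the JSP helper file further down). The shape of the field change, sketched on a stand-in class rather than the real INode:

    // Stand-in illustrating the encapsulation, not the real INode API.
    abstract class SketchINode {
      private SketchINode parent = null;  // was package-private before this commit

      final SketchINode getParent() { return parent; }

      final void setParent(SketchINode parent) { this.parent = parent; }
    }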

View File

@@ -82,7 +82,7 @@ public INodeDirectory(INodeDirectory other, boolean adopt) {
     this.children = other.children;
     if (adopt && this.children != null) {
       for (INode child : children) {
-        child.parent = this;
+        child.setParent(this);
       }
     }
   }
@@ -529,7 +529,7 @@ private void addChild(final INode node, final int insertionPoint) {
     if (children == null) {
       children = new ArrayList<INode>(DEFAULT_FILES_PER_DIRECTORY);
     }
-    node.parent = this;
+    node.setParent(this);
     children.add(-insertionPoint - 1, node);

     if (node.getGroupName() == null) {

View File

@@ -157,7 +157,7 @@ public INodeFile getSnapshotINode(final Snapshot snapshot) {
   public INodeFile recordModification(final Snapshot latest)
       throws NSQuotaExceededException {
     return isInLatestSnapshot(latest)?
-        parent.replaceChild4INodeFileWithSnapshot(this)
+        getParent().replaceChild4INodeFileWithSnapshot(this)
             .recordModification(latest)
         : this;
   }

View File

@@ -133,7 +133,7 @@ protected INodeFile toINodeFile(long mtime) {
   public INodeFileUnderConstruction recordModification(final Snapshot latest)
       throws NSQuotaExceededException {
     return isInLatestSnapshot(latest)?
-        parent.replaceChild4INodeFileUcWithSnapshot(this)
+        getParent().replaceChild4INodeFileUcWithSnapshot(this)
             .recordModification(latest)
         : this;
   }

View File

@@ -47,7 +47,7 @@ public class INodeSymlink extends INode {
   @Override
   INode recordModification(Snapshot latest) throws NSQuotaExceededException {
     return isInLatestSnapshot(latest)?
-        parent.saveChild2Snapshot(this, latest, new INodeSymlink(this))
+        getParent().saveChild2Snapshot(this, latest, new INodeSymlink(this))
         : this;
   }
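
These three hunks are the same one-line change applied to three INode subclasses: a direct parent field access becomes a getParent() call. The shared copy-on-write pattern, distilled into a hypothetical pair of classes (the real method names, such as replaceChild4INodeFileWithSnapshot and saveChild2Snapshot, differ per subclass):

    interface Snapshot {}

    // Hypothetical sketch of the recordModification pattern above.
    abstract class SketchNode {
      private SketchDir parent;
      SketchDir getParent() { return parent; }
      abstract boolean isInLatestSnapshot(Snapshot latest);

      SketchNode recordModification(Snapshot latest) {
        return isInLatestSnapshot(latest)
            // captured by a snapshot: swap in a snapshot-aware copy via the
            // parent, then record the modification on that copy
            ? getParent().replaceWithSnapshotCopy(this).recordModification(latest)
            : this;  // not in any snapshot: nothing to preserve
      }
    }

    abstract class SketchDir extends SketchNode {
      /** Replace {@code child} with a snapshot-aware copy; return the copy. */
      abstract SketchNode replaceWithSnapshotCopy(SketchNode child);
    }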

View File

@@ -776,6 +776,15 @@ void generateNodesList(ServletContext context, JspWriter out,
     }
   }

+  private static String getLocalParentDir(INode inode) {
+    final INode parent = inode.isRoot() ? inode : inode.getParent();
+    String parentDir = "";
+    if (parent != null) {
+      parentDir = parent.getFullPathName();
+    }
+    return (parentDir != null) ? parentDir : "";
+  }
+
   // utility class used in block_info_xml.jsp
   static class XMLBlockInfo {
     final Block block;
@@ -817,7 +826,7 @@ public void toXML(XMLOutputter doc) throws IOException {
       doc.endTag();

       doc.startTag("local_directory");
-      doc.pcdata(inode.getLocalParentDir());
+      doc.pcdata(getLocalParentDir(inode));
       doc.endTag();

       doc.startTag("user_name");
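
The helper added above is the method deleted from INode earlier in this commit, now private to the one JSP helper that uses it. Its contract: empty string when the inode has no parent yet, the parent's full path otherwise, and for the root its own path. A standalone mirror, with a hypothetical Node interface standing in for INode:

    final class ParentDirSketch {
      // Hypothetical stand-in for the bits of INode the helper touches.
      interface Node {
        boolean isRoot();
        Node getParent();
        String getFullPathName();
      }

      // Mirror of getLocalParentDir above; never hands null to the JSP layer.
      static String localParentDir(Node inode) {
        final Node dir = inode.isRoot() ? inode : inode.getParent();
        final String path = (dir == null) ? null : dir.getFullPathName();
        return (path == null) ? "" : path;
      }
    }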

View File

@@ -227,7 +227,8 @@ public void renameSnapshot(String path, String oldName, String newName)
       throw new SnapshotException("The snapshot " + oldName
           + " does not exist for directory " + path);
     } else {
-      int indexOfNew = searchSnapshot(DFSUtil.string2Bytes(newName));
+      final byte[] newNameBytes = DFSUtil.string2Bytes(newName);
+      int indexOfNew = searchSnapshot(newNameBytes);
       if (indexOfNew > 0) {
         throw new SnapshotException("The snapshot " + newName
             + " already exists for directory " + path);
@@ -235,7 +236,7 @@
       // remove the one with old name from snapshotsByNames
       Snapshot snapshot = snapshotsByNames.remove(indexOfOld);
       final INodeDirectory ssRoot = snapshot.getRoot();
-      ssRoot.setLocalName(newName);
+      ssRoot.setLocalName(newNameBytes);
       indexOfNew = -indexOfNew - 1;
       if (indexOfNew <= indexOfOld) {
         snapshotsByNames.add(indexOfNew, snapshot);
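
Besides switching to the byte[] overload of setLocalName (the String overload was deleted above), the rewrite converts newName exactly once and reuses the array for both the duplicate check and the rename. The idea in isolation, with a plain array standing in for snapshotsByNames and UTF-8 standing in for DFSUtil.string2Bytes; nothing here is the real snapshot code:

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    final class RenameSketch {
      static int search(byte[][] names, byte[] key) {
        for (int i = 0; i < names.length; i++) {
          if (Arrays.equals(names[i], key)) {
            return i;  // found: would be a name collision
          }
        }
        return -1;
      }

      static void rename(byte[][] names, int indexOfOld, String newName) {
        // one conversion, reused for both the lookup and the rename
        final byte[] newNameBytes = newName.getBytes(StandardCharsets.UTF_8);
        if (search(names, newNameBytes) >= 0) {
          throw new IllegalArgumentException("snapshot " + newName + " already exists");
        }
        names[indexOfOld] = newNameBytes;
      }
    }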

View File

@@ -30,13 +30,13 @@
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.apache.hadoop.hdfs.server.namenode.Quota;
 import org.apache.hadoop.hdfs.server.namenode.INode.Content.CountsMap.Key;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.Container;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.UndoInfo;
+import org.apache.hadoop.hdfs.server.namenode.Quota;
+import org.apache.hadoop.hdfs.util.Diff;
+import org.apache.hadoop.hdfs.util.Diff.Container;
+import org.apache.hadoop.hdfs.util.Diff.UndoInfo;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;

 import com.google.common.base.Preconditions;

View File

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode.snapshot.diff;
+package org.apache.hadoop.hdfs.util;

 import java.util.ArrayList;
 import java.util.Collections;

View File

@@ -139,7 +139,7 @@ public void testGetFullPathName() {
     replication = 3;
     preferredBlockSize = 128*1024*1024;
     INodeFile inf = createINodeFile(replication, preferredBlockSize);
-    inf.setLocalName("f");
+    inf.setLocalName(DFSUtil.string2Bytes("f"));

     INodeDirectory root = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
         INodeDirectory.ROOT_NAME, perm, 0L);
@@ -147,19 +147,15 @@ public void testGetFullPathName() {
         DFSUtil.string2Bytes("d"), perm, 0L);

     assertEquals("f", inf.getFullPathName());
-    assertEquals("", inf.getLocalParentDir());

     dir.addChild(inf);
     assertEquals("d"+Path.SEPARATOR+"f", inf.getFullPathName());
-    assertEquals("d", inf.getLocalParentDir());

     root.addChild(dir);
     assertEquals(Path.SEPARATOR+"d"+Path.SEPARATOR+"f", inf.getFullPathName());
     assertEquals(Path.SEPARATOR+"d", dir.getFullPathName());

     assertEquals(Path.SEPARATOR, root.getFullPathName());
-    assertEquals(Path.SEPARATOR, root.getLocalParentDir());
-
   }

   /**
@@ -229,7 +225,7 @@ private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
     for (int i = 0; i < nCount; i++) {
       iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
           preferredBlockSize);
-      iNodes[i].setLocalName(fileNamePrefix + Integer.toString(i));
+      iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
       BlockInfo newblock = new BlockInfo(replication);
       iNodes[i].addBlock(newblock);
     }
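
Both test hunks are fallout from deleting INode.setLocalName(String): callers now do the conversion themselves. DFSUtil.string2Bytes and its inverse bytes2String (used in getLocalName above) are, to a first approximation, UTF-8 codecs; a sketch of the round trip using the standard library rather than DFSUtil itself:

    import java.nio.charset.StandardCharsets;

    final class NameBytesSketch {
      public static void main(String[] args) {
        byte[] name = "f".getBytes(StandardCharsets.UTF_8);      // ~ DFSUtil.string2Bytes("f")
        String back = new String(name, StandardCharsets.UTF_8);  // ~ DFSUtil.bytes2String(name)
        System.out.println(back.equals("f"));                    // true
      }
    }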

View File

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode.snapshot.diff;
+package org.apache.hadoop.hdfs.util;

 import java.util.ArrayList;
 import java.util.List;
@@ -26,8 +26,9 @@
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.Container;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.UndoInfo;
+import org.apache.hadoop.hdfs.util.Diff;
+import org.apache.hadoop.hdfs.util.Diff.Container;
+import org.apache.hadoop.hdfs.util.Diff.UndoInfo;
 import org.junit.Assert;
 import org.junit.Test;
@@ -45,7 +46,7 @@ static int nextStep(int n) {
   }

   /** Test directory diff. */
-  @Test
+  @Test(timeout=60000)
   public void testDiff() throws Exception {
     for(int startSize = 0; startSize <= 1000; startSize = nextStep(startSize)) {
       for(int m = 0; m <= 10000; m = nextStep(m)) {
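
One more small hardening: the stress test gets a JUnit 4 per-test timeout, so a regression in Diff that loops forever fails the build instead of hanging it. The annotation in a minimal, self-contained form:

    import org.junit.Test;

    public class TimeoutSketch {
      @Test(timeout = 60000)  // milliseconds; the test fails if it runs longer
      public void boundedTest() throws Exception {
        // exercise the code under test; a hang now becomes a test failure
      }
    }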