HDFS-4143. Change blocks to private in INodeFile and renames isLink() to isSymlink() in INode.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1405237 13f79535-47bb-0310-9956-ffa450edef68
parent a9e4b08f2b
commit d174f574ba
@@ -445,6 +445,9 @@ Release 2.0.3-alpha - Unreleased
 
     HDFS-3916. libwebhdfs testing code cleanup. (Jing Zhao via suresh)
 
+    HDFS-4143. Change blocks to private in INodeFile and renames isLink() to
+    isSymlink() in INode. (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES
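Two mechanical strands repeat through every hunk below: isLink() becomes isSymlink(), and direct reads and writes of the INodeFile.blocks array become accessor calls. A minimal, self-contained sketch of that accessor surface follows; the stub classes and method bodies are illustrative reconstructions, not the real Hadoop implementations.

    // Stub types standing in for the org.apache.hadoop.hdfs classes; illustration only.
    class BlockInfo {
      private long numBytes;
      long getNumBytes() { return numBytes; }
      void setNumBytes(long numBytes) { this.numBytes = numBytes; }
    }

    class FileSketch {                      // stands in for INodeFile
      private BlockInfo[] blocks;           // the field this commit makes private

      BlockInfo[] getBlocks() { return blocks; }
      public void setBlocks(BlockInfo[] blocks) { this.blocks = blocks; }
      int numBlocks() { return blocks == null ? 0 : blocks.length; }
      BlockInfo getLastBlock() {            // replaces blocks[blocks.length-1] at call sites
        return numBlocks() == 0 ? null : blocks[blocks.length - 1];
      }
    }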
@@ -313,7 +313,7 @@ public class FSDirectory implements Closeable {
     }
     if(newParent == null)
       return null;
-    if(!newNode.isDirectory() && !newNode.isLink()) {
+    if(!newNode.isDirectory() && !newNode.isSymlink()) {
       // Add file->block mapping
       INodeFile newF = (INodeFile)newNode;
       BlockInfo[] blocks = newF.getBlocks();
@@ -533,7 +533,7 @@ public class FSDirectory implements Closeable {
     if (dst.equals(src)) {
       return true;
     }
-    if (srcInode.isLink() &&
+    if (srcInode.isSymlink() &&
         dst.equals(((INodeSymlink)srcInode).getLinkValue())) {
       throw new FileAlreadyExistsException(
           "Cannot rename symlink "+src+" to its target "+dst);
@@ -655,7 +655,7 @@ public class FSDirectory implements Closeable {
       throw new FileAlreadyExistsException(
           "The source "+src+" and destination "+dst+" are the same");
     }
-    if (srcInode.isLink() &&
+    if (srcInode.isSymlink() &&
         dst.equals(((INodeSymlink)srcInode).getLinkValue())) {
       throw new FileAlreadyExistsException(
           "Cannot rename symlink "+src+" to its target "+dst);
@@ -819,7 +819,7 @@ public class FSDirectory implements Closeable {
     if (inode == null) {
       return null;
     }
-    assert !inode.isLink();
+    assert !inode.isSymlink();
     if (inode.isDirectory()) {
       return null;
     }
@@ -851,7 +851,7 @@ public class FSDirectory implements Closeable {
     if (inode == null) {
       throw new FileNotFoundException("File does not exist: " + filename);
     }
-    if (inode.isDirectory() || inode.isLink()) {
+    if (inode.isDirectory() || inode.isSymlink()) {
       throw new IOException("Getting block size of non-file: "+ filename);
     }
     return ((INodeFile)inode).getPreferredBlockSize();
@@ -868,7 +868,7 @@ public class FSDirectory implements Closeable {
       if (inode == null) {
         return false;
       }
-      return inode.isDirectory() || inode.isLink()
+      return inode.isDirectory() || inode.isSymlink()
           ? true
           : ((INodeFile)inode).getBlocks() != null;
     } finally {
@@ -968,7 +968,7 @@ public class FSDirectory implements Closeable {
       for(String src : srcs) {
         INodeFile srcInode = (INodeFile)getINode(src);
         allSrcInodes[i++] = srcInode;
-        totalBlocks += srcInode.blocks.length;
+        totalBlocks += srcInode.numBlocks();
       }
       trgInode.appendBlocks(allSrcInodes, totalBlocks); // copy the blocks
 
@@ -977,7 +977,7 @@ public class FSDirectory implements Closeable {
      for(INodeFile nodeToRemove: allSrcInodes) {
        if(nodeToRemove == null) continue;
 
-       nodeToRemove.blocks = null;
+       nodeToRemove.setBlocks(null);
        trgParent.removeChild(nodeToRemove);
        count++;
      }
@@ -1208,7 +1208,7 @@ public class FSDirectory implements Closeable {
        return null;
      if (targetNode.isDirectory())
        return null;
-     if (targetNode.isLink())
+     if (targetNode.isSymlink())
        return null;
      return ((INodeFile)targetNode).getBlocks();
    } finally {
@@ -1822,7 +1822,7 @@ public class FSDirectory implements Closeable {
      if (child.isDirectory()) {
        updateCountForINodeWithQuota((INodeDirectory)child,
                                     counts, nodesInPath);
-     } else if (child.isLink()) {
+     } else if (child.isSymlink()) {
        counts.nsCount += 1;
      } else { // reduce recursive calls
        counts.nsCount += 1;
@@ -2051,7 +2051,7 @@ public class FSDirectory implements Closeable {
        node.getFsPermission(),
        node.getUserName(),
        node.getGroupName(),
-       node.isLink() ? ((INodeSymlink)node).getSymlink() : null,
+       node.isSymlink() ? ((INodeSymlink)node).getSymlink() : null,
        path);
  }
 
@@ -2087,7 +2087,7 @@ public class FSDirectory implements Closeable {
        node.getFsPermission(),
        node.getUserName(),
        node.getGroupName(),
-       node.isLink() ? ((INodeSymlink)node).getSymlink() : null,
+       node.isSymlink() ? ((INodeSymlink)node).getSymlink() : null,
        path,
        loc);
    }
@@ -2158,7 +2158,7 @@ public class FSDirectory implements Closeable {
   */
  void cacheName(INode inode) {
    // Name is cached only for files
-   if (inode.isDirectory() || inode.isLink()) {
+   if (inode.isDirectory() || inode.isSymlink()) {
      return;
    }
    ByteArray name = new ByteArray(inode.getLocalNameBytes());
@@ -162,7 +162,7 @@ public class FSImageSerialization {
       PermissionStatus.write(out, node.getUserName(),
                              node.getGroupName(),
                              filePerm);
-    } else if (node.isLink()) {
+    } else if (node.isSymlink()) {
       out.writeShort(0);  // replication
       out.writeLong(0);   // modification time
       out.writeLong(0);   // access time
@@ -1390,7 +1390,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           + target + " is under construction");
     }
     // per design target shouldn't be empty and all the blocks same size
-    if(trgInode.blocks.length == 0) {
+    if(trgInode.numBlocks() == 0) {
       throw new HadoopIllegalArgumentException("concat: target file "
           + target + " is empty");
     }
@@ -1398,10 +1398,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     long blockSize = trgInode.getPreferredBlockSize();
 
     // check the end block to be full
-    if(blockSize != trgInode.blocks[trgInode.blocks.length-1].getNumBytes()) {
+    final BlockInfo last = trgInode.getLastBlock();
+    if(blockSize != last.getNumBytes()) {
       throw new HadoopIllegalArgumentException("The last block in " + target
-          + " is not full; last block size = "
-          + trgInode.blocks[trgInode.blocks.length-1].getNumBytes()
+          + " is not full; last block size = " + last.getNumBytes()
           + " but file block size = " + blockSize);
     }
 
@@ -1418,7 +1418,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       final INodeFile srcInode = INodeFile.valueOf(dir.getINode(src), src);
       if(src.isEmpty()
           || srcInode.isUnderConstruction()
-          || srcInode.blocks.length == 0) {
+          || srcInode.numBlocks() == 0) {
         throw new HadoopIllegalArgumentException("concat: source file " + src
             + " is invalid or empty or underConstruction");
       }
@@ -1435,15 +1435,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       //boolean endBlock=false;
       // verify that all the blocks are of the same length as target
       // should be enough to check the end blocks
-      int idx = srcInode.blocks.length-1;
+      final BlockInfo[] srcBlocks = srcInode.getBlocks();
+      int idx = srcBlocks.length-1;
       if(endSrc)
-        idx = srcInode.blocks.length-2; // end block of endSrc is OK not to be full
-      if(idx >= 0 && srcInode.blocks[idx].getNumBytes() != blockSize) {
+        idx = srcBlocks.length-2; // end block of endSrc is OK not to be full
+      if(idx >= 0 && srcBlocks[idx].getNumBytes() != blockSize) {
         throw new HadoopIllegalArgumentException("concat: the soruce file "
             + src + " and the target file " + target
             + " should have the same blocks sizes: target block size is "
             + blockSize + " but the size of source block " + idx + " is "
-            + srcInode.blocks[idx].getNumBytes());
+            + srcBlocks[idx].getNumBytes());
       }
 
       si.add(srcInode);
@@ -1678,7 +1679,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       if (parentNode == null) {
         throw new FileNotFoundException("Parent directory doesn't exist: "
             + parent.toString());
-      } else if (!parentNode.isDirectory() && !parentNode.isLink()) {
+      } else if (!parentNode.isDirectory() && !parentNode.isSymlink()) {
         throw new ParentNotDirectoryException("Parent path is not a directory: "
             + parent.toString());
       }
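The concat() validations keep their semantics under the new surface: numBlocks() == 0 stands in for blocks.length == 0, and getLastBlock() collapses the doubly-indexed blocks[blocks.length-1]. A compilable restatement of the last-block check, using the FileSketch and BlockInfo stubs sketched after the CHANGES.txt hunk (again an illustration, not the real method):

    // Assumes the FileSketch and BlockInfo stubs defined earlier on this page.
    static void checkLastBlockFull(FileSketch trg, long blockSize) {
      BlockInfo last = trg.getLastBlock();   // was: trg.blocks[trg.blocks.length-1]
      if (last == null || blockSize != last.getNumBytes()) {
        throw new IllegalArgumentException("last block size = "
            + (last == null ? 0 : last.getNumBytes())
            + " but file block size = " + blockSize);
      }
    }

One nuance worth noting: the rewritten FSNamesystem code calls last.getNumBytes() without a null check, which is safe only because the numBlocks() == 0 test in the preceding hunk has already rejected empty targets.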
@@ -183,7 +183,9 @@ abstract class INode implements Comparable<byte[]> {
   /**
    * Check whether it's a directory
    */
-  abstract boolean isDirectory();
+  public boolean isDirectory() {
+    return false;
+  }
 
   /**
    * Collect all the blocks in all children of this INode.
@@ -332,7 +334,7 @@ abstract class INode implements Comparable<byte[]> {
   /**
    * Check whether it's a symlink
    */
-  public boolean isLink() {
+  public boolean isSymlink() {
     return false;
   }
 
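isDirectory() changes from an abstract method to a concrete default returning false, matching the isLink()-now-isSymlink() default just below it; the later hunks then delete the boilerplate overrides in INodeFile and INodeSymlink, while INodeDirectory's override becomes final. A stub-named sketch of the resulting pattern (illustrative, not the real hierarchy):

    abstract class NodeStub {                          // stands in for INode
      public boolean isDirectory() { return false; }   // was: abstract boolean isDirectory();
      public boolean isSymlink() { return false; }     // renamed from isLink()
    }
    class DirectoryStub extends NodeStub {             // stands in for INodeDirectory
      @Override
      public final boolean isDirectory() { return true; }
    }
    class SymlinkStub extends NodeStub {               // stands in for INodeSymlink
      @Override
      public boolean isSymlink() { return true; }
    }
    // A plain-file node (INodeFile) no longer needs any override: both defaults apply.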
@@ -78,11 +78,9 @@ class INodeDirectory extends INode {
     this.children = other.getChildren();
   }
 
-  /**
-   * Check whether it's a directory
-   */
+  /** @return true unconditionally. */
   @Override
-  public boolean isDirectory() {
+  public final boolean isDirectory() {
     return true;
   }
 
@@ -207,7 +205,7 @@ class INodeDirectory extends INode {
       if (index >= 0) {
         existing.inodes[index] = curNode;
       }
-      if (curNode.isLink() && (!lastComp || (lastComp && resolveLink))) {
+      if (curNode.isSymlink() && (!lastComp || (lastComp && resolveLink))) {
         final String path = constructPath(components, 0, components.length);
         final String preceding = constructPath(components, 0, count);
         final String remainder =
@@ -55,7 +55,7 @@ public class INodeFile extends INode implements BlockCollection {
 
   private long header;
 
-  BlockInfo blocks[] = null;
+  private BlockInfo[] blocks;
 
   INodeFile(PermissionStatus permissions, BlockInfo[] blklist,
       short replication, long modificationTime,
@@ -63,7 +63,7 @@ public class INodeFile extends INode implements BlockCollection {
     super(permissions, modificationTime, atime);
     this.setReplication(replication);
     this.setPreferredBlockSize(preferredBlockSize);
-    blocks = blklist;
+    this.blocks = blklist;
   }
 
   /**
@@ -76,11 +76,6 @@ public class INodeFile extends INode implements BlockCollection {
     super.setPermission(permission.applyUMask(UMASK));
   }
 
-  @Override
-  boolean isDirectory() {
-    return false;
-  }
-
   /** @return the replication factor of the file. */
   @Override
   public short getBlockReplication() {
@@ -128,7 +123,7 @@ public class INodeFile extends INode implements BlockCollection {
     for(BlockInfo bi: newlist) {
       bi.setBlockCollection(this);
     }
-    this.blocks = newlist;
+    setBlocks(newlist);
   }
 
   /**
@@ -136,14 +131,13 @@ public class INodeFile extends INode implements BlockCollection {
    */
   void addBlock(BlockInfo newblock) {
     if (this.blocks == null) {
-      this.blocks = new BlockInfo[1];
-      this.blocks[0] = newblock;
+      this.setBlocks(new BlockInfo[]{newblock});
     } else {
       int size = this.blocks.length;
       BlockInfo[] newlist = new BlockInfo[size + 1];
       System.arraycopy(this.blocks, 0, newlist, 0, size);
       newlist[size] = newblock;
-      this.blocks = newlist;
+      this.setBlocks(newlist);
     }
   }
 
@@ -152,6 +146,11 @@ public class INodeFile extends INode implements BlockCollection {
     this.blocks[idx] = blk;
   }
 
+  /** Set the blocks. */
+  public void setBlocks(BlockInfo[] blocks) {
+    this.blocks = blocks;
+  }
+
   @Override
   int collectSubtreeBlocksAndClear(List<Block> v) {
     parent = null;
@@ -161,7 +160,7 @@ public class INodeFile extends INode implements BlockCollection {
         blk.setBlockCollection(null);
       }
     }
-    blocks = null;
+    setBlocks(null);
     return 1;
   }
 
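With the field private, every mutation inside INodeFile now funnels through setBlocks(), as addBlock() and collectSubtreeBlocksAndClear() show above. A short usage demo against the FileSketch stub from earlier; the calls are hypothetical but mirror the call sites this commit touches:

    // Demo only; mirrors nodeToRemove.setBlocks(null) in unprotectedConcat and
    // this.setBlocks(new BlockInfo[]{newblock}) in addBlock.
    public class SetBlocksDemo {
      public static void main(String[] args) {
        FileSketch f = new FileSketch();
        f.setBlocks(new BlockInfo[]{ new BlockInfo() });
        assert f.numBlocks() == 1;
        f.setBlocks(null);                  // detach all blocks from the inode
        assert f.numBlocks() == 0 && f.getBlocks() == null;
      }
    }

A single write path also gives later changes one place to attach invariants or bookkeeping, which is the usual payoff of this kind of encapsulation.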
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
+import java.util.Arrays;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -28,8 +29,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.MutableBlockCollection;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 
-import com.google.common.base.Joiner;
-
 /**
  * I-node for file being written.
  */
@@ -109,9 +108,9 @@ class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollec
   // use the modification time as the access time
   //
   INodeFile convertToInodeFile() {
-    assert allBlocksComplete() :
-      "Can't finalize inode " + this + " since it contains " +
-      "non-complete blocks! Blocks are: " + blocksAsString();
+    assert allBlocksComplete() : "Can't finalize inode " + this
+      + " since it contains non-complete blocks! Blocks are "
+      + Arrays.asList(getBlocks());
     INodeFile obj = new INodeFile(getPermissionStatus(),
                                   getBlocks(),
                                   getBlockReplication(),
@@ -126,7 +125,7 @@ class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollec
    * @return true if all of the blocks in this file are marked as completed.
    */
   private boolean allBlocksComplete() {
-    for (BlockInfo b : blocks) {
+    for (BlockInfo b : getBlocks()) {
       if (!b.isComplete()) {
         return false;
       }
@@ -139,6 +138,7 @@ class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollec
    * the last one on the list.
    */
   void removeLastBlock(Block oldblock) throws IOException {
+    final BlockInfo[] blocks = getBlocks();
     if (blocks == null) {
       throw new IOException("Trying to delete non-existant block " + oldblock);
     }
@@ -150,7 +150,7 @@ class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollec
     //copy to a new list
     BlockInfo[] newlist = new BlockInfo[size_1];
     System.arraycopy(blocks, 0, newlist, 0, size_1);
-    blocks = newlist;
+    setBlocks(newlist);
   }
 
   /**
@@ -159,11 +159,9 @@ class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollec
    */
   @Override
   public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
-                                                 DatanodeDescriptor[] targets)
-      throws IOException {
-    if (blocks == null || blocks.length == 0) {
-      throw new IOException("Trying to update non-existant block. " +
-          "File is empty.");
+      DatanodeDescriptor[] targets) throws IOException {
+    if (numBlocks() == 0) {
+      throw new IOException("Failed to set last block: File is empty.");
     }
     BlockInfoUnderConstruction ucBlock =
       lastBlock.convertToBlockUnderConstruction(
@@ -172,8 +170,4 @@ class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollec
     setBlock(numBlocks()-1, ucBlock);
     return ucBlock;
   }
-
-  private String blocksAsString() {
-    return Joiner.on(",").join(this.blocks);
-  }
 }
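Because blocks is now private to the superclass, INodeFileUnderConstruction.removeLastBlock() binds getBlocks() to a local variable that reuses the field's old name, so the rest of the method body compiles unchanged. A minimal sketch of that shadowing pattern with stub types (illustration only):

    class BaseSketch {
      private int[] data = {1, 2, 3};
      int[] getData() { return data; }
      void setData(int[] data) { this.data = data; }
    }

    class DerivedSketch extends BaseSketch {
      void dropLast() {
        final int[] data = getData();       // local reuses the now-inaccessible field's name
        if (data == null || data.length == 0) {
          return;
        }
        int[] newlist = new int[data.length - 1];
        System.arraycopy(data, 0, newlist, 0, newlist.length);
        setData(newlist);                   // writes still go through the setter
      }
    }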
@@ -41,7 +41,7 @@ public class INodeSymlink extends INode {
   }
 
   @Override
-  public boolean isLink() {
+  public boolean isSymlink() {
     return true;
   }
 
@@ -73,9 +73,4 @@ public class INodeSymlink extends INode {
     summary[1]++; // Increment the file count
     return summary;
   }
-
-  @Override
-  public boolean isDirectory() {
-    return false;
-  }
 }
@@ -64,6 +64,7 @@ import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSck;
@@ -678,11 +679,11 @@ public class TestFsck {
     DFSTestUtil.waitReplication(fs, filePath, (short)1);
 
     // intentionally corrupt NN data structure
-    INodeFile node =
-      (INodeFile)cluster.getNamesystem().dir.rootDir.getNode(fileName,
-          true);
-    assertEquals(node.blocks.length, 1);
-    node.blocks[0].setNumBytes(-1L); // set the block length to be negative
+    INodeFile node = (INodeFile)cluster.getNamesystem().dir.rootDir.getNode(
+        fileName, true);
+    final BlockInfo[] blocks = node.getBlocks();
+    assertEquals(blocks.length, 1);
+    blocks[0].setNumBytes(-1L); // set the block length to be negative
 
     // run fsck and expect a failure with -1 as the error code
     String outStr = runFsck(conf, -1, true, fileName);