HDFS-7943. Append cannot handle the last block with length greater than the preferred block size. Contributed by Jing Zhao.

(cherry picked from commit bee5a6a64a)
Jing Zhao 2015-03-18 18:40:59 -07:00
parent 79c07bbaca
commit 6dcc79507d
3 changed files with 44 additions and 2 deletions
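For context on the failure mode: concat adopts the source files' blocks into the target file, so a source written with a larger preferred block size could leave the target with a last block longer than the target's own preferred block size, which append then mishandled. A minimal repro sketch against a MiniDFSCluster; the paths, sizes, and class name are illustrative, not taken from this patch:

// A minimal repro sketch, assuming a test classpath with MiniDFSCluster.
// Names and sizes are illustrative only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class AppendAfterConcatSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      DistributedFileSystem dfs = cluster.getFileSystem();
      long blockSize = dfs.getDefaultBlockSize();

      // Target file written with the default preferred block size.
      Path trg = new Path("/trg");
      DFSTestUtil.createFile(dfs, trg, blockSize, (short) 1, 0L);

      // Source file written with a 2x preferred block size, so its single
      // block is longer than the target's preferred block size.
      Path src = new Path("/src");
      DFSTestUtil.createFile(dfs, src, 1024, 2 * blockSize,
          2 * blockSize, (short) 1, 0L);

      // Before HDFS-7943, this concat could leave trg with a last block
      // longer than trg's preferred block size, and a later append on trg
      // would then mishandle that block. With this patch the concat itself
      // is rejected with a HadoopIllegalArgumentException.
      dfs.concat(trg, new Path[] {src});
    } finally {
      cluster.shutdown();
    }
  }
}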

CHANGES.txt

@@ -896,6 +896,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7945. The WebHdfs system on DN does not honor the length parameter.
     (wheat9)
 
+    HDFS-7943. Append cannot handle the last block with length greater than
+    the preferred block size. (jing9)
+
   BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
     HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

FSDirConcatOp.java

@@ -34,6 +34,16 @@ import java.util.List;
 
 import static org.apache.hadoop.util.Time.now;
 
+/**
+ * Restrictions for a concat operation:
+ * <pre>
+ * 1. the src file and the target file are in the same dir
+ * 2. all the source files are not in snapshot
+ * 3. any source file cannot be the same with the target file
+ * 4. source files cannot be under construction or empty
+ * 5. source file's preferred block size cannot be greater than the target file
+ * </pre>
+ */
 class FSDirConcatOp {
 
   static HdfsFileStatus concat(FSDirectory fsd, String target, String[] srcs,
@@ -123,14 +133,25 @@ class FSDirConcatOp {
         throw new SnapshotException("Concat: the source file " + src
             + " is referred by some other reference in some snapshot.");
       }
+      // source file cannot be the same with the target file
       if (srcINode == targetINode) {
         throw new HadoopIllegalArgumentException("concat: the src file " + src
             + " is the same with the target file " + targetIIP.getPath());
       }
+      // source file cannot be under construction or empty
       if(srcINodeFile.isUnderConstruction() || srcINodeFile.numBlocks() == 0) {
         throw new HadoopIllegalArgumentException("concat: source file " + src
             + " is invalid or empty or underConstruction");
       }
+      // source file's preferred block size cannot be greater than the target
+      // file
+      if (srcINodeFile.getPreferredBlockSize() >
+          targetINode.getPreferredBlockSize()) {
+        throw new HadoopIllegalArgumentException("concat: source file " + src
+            + " has preferred block size " + srcINodeFile.getPreferredBlockSize()
+            + " which is greater than the target file's preferred block size "
+            + targetINode.getPreferredBlockSize());
+      }
       si.add(srcINodeFile);
     }
@@ -143,9 +164,10 @@ class FSDirConcatOp {
     return si.toArray(new INodeFile[si.size()]);
   }
 
-  private static QuotaCounts computeQuotaDeltas(FSDirectory fsd, INodeFile target, INodeFile[] srcList) {
+  private static QuotaCounts computeQuotaDeltas(FSDirectory fsd,
+      INodeFile target, INodeFile[] srcList) {
     QuotaCounts deltas = new QuotaCounts.Builder().build();
-    short targetRepl = target.getBlockReplication();
+    final short targetRepl = target.getBlockReplication();
     for (INodeFile src : srcList) {
       short srcRepl = src.getBlockReplication();
       long fileSize = src.computeFileSize();
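The computeQuotaDeltas change above is cosmetic, but the method's job is worth spelling out: after concat, the source blocks count against storage-space quota at the target's replication, so each source contributes its file size times the replication difference. A hedged illustration with invented numbers; the real method accumulates these deltas into QuotaCounts:

public class ConcatQuotaDeltaSketch {
  public static void main(String[] args) {
    long fileSize = 128L * 1024 * 1024; // one 128 MB source file
    short srcRepl = 2;                  // source stored at replication 2
    short targetRepl = 3;               // target stored at replication 3
    // After concat the source's blocks are charged at the target's
    // replication, so this source adds fileSize * (targetRepl - srcRepl)
    // bytes of storage-space delta.
    long delta = fileSize * (targetRepl - srcRepl);
    System.out.println("storage-space delta: " + delta); // +134217728
  }
}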

TestHDFSConcat.java

@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@ -388,6 +389,22 @@ public class TestHDFSConcat {
} catch (Exception e) { } catch (Exception e) {
// exspected // exspected
} }
// the source file's preferred block size cannot be greater than the target
{
final Path src1 = new Path(parentDir, "src1");
DFSTestUtil.createFile(dfs, src1, fileLen, REPL_FACTOR, 0L);
final Path src2 = new Path(parentDir, "src2");
// create a file whose preferred block size is greater than the target
DFSTestUtil.createFile(dfs, src2, 1024, fileLen,
dfs.getDefaultBlockSize(trg) * 2, REPL_FACTOR, 0L);
try {
dfs.concat(trg, new Path[] {src1, src2});
fail("didn't fail for src with greater preferred block size");
} catch (Exception e) {
GenericTestUtils.assertExceptionContains("preferred block size", e);
}
}
} }
/** /**
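A client that wants to avoid tripping the new server-side check can compare preferred block sizes before calling concat. A sketch against the public FileSystem API; the helper name is invented and not part of this patch:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ConcatPreCheckSketch {
  // Hypothetical helper: reject up front any source whose preferred block
  // size exceeds the target's, mirroring the check added by HDFS-7943.
  static void concatIfBlockSizesCompatible(FileSystem fs, Path trg,
      Path[] srcs) throws IOException {
    long targetBlockSize = fs.getFileStatus(trg).getBlockSize();
    for (Path src : srcs) {
      if (fs.getFileStatus(src).getBlockSize() > targetBlockSize) {
        throw new IOException("concat would be rejected: " + src
            + " has a larger preferred block size than " + trg);
      }
    }
    fs.concat(trg, srcs);
  }
}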