HDFS-8891. HDFS concat should keep srcs order. Contributed by Yong Zhang.

(cherry picked from commit dc7a061668)
(cherry picked from commit 2edfc3ce71)
Authored by Jing Zhao on 2015-08-14 14:42:43 -07:00; committed by Chris Douglas
parent f333e367e1
commit ba0f66984c
3 changed files with 24 additions and 7 deletions
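
The fix itself is a small change in FSDirConcatOp: the set used to de-duplicate the concat sources is switched from HashSet to LinkedHashSet, so the sources come back out in the order the caller passed them rather than in hash order. A minimal, standalone sketch of that iteration-order difference (plain Java for illustration, not HDFS code):

    import java.util.HashSet;
    import java.util.LinkedHashSet;
    import java.util.Set;

    public class SetOrderDemo {
        public static void main(String[] args) {
            String[] srcs = {"/file2", "/file0", "/file1"};

            Set<String> hashOrder = new HashSet<>();          // iteration order is unspecified
            Set<String> insertionOrder = new LinkedHashSet<>(); // iteration order follows add() calls

            for (String src : srcs) {
                hashOrder.add(src);
                insertionOrder.add(src);
            }

            // The LinkedHashSet is guaranteed to print [/file2, /file0, /file1];
            // the HashSet may print the same elements in any order.
            System.out.println("HashSet:       " + hashOrder);
            System.out.println("LinkedHashSet: " + insertionOrder);
        }
    }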

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -30,6 +30,8 @@ Release 2.7.2 - UNRELEASED
     HDFS-8863. The remaining space check in BlockPlacementPolicyDefault is
     flawed. (Kihwal Lee via yliu)
+    HDFS-8891. HDFS concat should keep srcs order. (Yong Zhang via jing9)
 Release 2.7.1 - 2015-07-06
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.StorageType;
@@ -28,7 +29,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.HashSet;
+import java.util.LinkedHashSet;
 import java.util.Set;
 import java.util.List;
@@ -103,7 +104,7 @@ class FSDirConcatOp {
   private static INodeFile[] verifySrcFiles(FSDirectory fsd, String[] srcs,
       INodesInPath targetIIP, FSPermissionChecker pc) throws IOException {
     // to make sure no two files are the same
-    Set<INodeFile> si = new HashSet<>();
+    Set<INodeFile> si = new LinkedHashSet<>();
     final INodeFile targetINode = targetIIP.getLastINode().asFile();
     final INodeDirectory targetParent = targetINode.getParent();
     // now check the srcs
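
For context, a simplified sketch of the shape of verifySrcFiles after the patch; the class names and the toArray step here are assumptions for illustration, not the actual namenode code. The point is that the LinkedHashSet both rejects duplicate sources and hands the files back in srcs order, which is the order in which their blocks are later appended to the target:

    import java.util.LinkedHashSet;
    import java.util.Objects;
    import java.util.Set;

    // Illustration only: a stand-in for HDFS's INodeFile, keyed by path.
    class FileNode {
        final String path;
        FileNode(String path) { this.path = path; }
        @Override public boolean equals(Object o) {
            return o instanceof FileNode && ((FileNode) o).path.equals(path);
        }
        @Override public int hashCode() { return Objects.hashCode(path); }
        @Override public String toString() { return path; }
    }

    public class VerifySrcsSketch {
        // Mirrors the shape of the patched verifySrcFiles: the LinkedHashSet
        // drops duplicate sources and remembers caller order, so the returned
        // array drives concatenation in srcs order.
        static FileNode[] verifySrcFiles(String[] srcs) {
            Set<FileNode> si = new LinkedHashSet<>();
            for (String src : srcs) {
                si.add(new FileNode(src)); // the real method also resolves and validates each src
            }
            return si.toArray(new FileNode[0]);
        }

        public static void main(String[] args) {
            FileNode[] ordered = verifySrcFiles(new String[] {"/b", "/a", "/c", "/a"});
            // Prints /b /a /c: duplicates dropped, caller order preserved.
            for (FileNode f : ordered) {
                System.out.println(f);
            }
        }
    }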

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java

@@ -111,18 +111,21 @@ public class TestHDFSConcat {
     long trgBlocks = nn.getBlockLocations(trg, 0, trgLen).locatedBlockCount();
     Path [] files = new Path[numFiles];
-    byte [] [] bytes = new byte [numFiles][(int)fileLen];
+    byte[][] bytes = new byte[numFiles + 1][(int) fileLen];
     LocatedBlocks [] lblocks = new LocatedBlocks[numFiles];
     long [] lens = new long [numFiles];
+    stm = dfs.open(trgPath);
+    stm.readFully(0, bytes[0]);
+    stm.close();
     int i;
     for(i=0; i<files.length; i++) {
       files[i] = new Path("/file"+i);
       Path path = files[i];
       System.out.println("Creating file " + path);
-      DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
+      // make files with different content
+      DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, i);
       fStatus = nn.getFileInfo(path.toUri().getPath());
       lens[i] = fStatus.getLen();
       assertEquals(trgLen, lens[i]); // file of the same length.
@@ -131,7 +134,7 @@ public class TestHDFSConcat {
       //read the file
       stm = dfs.open(path);
-      stm.readFully(0, bytes[i]);
+      stm.readFully(0, bytes[i + 1]);
       //bytes[i][10] = 10;
       stm.close();
     }
@@ -153,6 +156,17 @@ public class TestHDFSConcat {
     // check count update
     ContentSummary cBefore = dfs.getContentSummary(trgPath.getParent());
+    // resort file array, make INode id not sorted.
+    for (int j = 0; j < files.length / 2; j++) {
+      Path tempPath = files[j];
+      files[j] = files[files.length - 1 - j];
+      files[files.length - 1 - j] = tempPath;
+      byte[] tempBytes = bytes[1 + j];
+      bytes[1 + j] = bytes[files.length - 1 - j + 1];
+      bytes[files.length - 1 - j + 1] = tempBytes;
+    }
     // now concatenate
     dfs.concat(trgPath, files);
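
The added loop reverses the files array (and swaps the matching slots of bytes, offset by one because bytes[0] holds the target's original contents), so the sources are no longer in INode-id or creation order when concat is called. The scenario then only behaves as expected if concat appends the sources in exactly the order given. A hedged sketch of the kind of content check this enables (illustrative helper, not the actual test code):

    import java.util.Arrays;

    public class ConcatOrderCheckSketch {
        // Illustrative helper: the expected target contents after concat is
        // bytes[0] (the original target) followed by each source's bytes in
        // srcs order.
        static byte[] expectedAfterConcat(byte[][] bytes) {
            int total = 0;
            for (byte[] part : bytes) {
                total += part.length;
            }
            byte[] expected = new byte[total];
            int off = 0;
            for (byte[] part : bytes) {
                System.arraycopy(part, 0, expected, off, part.length);
                off += part.length;
            }
            return expected;
        }

        public static void main(String[] args) {
            byte[][] bytes = { {0, 0}, {2, 2}, {1, 1} }; // target, then srcs in (reversed) order
            byte[] expected = expectedAfterConcat(bytes);
            // A hash-ordered concat could instead produce [0, 0, 1, 1, 2, 2],
            // and a comparison against the actual file contents could fail.
            System.out.println(Arrays.toString(expected)); // [0, 0, 2, 2, 1, 1]
        }
    }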