HDFS-7167. NPE while running Mover if the given path is for a file. Contributed by Jing Zhao.

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

parent 6a60fa279a
commit 635fdcbe5c
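
For context: the Mover's "-p" option takes explicit HDFS paths to migrate, and the crash occurred when such a path named a regular file rather than a directory. Below is a minimal Java-level repro sketch mirroring the test added at the end of this diff; the class name is hypothetical, it must live in the same package as Mover for Cli access, and the ExitStatus import path is an assumption.

    package org.apache.hadoop.hdfs.server.mover; // same package as Mover.Cli

    import java.net.URI;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.server.balancer.ExitStatus;

    public class MoverFileRepro {
      public static void main(String[] args) throws Exception {
        // Assumption: the default Configuration reaches a running HDFS
        // cluster and /foo/bar exists as a plain file.
        Configuration conf = new Configuration();
        // "-p" names explicit files/directories to migrate; here a file.
        Map<URI, List<Path>> map = Mover.Cli.getNameNodePathsToMove(conf,
            "-p", "/foo/bar");
        // Before this patch the run below threw a NullPointerException;
        // with it, the run completes and returns the SUCCESS exit code.
        int exit = Mover.run(map, conf);
        System.out.println(exit == ExitStatus.SUCCESS.getExitCode());
      }
    }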
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -520,6 +520,11 @@ Release 2.6.0 - UNRELEASED
     HDFS-7157. Using Time.now() for recording start/end time of reconfiguration
     tasks (Lei Xu via cmccabe)
 
+    HDFS-6664. HDFS permissions guide documentation states incorrect default
+    group mapping class. (Ray Chiang via aw)
+
+    HDFS-4227. Document dfs.namenode.resource.* (Daisuke Kobayashi via aw)
+
   BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
 
     HDFS-6387. HDFS CLI admin tool for creating & deleting an
@@ -751,10 +756,7 @@ Release 2.6.0 - UNRELEASED
     HDFS-7140. Add a tool to list all the existing block storage policies.
     (jing9)
 
-    HDFS-6664. HDFS permissions guide documentation states incorrect default
-    group mapping class. (Ray Chiang via aw)
-
-    HDFS-4227. Document dfs.namenode.resource.* (Daisuke Kobayashi via aw)
+    HDFS-7167. NPE while running Mover if the given path is for a file. (jing9)
 
 Release 2.5.1 - 2014-09-05
 

--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -252,14 +252,9 @@ public class Mover {
    */
   private boolean processNamespace() {
     getSnapshottableDirs();
-    boolean hasRemaining = true;
-    try {
-      for (Path target : targetPaths) {
-        hasRemaining = processDirRecursively("", dfs.getFileInfo(target
-            .toUri().getPath()));
-      }
-    } catch (IOException e) {
-      LOG.warn("Failed to get root directory status. Ignore and continue.", e);
-    }
+    boolean hasRemaining = false;
+    for (Path target : targetPaths) {
+      hasRemaining |= processPath(target.toUri().getPath());
+    }
     // wait for pending move to finish and retry the failed migration
     hasRemaining |= Dispatcher.waitForMoveCompletion(storages.targets.values());
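
The hunk above swaps the getFileInfo()-based entry point for processPath() — the renamed processChildrenList shown in the next hunk — so every target path, file or directory, is walked through directory listings. A minimal sketch of that loop follows, assuming DFSClient#listPaths(src, startAfter, needLocation) semantics in which listing a plain file returns a single-entry DirectoryListing; this is an illustration, not the committed body verbatim.

    // `dfs` is the Mover's DFSClient; processRecursively is the renamed
    // per-entry handler from this patch.
    private boolean processPath(String fullPath) {
      boolean hasRemaining = false;
      for (byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;;) {
        final DirectoryListing children;
        try {
          // needLocation=true: entries are HdfsLocatedFileStatus, so files
          // carry block locations; listing a file path yields just that file.
          children = dfs.listPaths(fullPath, lastReturnedName, true);
        } catch (IOException e) {
          LOG.warn("Failed to list " + fullPath + ". Ignore and continue.", e);
          return hasRemaining;
        }
        if (children == null) {
          return hasRemaining; // the path no longer exists
        }
        for (HdfsFileStatus child : children.getPartialListing()) {
          hasRemaining |= processRecursively(fullPath, child);
        }
        if (!children.hasMore()) {
          return hasRemaining; // listing exhausted
        }
        lastReturnedName = children.getLastName(); // resume after this entry
      }
    }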
@@ -270,7 +265,7 @@ public class Mover {
    * @return whether there is still remaining migration work for the next
    * round
    */
-  private boolean processChildrenList(String fullPath) {
+  private boolean processPath(String fullPath) {
     boolean hasRemaining = false;
     for (byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;;) {
       final DirectoryListing children;
@@ -285,7 +280,7 @@ public class Mover {
         return hasRemaining;
       }
       for (HdfsFileStatus child : children.getPartialListing()) {
-        hasRemaining |= processDirRecursively(fullPath, child);
+        hasRemaining |= processRecursively(fullPath, child);
       }
       if (children.hasMore()) {
         lastReturnedName = children.getLastName();
@@ -296,8 +291,7 @@ public class Mover {
   }
 
   /** @return whether the migration requires next round */
-  private boolean processDirRecursively(String parent,
-      HdfsFileStatus status) {
+  private boolean processRecursively(String parent, HdfsFileStatus status) {
     String fullPath = status.getFullName(parent);
     boolean hasRemaining = false;
     if (status.isDir()) {
@@ -305,11 +299,11 @@ public class Mover {
         fullPath = fullPath + Path.SEPARATOR;
       }
 
-      hasRemaining = processChildrenList(fullPath);
+      hasRemaining = processPath(fullPath);
       // process snapshots if this is a snapshottable directory
       if (snapshottableDirs.contains(fullPath)) {
         final String dirSnapshot = fullPath + HdfsConstants.DOT_SNAPSHOT_DIR;
-        hasRemaining |= processChildrenList(dirSnapshot);
+        hasRemaining |= processPath(dirSnapshot);
       }
     } else if (!status.isSymlink()) { // file
       try {
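
Taken together, these Mover.java hunks route files and directories through the same listing loop. Why that cures the NPE is an inference rather than something the diff states: listings requested with needLocation=true return HdfsLocatedFileStatus entries, so the file branch has block locations to work with, whereas the old getFileInfo() call produced a bare HdfsFileStatus. A sketch of the file branch, continuing the context lines at the end of the hunk above; the processFile signature and the log message are assumptions.

    } else if (!status.isSymlink()) { // file
      try {
        // `status` came from a needLocation=true listing, so the cast holds;
        // with the old getFileInfo() entry point there were no located
        // blocks to read, the presumed source of the NPE.
        hasRemaining = processFile(fullPath, (HdfsLocatedFileStatus) status);
      } catch (IOException e) {
        LOG.warn("Failed to check the status of " + parent
            + ". Ignore it and continue.", e);
      }
    }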
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.mover;
 
-import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayList;
@@ -27,12 +26,10 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
-import com.google.common.base.Joiner;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
@@ -46,7 +43,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -514,6 +510,42 @@ public class TestStorageMover {
         "==================================================\n\n");
   }
 
+  /**
+   * Run Mover with arguments specifying files and directories
+   */
+  @Test
+  public void testMoveSpecificPaths() throws Exception {
+    LOG.info("testMoveSpecificPaths");
+    final Path foo = new Path("/foo");
+    final Path barFile = new Path(foo, "bar");
+    final Path foo2 = new Path("/foo2");
+    final Path bar2File = new Path(foo2, "bar2");
+    Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
+    policyMap.put(foo, COLD);
+    policyMap.put(foo2, WARM);
+    NamespaceScheme nsScheme = new NamespaceScheme(Arrays.asList(foo, foo2),
+        Arrays.asList(barFile, bar2File), BLOCK_SIZE, null, policyMap);
+    ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
+        NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
+    MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
+    test.setupCluster();
+
+    try {
+      test.prepareNamespace();
+      test.setStoragePolicy();
+
+      Map<URI, List<Path>> map = Mover.Cli.getNameNodePathsToMove(test.conf,
+          "-p", "/foo/bar", "/foo2");
+      int result = Mover.run(map, test.conf);
+      Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), result);
+
+      Thread.sleep(5000);
+      test.verify(true);
+    } finally {
+      test.shutdownCluster();
+    }
+  }
+
   /**
    * Move an open file into archival storage
    */