HDFS-9569. Log the name of the fsimage being loaded for better supportability. (Yongjun Zhang)

Yongjun Zhang 2016-01-12 09:22:22 -08:00
parent 13de8359a1
commit 25051c3bd0
5 changed files with 74 additions and 19 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -2600,6 +2600,9 @@ Release 2.7.3 - UNRELEASED
HDFS-9574. Reduce client failures during datanode restart (kihwal)
+ HDFS-9569. Log the name of the fsimage being loaded for better
+ supportability. (Yongjun Zhang)
OPTIMIZATIONS
BUG FIXES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -665,14 +665,19 @@ public class FSImage implements Closeable {
LOG.info("No edit log streams selected.");
}
+ Exception le = null;
FSImageFile imageFile = null;
for (int i = 0; i < imageFiles.size(); i++) {
try {
imageFile = imageFiles.get(i);
loadFSImageFile(target, recovery, imageFile, startOpt);
break;
- } catch (IOException ioe) {
- LOG.error("Failed to load image from " + imageFile, ioe);
+ } catch (IllegalReservedPathException ie) {
+ throw new IOException("Failed to load image from " + imageFile,
+ ie);
+ } catch (Exception e) {
+ le = e;
+ LOG.error("Failed to load image from " + imageFile, e);
target.clear();
imageFile = null;
}
@@ -680,7 +685,8 @@ public class FSImage implements Closeable {
// Failed to load any images, error out
if (imageFile == null) {
FSEditLog.closeAllStreams(editStreams);
throw new IOException("Failed to load an FSImage file!");
throw new IOException("Failed to load FSImage file, see error(s) " +
"above for more info.");
}
prog.endPhase(Phase.LOADING_FSIMAGE);
@@ -721,7 +727,7 @@ public class FSImage implements Closeable {
void loadFSImageFile(FSNamesystem target, MetaRecoveryContext recovery,
FSImageFile imageFile, StartupOption startupOption) throws IOException {
- LOG.debug("Planning to load image :\n" + imageFile);
+ LOG.info("Planning to load image: " + imageFile);
StorageDirectory sdForProperties = imageFile.sd;
storage.readProperties(sdForProperties, startupOption);
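Taken together, the FSImage.java hunks change what an operator sees at NameNode startup: the image about to be loaded is reported at INFO level, and the terminal failure message now points back to the per-image errors logged before it. Purely as an illustration (not output from a real run; the path, transaction id, logger layout and FSImageFile toString format shown here are assumptions), a failed load would surface roughly like:

INFO  FSImage: Planning to load image: FSImageFile(file=/data/dfs/name/current/fsimage_0000000000000000042, cpktTxId=0000000000000000042)
ERROR FSImage: Failed to load image from FSImageFile(file=/data/dfs/name/current/fsimage_0000000000000000042, cpktTxId=0000000000000000042)
java.io.IOException: Failed to load FSImage file, see error(s) above for more info.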

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java

@@ -669,7 +669,8 @@ public class FSImageFormat {
* This method is only used for image loading so that synchronization,
* modification time update and space count update are not needed.
*/
- private void addToParent(INodeDirectory parent, INode child) {
+ private void addToParent(INodeDirectory parent, INode child)
+ throws IllegalReservedPathException {
FSDirectory fsDir = namesystem.dir;
if (parent == fsDir.rootDir) {
child.setLocalName(renameReservedRootComponentOnUpgrade(
@@ -1097,7 +1098,7 @@ public class FSImageFormat {
* @return New path with reserved path components renamed to user value
*/
static String renameReservedPathsOnUpgrade(String path,
- final int layoutVersion) {
+ final int layoutVersion) throws IllegalReservedPathException {
final String oldPath = path;
// If any known LVs aren't supported, we're doing an upgrade
if (!NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)) {
@@ -1147,13 +1148,13 @@ public class FSImageFormat {
* byte array path component.
*/
private static byte[] renameReservedComponentOnUpgrade(byte[] component,
- final int layoutVersion) {
+ final int layoutVersion) throws IllegalReservedPathException {
// If the LV doesn't support snapshots, we're doing an upgrade
if (!NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
if (Arrays.equals(component, HdfsServerConstants.DOT_SNAPSHOT_DIR_BYTES)) {
- Preconditions.checkArgument(
- renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR),
- RESERVED_ERROR_MSG);
+ if (!renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR)) {
+ throw new IllegalReservedPathException(RESERVED_ERROR_MSG);
+ }
component =
DFSUtil.string2Bytes(renameReservedMap
.get(HdfsConstants.DOT_SNAPSHOT_DIR));
@@ -1167,13 +1168,13 @@ public class FSImageFormat {
* byte array path component.
*/
private static byte[] renameReservedRootComponentOnUpgrade(byte[] component,
- final int layoutVersion) {
+ final int layoutVersion) throws IllegalReservedPathException {
// If the LV doesn't support inode IDs, we're doing an upgrade
if (!NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)) {
if (Arrays.equals(component, FSDirectory.DOT_RESERVED)) {
- Preconditions.checkArgument(
- renameReservedMap.containsKey(FSDirectory.DOT_RESERVED_STRING),
- RESERVED_ERROR_MSG);
+ if (!renameReservedMap.containsKey(FSDirectory.DOT_RESERVED_STRING)) {
+ throw new IllegalReservedPathException(RESERVED_ERROR_MSG);
+ }
final String renameString = renameReservedMap
.get(FSDirectory.DOT_RESERVED_STRING);
component =
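To make the behavioral change in these helpers concrete, here is a small self-contained sketch; it is not the Hadoop code. The class, method, and map below are invented stand-ins for FSImageFormat's renameReservedMap and the renameReserved*OnUpgrade helpers (the real map is populated from the NameNode's -upgrade -renameReserved startup option). The point it illustrates: a reserved path component with no user-supplied rename now fails with a checked IllegalReservedPathException instead of an unchecked Preconditions/IllegalArgumentException failure.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

// Illustrative stand-in only; these names are invented and do not exist in Hadoop.
public class ReservedRenameSketch {

  // Stand-in for the checked exception added by this commit.
  static class IllegalReservedPathException extends IOException {
    IllegalReservedPathException(String message) { super(message); }
  }

  // Stand-in for FSImageFormat.renameReservedMap.
  private static final Map<String, String> renameMap = new HashMap<>();

  static String renameComponentOnUpgrade(String component)
      throws IllegalReservedPathException {
    if (".snapshot".equals(component)) {
      // Previously an unchecked Preconditions failure; a dedicated checked
      // exception lets the caller fail fast with a clear cause.
      if (!renameMap.containsKey(".snapshot")) {
        throw new IllegalReservedPathException(
            "Reserved path component found but no rename mapping was supplied");
      }
      return renameMap.get(".snapshot");
    }
    return component;
  }

  public static void main(String[] args) throws IOException {
    renameMap.put(".snapshot", ".user-snapshot");
    System.out.println(renameComponentOnUpgrade(".snapshot")); // .user-snapshot
    System.out.println(renameComponentOnUpgrade("data"));      // data
  }
}

Throwing a dedicated exception type is what allows the loadFSImage() change in FSImage.java above to fail fast on reserved-path problems (retrying other fsimage files will not help) while still falling back to the next image on ordinary load errors.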

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/IllegalReservedPathException.java

@@ -0,0 +1,39 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Thrown during an upgrade from a software release that doesn't support
* reserved paths to a release that does, when a reserved path name is
* present in the fsimage.
*/
@InterfaceAudience.Private
public class IllegalReservedPathException extends IOException {
private static final long serialVersionUID = 1L;
public IllegalReservedPathException(String message, Throwable cause) {
super(message, cause);
}
public IllegalReservedPathException(String message) {
super(message);
}
}

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java

@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImage;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+ import org.apache.hadoop.hdfs.server.namenode.IllegalReservedPathException;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Logger;
@@ -326,7 +327,7 @@ public class TestDFSUpgradeFromImage {
fail("Upgrade did not fail with bad MD5");
} catch (IOException ioe) {
String msg = StringUtils.stringifyException(ioe);
- if (!msg.contains("Failed to load an FSImage file")) {
+ if (!msg.contains("Failed to load FSImage file")) {
throw ioe;
}
int md5failures = appender.countExceptionsWithMessage(
@@ -485,10 +486,15 @@ public class TestDFSUpgradeFromImage {
.format(false)
.startupOption(StartupOption.UPGRADE)
.numDataNodes(0).build();
- } catch (IllegalArgumentException e) {
- GenericTestUtils.assertExceptionContains(
- "reserved path component in this version",
- e);
+ } catch (IOException ioe) {
+ Throwable cause = ioe.getCause();
+ if (cause != null && cause instanceof IllegalReservedPathException) {
+ GenericTestUtils.assertExceptionContains(
+ "reserved path component in this version",
+ cause);
+ } else {
+ throw ioe;
+ }
} finally {
if (cluster != null) {
cluster.shutdown();
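For completeness, and assuming a standard Hadoop source checkout, these assertions can be exercised through the usual Maven/surefire route, for example:

mvn test -Dtest=TestDFSUpgradeFromImage -pl hadoop-hdfs-project/hadoop-hdfs

(the module path shown is the conventional hadoop-hdfs location; add -am if upstream modules have not been built yet).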