HDFS-12997. Move logging to slf4j in BlockPoolSliceStorage and Storage. Contributed by Ajay Kumar.

commit b3ae11d597
parent 6ca7204ceb
Author: Xiaoyu Yao
Date:   2018-01-31 23:10:54 -08:00

6 changed files with 178 additions and 181 deletions
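The patch follows one pattern throughout: each file drops org.apache.commons.logging.Log/LogFactory for org.slf4j.Logger/LoggerFactory and rewrites string-concatenated log messages into slf4j's parameterized {} form. As an editorial illustration of that pattern (a minimal sketch with a hypothetical class name, not part of the commit):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class MigrationSketch {  // hypothetical class, for illustration only
  static final Logger LOG = LoggerFactory.getLogger(MigrationSketch.class);

  void log(Object sd, String nsid) {
    // Old commons-logging style built the message eagerly, even when the
    // level was disabled:
    //   LOG.info("Formatting journal " + sd + " with nsid: " + nsid);
    // slf4j substitutes {} placeholders only if the level is enabled, so
    // the concatenation cost disappears from disabled levels:
    LOG.info("Formatting journal {} with nsid: {}", sd, nsid);
  }
}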

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java

@@ -193,10 +193,9 @@ class JNStorage extends Storage {
       // /\d+/ in the regex itself.
       long txid = Long.parseLong(matcher.group(1));
       if (txid < minTxIdToKeep) {
-        LOG.info("Purging no-longer needed file " + txid);
+        LOG.info("Purging no-longer needed file {}", txid);
         if (!f.delete()) {
-          LOG.warn("Unable to delete no-longer-needed data " +
-              f);
+          LOG.warn("Unable to delete no-longer-needed data {}", f);
         }
         break;
       }
@@ -214,7 +213,7 @@ class JNStorage extends Storage {
     }
     setStorageInfo(nsInfo);
-    LOG.info("Formatting journal " + sd + " with nsid: " + getNamespaceID());
+    LOG.info("Formatting journal {} with nsid: {}", sd, getNamespaceID());
     // Unlock the directory before formatting, because we will
     // re-analyze it after format(). The analyzeStorage() call
     // below is reponsible for re-locking it. This is a no-op
@@ -278,7 +277,7 @@ class JNStorage extends Storage {
   }
 
   public void close() throws IOException {
-    LOG.info("Closing journal storage for " + sd);
+    LOG.info("Closing journal storage for {}", sd);
     unlockAll();
   }
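An editorial note on calls in this patch such as LOG.warn("Unable to delete no-longer-needed data {}", f) followed elsewhere by a trailing exception argument: slf4j treats a final Throwable specially, printing its stack trace even when no placeholder consumes it. A minimal sketch (hypothetical path, not from the commit), assuming a LOG declared as above:

java.io.File f = new java.io.File("/tmp/edits_0000001");  // hypothetical path
try {
  if (!f.delete()) {
    LOG.warn("Unable to delete no-longer-needed data {}", f);
  }
} catch (SecurityException ex) {
  // One placeholder, two arguments: f fills {}, and ex, being a Throwable
  // in the last position, is logged with its full stack trace.
  LOG.warn("Cannot access storage directory {}", f, ex);
}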

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java

@@ -35,8 +35,6 @@ import java.util.Properties;
 import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
@@ -53,7 +51,8 @@ import org.apache.hadoop.util.VersionInfo;
 import com.google.common.base.Charsets;
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
@@ -76,7 +75,9 @@ import com.google.common.base.Preconditions;
  */
 @InterfaceAudience.Private
 public abstract class Storage extends StorageInfo {
-  public static final Log LOG = LogFactory.getLog(Storage.class.getName());
+  public static final Logger LOG = LoggerFactory
+      .getLogger(Storage.class.getName());
 
   // last layout version that did not support upgrades
   public static final int LAST_PRE_UPGRADE_LAYOUT_VERSION = -3;
@@ -396,7 +397,7 @@ public abstract class Storage extends StorageInfo {
         return FileUtils.sizeOfDirectory(root);
       }
     } catch (Exception e) {
-      LOG.warn("Failed to get directory size :" + root, e);
+      LOG.warn("Failed to get directory size : {}", root, e);
     }
     return 0;
   }
@@ -427,7 +428,7 @@ public abstract class Storage extends StorageInfo {
       }
       if (curDir.exists()) {
         File[] files = FileUtil.listFiles(curDir);
-        LOG.info("Will remove files: " + Arrays.toString(files));
+        LOG.info("Will remove files: {}", Arrays.toString(files));
         if (!(FileUtil.fullyDelete(curDir)))
           throw new IOException("Cannot remove current directory: " + curDir);
       }
@@ -650,25 +651,25 @@ public abstract class Storage extends StorageInfo {
           // storage directory does not exist
           if (startOpt != StartupOption.FORMAT &&
               startOpt != StartupOption.HOTSWAP) {
-            LOG.warn("Storage directory " + rootPath + " does not exist");
+            LOG.warn("Storage directory {} does not exist", rootPath);
             return StorageState.NON_EXISTENT;
           }
-          LOG.info(rootPath + " does not exist. Creating ...");
+          LOG.info("{} does not exist. Creating ...", rootPath);
          if (!root.mkdirs())
            throw new IOException("Cannot create directory " + rootPath);
          hadMkdirs = true;
        }
        // or is inaccessible
        if (!root.isDirectory()) {
-          LOG.warn(rootPath + "is not a directory");
+          LOG.warn("{} is not a directory", rootPath);
          return StorageState.NON_EXISTENT;
        }
        if (!FileUtil.canWrite(root)) {
-          LOG.warn("Cannot access storage directory " + rootPath);
+          LOG.warn("Cannot access storage directory {}", rootPath);
          return StorageState.NON_EXISTENT;
        }
      } catch(SecurityException ex) {
-        LOG.warn("Cannot access storage directory " + rootPath, ex);
+        LOG.warn("Cannot access storage directory {}", rootPath, ex);
        return StorageState.NON_EXISTENT;
      }
@@ -770,43 +771,43 @@ public abstract class Storage extends StorageInfo {
       String rootPath = root.getCanonicalPath();
       switch(curState) {
       case COMPLETE_UPGRADE:  // mv previous.tmp -> previous
-        LOG.info("Completing previous upgrade for storage directory "
-            + rootPath);
+        LOG.info("Completing previous upgrade for storage directory {}",
+            rootPath);
         rename(getPreviousTmp(), getPreviousDir());
         return;
       case RECOVER_UPGRADE:   // mv previous.tmp -> current
-        LOG.info("Recovering storage directory " + rootPath
-            + " from previous upgrade");
+        LOG.info("Recovering storage directory {} from previous upgrade",
+            rootPath);
         if (curDir.exists())
           deleteDir(curDir);
         rename(getPreviousTmp(), curDir);
         return;
       case COMPLETE_ROLLBACK: // rm removed.tmp
-        LOG.info("Completing previous rollback for storage directory "
-            + rootPath);
+        LOG.info("Completing previous rollback for storage directory {}",
+            rootPath);
         deleteDir(getRemovedTmp());
         return;
       case RECOVER_ROLLBACK:  // mv removed.tmp -> current
-        LOG.info("Recovering storage directory " + rootPath
-            + " from previous rollback");
+        LOG.info("Recovering storage directory {} from previous rollback",
+            rootPath);
         rename(getRemovedTmp(), curDir);
         return;
       case COMPLETE_FINALIZE: // rm finalized.tmp
-        LOG.info("Completing previous finalize for storage directory "
-            + rootPath);
+        LOG.info("Completing previous finalize for storage directory {}",
+            rootPath);
         deleteDir(getFinalizedTmp());
         return;
       case COMPLETE_CHECKPOINT: // mv lastcheckpoint.tmp -> previous.checkpoint
-        LOG.info("Completing previous checkpoint for storage directory "
-            + rootPath);
+        LOG.info("Completing previous checkpoint for storage directory {}",
+            rootPath);
         File prevCkptDir = getPreviousCheckpoint();
         if (prevCkptDir.exists())
           deleteDir(prevCkptDir);
         rename(getLastCheckpointTmp(), prevCkptDir);
         return;
       case RECOVER_CHECKPOINT:  // mv lastcheckpoint.tmp -> current
-        LOG.info("Recovering storage directory " + rootPath
-            + " from failed checkpoint");
+        LOG.info("Recovering storage directory {} from failed checkpoint",
+            rootPath);
         if (curDir.exists())
           deleteDir(curDir);
         rename(getLastCheckpointTmp(), curDir);
@@ -860,12 +861,12 @@ public abstract class Storage extends StorageInfo {
      */
     public void lock() throws IOException {
       if (isShared()) {
-        LOG.info("Locking is disabled for " + this.root);
+        LOG.info("Locking is disabled for {}", this.root);
         return;
       }
       FileLock newLock = tryLock();
       if (newLock == null) {
         String msg = "Cannot lock storage " + this.root
             + ". The directory is already locked";
         LOG.info(msg);
         throw new IOException(msg);
@@ -897,22 +898,22 @@ public abstract class Storage extends StorageInfo {
       try {
         res = file.getChannel().tryLock();
         if (null == res) {
-          LOG.error("Unable to acquire file lock on path " + lockF.toString());
+          LOG.error("Unable to acquire file lock on path {}", lockF);
           throw new OverlappingFileLockException();
         }
         file.write(jvmName.getBytes(Charsets.UTF_8));
-        LOG.info("Lock on " + lockF + " acquired by nodename " + jvmName);
+        LOG.info("Lock on {} acquired by nodename {}", lockF, jvmName);
       } catch(OverlappingFileLockException oe) {
         // Cannot read from the locked file on Windows.
         String lockingJvmName = Path.WINDOWS ? "" : (" " + file.readLine());
-        LOG.error("It appears that another node " + lockingJvmName
-            + " has already locked the storage directory: " + root, oe);
+        LOG.error("It appears that another node {} has already locked the "
+            + "storage directory: {}", lockingJvmName, root, oe);
         file.close();
         return null;
       } catch(IOException e) {
-        LOG.error("Failed to acquire lock on " + lockF
-            + ". If this storage directory is mounted via NFS, "
-            + "ensure that the appropriate nfs lock services are running.", e);
+        LOG.error("Failed to acquire lock on {}. If this storage directory is"
+            + " mounted via NFS, ensure that the appropriate nfs lock services"
+            + " are running.", lockF, e);
         file.close();
         throw e;
       }
@@ -1331,10 +1332,8 @@ public abstract class Storage extends StorageInfo {
     }
     if (preserveFileDate) {
       if (destFile.setLastModified(srcFile.lastModified()) == false) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Failed to preserve last modified date from'" + srcFile
-              + "' to '" + destFile + "'");
-        }
+        LOG.debug("Failed to preserve last modified date from'{}' to '{}'",
+            srcFile, destFile);
       }
     }
   }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java

@@ -159,17 +159,15 @@ public class BlockPoolSliceStorage extends Storage {
     case NORMAL:
       break;
     case NON_EXISTENT:
-      LOG.info("Block pool storage directory for location " + location +
-          " and block pool id " + nsInfo.getBlockPoolID() +
-          " does not exist");
+      LOG.info("Block pool storage directory for location {} and block pool"
+          + " id {} does not exist", location, nsInfo.getBlockPoolID());
       throw new IOException("Storage directory for location " + location +
           " and block pool id " + nsInfo.getBlockPoolID() +
           " does not exist");
     case NOT_FORMATTED: // format
-      LOG.info("Block pool storage directory for location " + location +
-          " and block pool id " + nsInfo.getBlockPoolID()
-          + " is not formatted for " + nsInfo.getBlockPoolID()
-          + ". Formatting ...");
+      LOG.info("Block pool storage directory for location {} and block pool"
+          + " id {} is not formatted. Formatting ...", location,
+          nsInfo.getBlockPoolID());
       format(sd, nsInfo);
       break;
     default:    // recovery part is common
@@ -227,8 +225,8 @@ public class BlockPoolSliceStorage extends Storage {
             nsInfo, location, startOpt, callables, conf);
         succeedDirs.add(sd);
       } catch (IOException e) {
-        LOG.warn("Failed to analyze storage directories for block pool "
-            + nsInfo.getBlockPoolID(), e);
+        LOG.warn("Failed to analyze storage directories for block pool {}",
+            nsInfo.getBlockPoolID(), e);
         throw e;
       }
     return succeedDirs;
@@ -250,7 +248,8 @@ public class BlockPoolSliceStorage extends Storage {
       StorageLocation location, StartupOption startOpt,
       List<Callable<StorageDirectory>> callables, Configuration conf)
           throws IOException {
-    LOG.info("Analyzing storage directories for bpid " + nsInfo.getBlockPoolID());
+    LOG.info("Analyzing storage directories for bpid {}", nsInfo
+        .getBlockPoolID());
     final List<StorageDirectory> loaded = loadBpStorageDirectories(
         nsInfo, location, startOpt, callables, conf);
     for (StorageDirectory sd : loaded) {
@@ -278,8 +277,8 @@ public class BlockPoolSliceStorage extends Storage {
    * @throws IOException Signals that an I/O exception has occurred.
    */
   private void format(StorageDirectory bpSdir, NamespaceInfo nsInfo) throws IOException {
-    LOG.info("Formatting block pool " + blockpoolID + " directory "
-        + bpSdir.getCurrentDir());
+    LOG.info("Formatting block pool {} directory {}", blockpoolID, bpSdir
+        .getCurrentDir());
     bpSdir.clearDirectory(); // create directory
     this.layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
     this.cTime = nsInfo.getCTime();
@@ -295,7 +294,7 @@ public class BlockPoolSliceStorage extends Storage {
    */
   void remove(File absPathToRemove) {
     Preconditions.checkArgument(absPathToRemove.isAbsolute());
-    LOG.info("Removing block level storage: " + absPathToRemove);
+    LOG.info("Removing block level storage: {}", absPathToRemove);
     for (Iterator<StorageDirectory> it = getStorageDirs().iterator();
          it.hasNext(); ) {
       StorageDirectory sd = it.next();
@@ -375,7 +374,7 @@ public class BlockPoolSliceStorage extends Storage {
       // during rolling upgrade rollback. They are deleted during rolling
       // upgrade downgrade.
       int restored = restoreBlockFilesFromTrash(getTrashRootDir(sd));
-      LOG.info("Restored " + restored + " block files from trash.");
+      LOG.info("Restored {} block files from trash.", restored);
     }
     readProperties(sd);
     checkVersionUpgradable(this.layoutVersion);
@@ -399,9 +398,9 @@ public class BlockPoolSliceStorage extends Storage {
     }
     if (this.layoutVersion > HdfsServerConstants.DATANODE_LAYOUT_VERSION) {
       int restored = restoreBlockFilesFromTrash(getTrashRootDir(sd));
-      LOG.info("Restored " + restored + " block files from trash " +
+      LOG.info("Restored {} block files from trash " +
           "before the layout upgrade. These blocks will be moved to " +
-          "the previous directory during the upgrade");
+          "the previous directory during the upgrade", restored);
     }
     if (this.layoutVersion > HdfsServerConstants.DATANODE_LAYOUT_VERSION
         || this.cTime < nsInfo.getCTime()) {
@@ -448,11 +447,10 @@ public class BlockPoolSliceStorage extends Storage {
       return;
     }
     final int oldLV = getLayoutVersion();
-    LOG.info("Upgrading block pool storage directory " + bpSd.getRoot()
-        + ".\n old LV = " + oldLV
-        + "; old CTime = " + this.getCTime()
-        + ".\n new LV = " + HdfsServerConstants.DATANODE_LAYOUT_VERSION
-        + "; new CTime = " + nsInfo.getCTime());
+    LOG.info("Upgrading block pool storage directory {}.\n old LV = {}; old"
+        + " CTime = {}.\n new LV = {}; new CTime = {}",
+        bpSd.getRoot(), oldLV, this.getCTime(), HdfsServerConstants
+        .DATANODE_LAYOUT_VERSION, nsInfo.getCTime());
     // get <SD>/previous directory
     String dnRoot = getDataNodeStorageRoot(bpSd.getRoot().getCanonicalPath());
     StorageDirectory dnSdStorage = new StorageDirectory(new File(dnRoot));
@@ -508,7 +506,7 @@ public class BlockPoolSliceStorage extends Storage {
     // 4.rename <SD>/current/<bpid>/previous.tmp to
     // <SD>/current/<bpid>/previous
     rename(bpTmpDir, bpPrevDir);
-    LOG.info("Upgrade of " + name + " is complete");
+    LOG.info("Upgrade of {} is complete", name);
   }
 
   /**
@@ -569,8 +567,8 @@ public class BlockPoolSliceStorage extends Storage {
         // Failsafe - we should not hit this case but let's make sure
         // we never overwrite a newer version of a block file with an
         // older version.
-        LOG.info("Not overwriting " + newChild + " with smaller file from " +
-            "trash directory. This message can be safely ignored.");
+        LOG.info("Not overwriting {} with smaller file from " +
+            "trash directory. This message can be safely ignored.", newChild);
       } else if (!child.renameTo(newChild)) {
         throw new IOException("Failed to rename " + child + " to " + newChild);
       } else {
@@ -616,10 +614,10 @@ public class BlockPoolSliceStorage extends Storage {
           + " is newer than the namespace state: LV = "
          + HdfsServerConstants.DATANODE_LAYOUT_VERSION + " CTime = " + nsInfo.getCTime());
     }
-    LOG.info("Rolling back storage directory " + bpSd.getRoot()
-        + ".\n target LV = " + nsInfo.getLayoutVersion()
-        + "; target CTime = " + nsInfo.getCTime());
+    LOG.info("Rolling back storage directory {}.\n target LV = {}; target "
+        + "CTime = {}", bpSd.getRoot(), nsInfo.getLayoutVersion(),
+        nsInfo.getCTime());
     File tmpDir = bpSd.getRemovedTmp();
     assert !tmpDir.exists() : "removed.tmp directory must not exist.";
     // 1. rename current to tmp
@@ -632,7 +630,7 @@ public class BlockPoolSliceStorage extends Storage {
     // 3. delete removed.tmp dir
     deleteDir(tmpDir);
 
-    LOG.info("Rollback of " + bpSd.getRoot() + " is complete");
+    LOG.info("Rollback of {} is complete", bpSd.getRoot());
   }
 
   /*
@@ -651,9 +649,9 @@ public class BlockPoolSliceStorage extends Storage {
       return; // already finalized
     }
     final String dataDirPath = bpSd.getRoot().getCanonicalPath();
-    LOG.info("Finalizing upgrade for storage directory " + dataDirPath
-        + ".\n cur LV = " + this.getLayoutVersion() + "; cur CTime = "
-        + this.getCTime());
+    LOG.info("Finalizing upgrade for storage directory {}.\n cur LV = {}; "
+        + "cur CTime = {}", dataDirPath, this.getLayoutVersion(),
+        this.getCTime());
     assert bpSd.getCurrentDir().exists() : "Current directory must exist.";
 
     // rename previous to finalized.tmp
@@ -667,9 +665,9 @@ public class BlockPoolSliceStorage extends Storage {
         try {
           deleteDir(tmpDir);
         } catch (IOException ex) {
-          LOG.error("Finalize upgrade for " + dataDirPath + " failed.", ex);
+          LOG.error("Finalize upgrade for {} failed.", dataDirPath, ex);
         }
-        LOG.info("Finalize upgrade for " + dataDirPath + " is complete.");
+        LOG.info("Finalize upgrade for {} is complete.", dataDirPath);
       }
 
       @Override
@@ -695,8 +693,8 @@ public class BlockPoolSliceStorage extends Storage {
         diskLayoutVersion, hardLink, conf);
     DataStorage.linkBlocks(fromDir, toDir, DataStorage.STORAGE_DIR_RBW,
         diskLayoutVersion, hardLink, conf);
-    LOG.info("Linked blocks from " + fromDir + " to " + toDir + ". "
-        + hardLink.linkStats.report());
+    LOG.info("Linked blocks from {} to {}. {}", fromDir, toDir,
+        hardLink.linkStats.report());
   }
 
   /**
@@ -763,7 +761,7 @@ public class BlockPoolSliceStorage extends Storage {
       File blockFile = new File(blockURI);
       return getTrashDirectory(blockFile);
     } catch (IllegalArgumentException e) {
-      LOG.warn("Failed to get block file for replica " + info, e);
+      LOG.warn("Failed to get block file for replica {}", info, e);
     }
 
     return null;
@@ -791,7 +789,7 @@ public class BlockPoolSliceStorage extends Storage {
   String getRestoreDirectory(File blockFile) {
     Matcher matcher = BLOCK_POOL_TRASH_PATH_PATTERN.matcher(blockFile.getParent());
     String restoreDirectory = matcher.replaceFirst("$1$2" + STORAGE_DIR_CURRENT + "$4");
-    LOG.info("Restoring " + blockFile + " to " + restoreDirectory);
+    LOG.info("Restoring {} to {}", blockFile, restoreDirectory);
     return restoreDirectory;
   }
@@ -804,7 +802,7 @@ public class BlockPoolSliceStorage extends Storage {
       File trashRoot = getTrashRootDir(sd);
       if (trashRoot.exists() && sd.getPreviousDir().exists()) {
         LOG.error("Trash and PreviousDir shouldn't both exist for storage "
-            + "directory " + sd);
+            + "directory {}", sd);
         assert false;
       } else {
         trashRoots.add(trashRoot);
@@ -817,7 +815,7 @@ public class BlockPoolSliceStorage extends Storage {
       public void run() {
         for(File trashRoot : trashRoots){
           FileUtil.fullyDelete(trashRoot);
-          LOG.info("Cleared trash for storage directory " + trashRoot);
+          LOG.info("Cleared trash for storage directory {}", trashRoot);
         }
       }
@@ -860,9 +858,9 @@ public class BlockPoolSliceStorage extends Storage {
     File markerFile = new File(bpRoot, ROLLING_UPGRADE_MARKER_FILE);
     if (!storagesWithRollingUpgradeMarker.contains(bpRoot.toString())) {
       if (!markerFile.exists() && markerFile.createNewFile()) {
-        LOG.info("Created " + markerFile);
+        LOG.info("Created {}", markerFile);
       } else {
-        LOG.info(markerFile + " already exists.");
+        LOG.info("{} already exists.", markerFile);
       }
       storagesWithRollingUpgradeMarker.add(bpRoot.toString());
       storagesWithoutRollingUpgradeMarker.remove(bpRoot.toString());
@@ -885,10 +883,10 @@ public class BlockPoolSliceStorage extends Storage {
     File markerFile = new File(bpRoot, ROLLING_UPGRADE_MARKER_FILE);
     if (!storagesWithoutRollingUpgradeMarker.contains(bpRoot.toString())) {
       if (markerFile.exists()) {
-        LOG.info("Deleting " + markerFile);
+        LOG.info("Deleting {}", markerFile);
         doFinalize(sd.getCurrentDir());
         if (!markerFile.delete()) {
-          LOG.warn("Failed to delete " + markerFile);
+          LOG.warn("Failed to delete {}", markerFile);
         }
       }
       storagesWithoutRollingUpgradeMarker.add(bpRoot.toString());

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -157,8 +157,8 @@ public class DataStorage extends Storage {
     }
     if (oldStorageID == null || regenerateStorageIds) {
       sd.setStorageUuid(DatanodeStorage.generateUuid());
-      LOG.info("Generated new storageID " + sd.getStorageUuid() +
-          " for directory " + sd.getRoot() +
+      LOG.info("Generated new storageID {} for directory {} {}", sd
+          .getStorageUuid(), sd.getRoot(),
           (oldStorageID == null ? "" : (" to replace " + oldStorageID)));
       return true;
     }
@@ -173,7 +173,7 @@ public class DataStorage extends Storage {
   public void enableTrash(String bpid) {
     if (trashEnabledBpids.add(bpid)) {
       getBPStorage(bpid).stopTrashCleaner();
-      LOG.info("Enabled trash for bpid " + bpid);
+      LOG.info("Enabled trash for bpid {}", bpid);
     }
   }
@@ -181,7 +181,7 @@ public class DataStorage extends Storage {
     if (trashEnabledBpids.contains(bpid)) {
       getBPStorage(bpid).clearTrash();
       trashEnabledBpids.remove(bpid);
-      LOG.info("Cleared trash for bpid " + bpid);
+      LOG.info("Cleared trash for bpid {}", bpid);
     }
   }
@@ -275,14 +275,12 @@ public class DataStorage extends Storage {
     case NORMAL:
       break;
     case NON_EXISTENT:
-      LOG.info("Storage directory with location " + location
-          + " does not exist");
+      LOG.info("Storage directory with location {} does not exist", location);
       throw new IOException("Storage directory with location " + location
          + " does not exist");
     case NOT_FORMATTED: // format
-      LOG.info("Storage directory with location " + location
-          + " is not formatted for namespace " + nsInfo.getNamespaceID()
-          + ". Formatting...");
+      LOG.info("Storage directory with location {} is not formatted for "
+          + "namespace {}. Formatting...", location, nsInfo.getNamespaceID());
       format(sd, nsInfo, datanode.getDatanodeUuid(), datanode.getConf());
       break;
     default: // recovery part is common
@@ -325,8 +323,8 @@ public class DataStorage extends Storage {
       StorageLocation location, List<NamespaceInfo> nsInfos)
           throws IOException {
     if (containsStorageDir(location)) {
-      final String errorMessage = "Storage directory is in use";
-      LOG.warn(errorMessage + ".");
+      final String errorMessage = "Storage directory is in use.";
+      LOG.warn(errorMessage);
       throw new IOException(errorMessage);
     }
@@ -353,8 +351,8 @@ public class DataStorage extends Storage {
       throw new HadoopIllegalArgumentException(key + " = " + n + " < 1");
     }
     final int min = Math.min(n, dataDirs);
-    LOG.info("Using " + min + " threads to upgrade data directories ("
-        + key + "=" + n + ", dataDirs=" + dataDirs + ")");
+    LOG.info("Using {} threads to upgrade data directories ({}={}, "
+        + "dataDirs={})", min, key, n, dataDirs);
     return min;
   }
@@ -417,22 +415,22 @@ public class DataStorage extends Storage {
           }
         }
       } catch (IOException e) {
-        LOG.warn("Failed to add storage directory " + dataDir, e);
+        LOG.warn("Failed to add storage directory {}", dataDir, e);
       }
     } else {
-      LOG.info("Storage directory " + dataDir + " has already been used.");
+      LOG.info("Storage directory {} has already been used.", dataDir);
       success.add(dataDir);
     }
   }
 
   if (!tasks.isEmpty()) {
-    LOG.info("loadDataStorage: " + tasks.size() + " upgrade tasks");
+    LOG.info("loadDataStorage: {} upgrade tasks", tasks.size());
     for(UpgradeTask t : tasks) {
       try {
         addStorageDir(t.future.get());
         success.add(t.dataDir);
       } catch (ExecutionException e) {
-        LOG.warn("Failed to upgrade storage directory " + t.dataDir, e);
+        LOG.warn("Failed to upgrade storage directory {}", t.dataDir, e);
       } catch (InterruptedException e) {
         throw DFSUtilClient.toInterruptedIOException("Task interrupted", e);
       }
@@ -465,19 +463,19 @@ public class DataStorage extends Storage {
         }
       }
     } catch (IOException e) {
-      LOG.warn("Failed to add storage directory " + dataDir
-          + " for block pool " + bpid, e);
+      LOG.warn("Failed to add storage directory {} for block pool {}",
+          dataDir, bpid, e);
     }
   }
 
   if (!tasks.isEmpty()) {
-    LOG.info("loadBlockPoolSliceStorage: " + tasks.size() + " upgrade tasks");
+    LOG.info("loadBlockPoolSliceStorage: {} upgrade tasks", tasks.size());
    for(UpgradeTask t : tasks) {
      try {
        success.add(t.future.get());
      } catch (ExecutionException e) {
-        LOG.warn("Failed to upgrade storage directory " + t.dataDir
-            + " for block pool " + bpid, e);
+        LOG.warn("Failed to upgrade storage directory {} for block pool {}",
+            t.dataDir, bpid, e);
      } catch (InterruptedException e) {
        throw DFSUtilClient.toInterruptedIOException("Task interrupted", e);
      }
@@ -491,7 +489,7 @@ public class DataStorage extends Storage {
    * Remove storage dirs from DataStorage. All storage dirs are removed even when the
    * IOException is thrown.
    *
-   * @param dirsToRemove a set of storage directories to be removed.
+   * @param storageLocations a set of storage directories to be removed.
    * @throws IOException if I/O error when unlocking storage directory.
    */
   synchronized void removeVolumes(
@@ -521,9 +519,8 @@ public class DataStorage extends Storage {
       try {
         sd.unlock();
       } catch (IOException e) {
-        LOG.warn(String.format(
-            "I/O error attempting to unlock storage directory %s.",
-            sd.getRoot()), e);
+        LOG.warn("I/O error attempting to unlock storage directory {}.",
+            sd.getRoot(), e);
         errorMsgBuilder.append(String.format("Failed to remove %s: %s%n",
             sd.getRoot(), e.getMessage()));
       }
@@ -672,7 +669,7 @@ public class DataStorage extends Storage {
     try (RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
         FileLock oldLock = oldFile.getChannel().tryLock()) {
       if (null == oldLock) {
-        LOG.error("Unable to acquire file lock on path " + oldF.toString());
+        LOG.error("Unable to acquire file lock on path {}", oldF);
         throw new OverlappingFileLockException();
       }
       oldFile.seek(0);
@@ -793,11 +790,10 @@ public class DataStorage extends Storage {
       final List<Callable<StorageDirectory>> callables,
       final Configuration conf) throws IOException {
     final int oldLV = getLayoutVersion();
-    LOG.info("Upgrading storage directory " + sd.getRoot()
-        + ".\n old LV = " + oldLV
-        + "; old CTime = " + this.getCTime()
-        + ".\n new LV = " + HdfsServerConstants.DATANODE_LAYOUT_VERSION
-        + "; new CTime = " + nsInfo.getCTime());
+    LOG.info("Upgrading storage directory {}.\n old LV = {}; old CTime = {}"
+        + ".\n new LV = {}; new CTime = {}", sd.getRoot(), oldLV,
+        this.getCTime(), HdfsServerConstants.DATANODE_LAYOUT_VERSION,
+        nsInfo.getCTime());
 
     final File curDir = sd.getCurrentDir();
     final File prevDir = sd.getPreviousDir();
@@ -850,15 +846,15 @@ public class DataStorage extends Storage {
     // 5. Rename <SD>/previous.tmp to <SD>/previous
     rename(tmpDir, prevDir);
-    LOG.info("Upgrade of " + sd.getRoot()+ " is complete");
+    LOG.info("Upgrade of {} is complete", sd.getRoot());
   }
 
   void upgradeProperties(StorageDirectory sd, Configuration conf)
       throws IOException {
     createStorageID(sd, layoutVersion, conf);
-    LOG.info("Updating layout version from " + layoutVersion
-        + " to " + HdfsServerConstants.DATANODE_LAYOUT_VERSION
-        + " for storage " + sd.getRoot());
+    LOG.info("Updating layout version from {} to {} for storage {}",
+        layoutVersion, HdfsServerConstants.DATANODE_LAYOUT_VERSION,
+        sd.getRoot());
     layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
     writeProperties(sd);
   }
@@ -914,9 +910,8 @@ public class DataStorage extends Storage {
           HdfsServerConstants.DATANODE_LAYOUT_VERSION)) {
         readProperties(sd, HdfsServerConstants.DATANODE_LAYOUT_VERSION);
         writeProperties(sd);
-        LOG.info("Layout version rolled back to "
-            + HdfsServerConstants.DATANODE_LAYOUT_VERSION + " for storage "
-            + sd.getRoot());
+        LOG.info("Layout version rolled back to {} for storage {}",
+            HdfsServerConstants.DATANODE_LAYOUT_VERSION, sd.getRoot());
       }
       return;
     }
@@ -933,9 +928,9 @@ public class DataStorage extends Storage {
         + " is newer than the namespace state: LV = "
        + HdfsServerConstants.DATANODE_LAYOUT_VERSION + " CTime = "
        + nsInfo.getCTime());
-    LOG.info("Rolling back storage directory " + sd.getRoot()
-        + ".\n target LV = " + HdfsServerConstants.DATANODE_LAYOUT_VERSION
-        + "; target CTime = " + nsInfo.getCTime());
+    LOG.info("Rolling back storage directory {}.\n target LV = {}; target "
+        + "CTime = {}", sd.getRoot(),
+        HdfsServerConstants.DATANODE_LAYOUT_VERSION, nsInfo.getCTime());
     File tmpDir = sd.getRemovedTmp();
     assert !tmpDir.exists() : "removed.tmp directory must not exist.";
     // rename current to tmp
@@ -946,7 +941,7 @@ public class DataStorage extends Storage {
     rename(prevDir, curDir);
     // delete tmp dir
     deleteDir(tmpDir);
-    LOG.info("Rollback of " + sd.getRoot() + " is complete");
+    LOG.info("Rollback of {} is complete", sd.getRoot());
   }
 
   /**
@@ -964,10 +959,9 @@ public class DataStorage extends Storage {
       return; // already discarded
 
     final String dataDirPath = sd.getRoot().getCanonicalPath();
-    LOG.info("Finalizing upgrade for storage directory "
-        + dataDirPath
-        + ".\n cur LV = " + this.getLayoutVersion()
-        + "; cur CTime = " + this.getCTime());
+    LOG.info("Finalizing upgrade for storage directory {}.\n cur LV = {}; "
+        + "cur CTime = {}", dataDirPath, this.getLayoutVersion(), this
+        .getCTime());
     assert sd.getCurrentDir().exists() : "Current directory must exist.";
     final File tmpDir = sd.getFinalizedTmp();//finalized.tmp directory
     final File bbwDir = new File(sd.getRoot(), Storage.STORAGE_1_BBW);
@@ -1055,8 +1049,8 @@ public class DataStorage extends Storage {
             diskLayoutVersion, hardLink, conf);
       }
     }
-    LOG.info("Linked blocks from " + fromDir + " to " + toDir + ". "
-        + hardLink.linkStats.report());
+    LOG.info("Linked blocks from {} to {}. {}", fromDir, toDir, hardLink
+        .linkStats.report());
   }
 
   private static class LinkArgs {
@@ -1077,7 +1071,7 @@ public class DataStorage extends Storage {
 
   private static void linkBlocks(File from, File to, int oldLV,
       HardLink hl, Configuration conf) throws IOException {
-    LOG.info("Start linking block files from " + from + " to " + to);
+    LOG.info("Start linking block files from {} to {}", from, to);
     boolean upgradeToIdBasedLayout = false;
     // If we are upgrading from a version older than the one where we introduced
    // block ID-based layout (32x32) AND we're working with the finalized
@@ -1098,8 +1092,8 @@ public class DataStorage extends Storage {
       final ArrayList<LinkArgs> duplicates =
           findDuplicateEntries(idBasedLayoutSingleLinks);
       if (!duplicates.isEmpty()) {
-        LOG.error("There are " + duplicates.size() + " duplicate block " +
-            "entries within the same volume.");
+        LOG.error("There are {} duplicate block " +
+            "entries within the same volume.", duplicates.size());
         removeDuplicateEntries(idBasedLayoutSingleLinks, duplicates);
       }
@@ -1241,8 +1235,8 @@ public class DataStorage extends Storage {
         }
       }
       if (!found) {
-        LOG.warn("Unexpectedly low genstamp on " +
-            duplicate.src.getAbsolutePath() + ".");
+        LOG.warn("Unexpectedly low genstamp on {}.",
+            duplicate.src.getAbsolutePath());
         iter.remove();
       }
     }
@@ -1265,13 +1259,13 @@ public class DataStorage extends Storage {
       long blockLength = duplicate.src.length();
       long prevBlockLength = prevLongest.src.length();
       if (blockLength < prevBlockLength) {
-        LOG.warn("Unexpectedly short length on " +
-            duplicate.src.getAbsolutePath() + ".");
+        LOG.warn("Unexpectedly short length on {}.",
+            duplicate.src.getAbsolutePath());
         continue;
       }
       if (blockLength > prevBlockLength) {
-        LOG.warn("Unexpectedly short length on " +
-            prevLongest.src.getAbsolutePath() + ".");
+        LOG.warn("Unexpectedly short length on {}.",
+            prevLongest.src.getAbsolutePath());
       }
       longestBlockFiles.put(blockId, duplicate);
     }
@@ -1286,7 +1280,7 @@ public class DataStorage extends Storage {
         continue; // file has no duplicates
       }
       if (!bestDuplicate.src.getParent().equals(args.src.getParent())) {
-        LOG.warn("Discarding " + args.src.getAbsolutePath() + ".");
+        LOG.warn("Discarding {}.", args.src.getAbsolutePath());
         iter.remove();
       }
     }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java

@@ -202,7 +202,7 @@ public class NNStorage extends Storage implements Closeable,
         return false;
       }
     } finally {
-      IOUtils.cleanup(LOG, oldFile);
+      IOUtils.cleanupWithLogger(LOG, oldFile);
     }
     return true;
   }
@@ -220,7 +220,7 @@ public class NNStorage extends Storage implements Closeable,
    * @param val Whether restoration attempt should be made.
    */
   void setRestoreFailedStorage(boolean val) {
-    LOG.warn("set restore failed storage to " + val);
+    LOG.warn("set restore failed storage to {}", val);
     restoreFailedStorage=val;
   }
@@ -243,14 +243,14 @@ public class NNStorage extends Storage implements Closeable,
     /* We don't want more than one thread trying to restore at a time */
     synchronized (this.restorationLock) {
       LOG.info("NNStorage.attemptRestoreRemovedStorage: check removed(failed) "+
-          "storage. removedStorages size = " + removedStorageDirs.size());
+          "storage. removedStorages size = {}", removedStorageDirs.size());
       for (StorageDirectory sd : this.removedStorageDirs) {
         File root = sd.getRoot();
-        LOG.info("currently disabled dir " + root.getAbsolutePath() +
-            "; type=" + sd.getStorageDirType()
-            + ";canwrite=" + FileUtil.canWrite(root));
+        LOG.info("currently disabled dir {}; type={} ;canwrite={}", root
+            .getAbsolutePath(), sd.getStorageDirType(),
+            FileUtil.canWrite(root));
         if (root.exists() && FileUtil.canWrite(root)) {
-          LOG.info("restoring dir " + sd.getRoot().getAbsolutePath());
+          LOG.info("restoring dir {}", sd.getRoot().getAbsolutePath());
           this.addStorageDir(sd); // restore
          this.removedStorageDirs.remove(sd);
        }
@@ -510,7 +510,7 @@ public class NNStorage extends Storage implements Closeable,
         writeTransactionIdFile(sd, txid);
       } catch(IOException e) {
         // Close any edits stream associated with this dir and remove directory
-        LOG.warn("writeTransactionIdToStorage failed on " + sd,
+        LOG.warn("writeTransactionIdToStorage failed on {}", sd,
             e);
         reportErrorsOnDirectory(sd);
       }
@@ -580,8 +580,8 @@ public class NNStorage extends Storage implements Closeable,
     writeProperties(sd);
     writeTransactionIdFile(sd, 0);
-    LOG.info("Storage directory " + sd.getRoot()
-        + " has been successfully formatted.");
+    LOG.info("Storage directory {} has been successfully formatted.",
+        sd.getRoot());
   }
 
   /**
@@ -861,26 +861,30 @@ public class NNStorage extends Storage implements Closeable,
    * @param sd A storage directory to mark as errored.
    */
   private void reportErrorsOnDirectory(StorageDirectory sd) {
-    LOG.error("Error reported on storage directory " + sd);
-
-    String lsd = listStorageDirectories();
-    LOG.debug("current list of storage dirs:" + lsd);
-
-    LOG.warn("About to remove corresponding storage: "
-        + sd.getRoot().getAbsolutePath());
+    LOG.error("Error reported on storage directory {}", sd);
+    if(LOG.isDebugEnabled()){
+      String lsd = listStorageDirectories();
+      LOG.debug("current list of storage dirs:{}", lsd);
+    }
+    LOG.warn("About to remove corresponding storage: {}", sd.getRoot()
+        .getAbsolutePath());
     try {
       sd.unlock();
     } catch (Exception e) {
-      LOG.warn("Unable to unlock bad storage directory: "
-          + sd.getRoot().getPath(), e);
+      LOG.warn("Unable to unlock bad storage directory: {}", sd.getRoot()
+          .getPath(), e);
     }
 
     if (getStorageDirs().remove(sd)) {
      this.removedStorageDirs.add(sd);
    }
 
-    lsd = listStorageDirectories();
-    LOG.debug("at the end current list of storage dirs:" + lsd);
+    if(LOG.isDebugEnabled()){
+      String lsd = listStorageDirectories();
+      LOG.debug("at the end current list of storage dirs:{}", lsd);
+    }
   }
@@ -911,11 +915,12 @@ public class NNStorage extends Storage implements Closeable,
       // clusterid.
       if (startOpt.getClusterId() != null
           && !startOpt.getClusterId().equals(getClusterID())) {
-        LOG.warn("Clusterid mismatch - current clusterid: " + getClusterID()
-            + ", Ignoring given clusterid: " + startOpt.getClusterId());
+        LOG.warn("Clusterid mismatch - current clusterid: {}, Ignoring "
+            + "given clusterid: {}", getClusterID(),
+            startOpt.getClusterId());
       }
     }
-    LOG.info("Using clusterid: " + getClusterID());
+    LOG.info("Using clusterid: {}", getClusterID());
   }
@@ -984,14 +989,14 @@ public class NNStorage extends Storage implements Closeable,
         } else {
           cid = props.getProperty("clusterID");
         }
-        LOG.info("current cluster id for sd="+sd.getCurrentDir() +
-            ";lv=" + layoutVersion + ";cid=" + cid);
+        LOG.info("current cluster id for sd={};lv={};"
+            + "cid={}", sd.getCurrentDir(), layoutVersion, cid);
 
         if(cid != null && !cid.equals("")) {
           return cid;
         }
       } catch (Exception e) {
-        LOG.warn("this sd not available: " + e.getLocalizedMessage());
+        LOG.warn("this sd not available: {}", e.getLocalizedMessage());
       } //ignore
     }
     LOG.warn("couldn't find any VERSION file containing valid ClusterId");
@@ -1151,8 +1156,8 @@ public class NNStorage extends Storage implements Closeable,
       try {
         writeProperties(sd);
       } catch (Exception e) {
-        LOG.warn("Error during write properties to the VERSION file to " +
-            sd.toString(), e);
+        LOG.warn("Error during write properties to the VERSION file to {}",
+            sd, e);
         reportErrorsOnDirectory(sd);
         if (getStorageDirs().isEmpty()) {
           throw new IOException("All the storage failed while writing " +
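An editorial observation on the reportErrorsOnDirectory() hunk above: parameterized logging defers message formatting, but the argument expressions are still evaluated at the call site. That is presumably why this patch adds explicit isDebugEnabled() guards around the listStorageDirectories() call rather than relying on {} alone, in this shape:

// The {} form alone would still invoke listStorageDirectories() on every
// call; the guard skips that work entirely when debug logging is off.
if (LOG.isDebugEnabled()) {
  String lsd = listStorageDirectories();
  LOG.debug("current list of storage dirs:{}", lsd);
}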

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java

@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -26,6 +24,8 @@ import org.mockito.Mockito;
 import java.io.File;
 import java.util.Random;
 import java.util.UUID;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertThat;
@@ -35,7 +35,9 @@ import static org.junit.Assert.assertThat;
  * restore directories for a given block file path.
  */
 public class TestBlockPoolSliceStorage {
-  public static final Log LOG = LogFactory.getLog(TestBlockPoolSliceStorage.class);
+  public static final Logger LOG = LoggerFactory
+      .getLogger(TestBlockPoolSliceStorage.class);
 
   final Random rand = new Random();
   BlockPoolSliceStorage storage;
@@ -104,8 +106,8 @@ public class TestBlockPoolSliceStorage {
         BlockPoolSliceStorage.TRASH_ROOT_DIR +
         blockFileSubdir.substring(0, blockFileSubdir.length() - 1);
 
-    LOG.info("Got subdir " + blockFileSubdir);
-    LOG.info("Generated file path " + testFilePath);
+    LOG.info("Got subdir {}", blockFileSubdir);
+    LOG.info("Generated file path {}", testFilePath);
 
     ReplicaInfo info = Mockito.mock(ReplicaInfo.class);
     Mockito.when(info.getBlockURI()).thenReturn(new File(testFilePath).toURI());
@@ -131,7 +133,7 @@ public class TestBlockPoolSliceStorage {
         Storage.STORAGE_DIR_CURRENT +
         blockFileSubdir.substring(0, blockFileSubdir.length() - 1);
 
-    LOG.info("Generated deleted file path " + deletedFilePath);
+    LOG.info("Generated deleted file path {}", deletedFilePath);
     assertThat(storage.getRestoreDirectory(new File(deletedFilePath)),
         is(expectedRestorePath));