diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 0417b0a638e..c60a9b701eb 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -319,9 +319,6 @@ Trunk (Unreleased) HADOOP-10840. Fix OutOfMemoryError caused by metrics system in Azure File System. (Shanyu Zhao via cnauroth) - HADOOP-10925. Compilation fails in native link0 function on Windows. - (cnauroth) - HADOOP-11002. shell escapes are incompatible with previous releases (aw) HADOOP-10996. Stop violence in the *_HOME (aw) @@ -509,6 +506,9 @@ Release 2.6.0 - UNRELEASED HADOOP-11070. Create MiniKMS for testing. (tucu) + HADOOP-11057. checknative command to probe for winutils.exe on windows. + (Xiaoyu Yao via cnauroth) + OPTIMIZATIONS HADOOP-10838. Byte array native checksumming. (James Thomas via todd) @@ -774,6 +774,9 @@ Release 2.6.0 - UNRELEASED HADOOP-11071. KMSClientProvider should drain the local generated EEK cache on key rollover. (tucu) + HADOOP-10925. Compilation fails in native link0 function on Windows. + (cnauroth) + Release 2.5.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java index 0d87bceda17..641635542fa 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java @@ -37,7 +37,8 @@ public class NativeLibraryChecker { public static void main(String[] args) { String usage = "NativeLibraryChecker [-a|-h]\n" + " -a use -a to check all libraries are available\n" - + " by default just check hadoop library is available\n" + + " by default just check hadoop library (and\n" + + " winutils.exe on Windows OS) is available\n" + " exit with error code 1 if check failed\n" + " -h print this message\n"; if (args.length > 1 || @@ -62,12 +63,16 @@ public class NativeLibraryChecker { boolean lz4Loaded = nativeHadoopLoaded; boolean bzip2Loaded = Bzip2Factory.isNativeBzip2Loaded(conf); boolean openSslLoaded = false; + boolean winutilsExists = false; + String openSslDetail = ""; String hadoopLibraryName = ""; String zlibLibraryName = ""; String snappyLibraryName = ""; String lz4LibraryName = ""; String bzip2LibraryName = ""; + String winutilsPath = null; + if (nativeHadoopLoaded) { hadoopLibraryName = NativeCodeLoader.getLibraryName(); zlibLoaded = ZlibFactory.isNativeZlibLoaded(conf); @@ -93,6 +98,15 @@ public class NativeLibraryChecker { bzip2LibraryName = Bzip2Factory.getLibraryName(conf); } } + + // winutils.exe is required on Windows + winutilsPath = Shell.getWinUtilsPath(); + if (winutilsPath != null) { + winutilsExists = true; + } else { + winutilsPath = ""; + } + System.out.println("Native library checking:"); System.out.printf("hadoop: %b %s\n", nativeHadoopLoaded, hadoopLibraryName); System.out.printf("zlib: %b %s\n", zlibLoaded, zlibLibraryName); @@ -100,7 +114,11 @@ public class NativeLibraryChecker { System.out.printf("lz4: %b %s\n", lz4Loaded, lz4LibraryName); System.out.printf("bzip2: %b %s\n", bzip2Loaded, bzip2LibraryName); System.out.printf("openssl: %b %s\n", openSslLoaded, openSslDetail); - if ((!nativeHadoopLoaded) || + if (Shell.WINDOWS) { + System.out.printf("winutils: %b %s\n", winutilsExists, winutilsPath); + } + + if ((!nativeHadoopLoaded) 
|| (Shell.WINDOWS && (!winutilsExists)) || (checkAll && !(zlibLoaded && snappyLoaded && lz4Loaded && bzip2Loaded))) { // return 1 to indicated check failed ExitUtil.terminate(1); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeLibraryChecker.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeLibraryChecker.java index c41b56768b6..7589e5a6af8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeLibraryChecker.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeLibraryChecker.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.util; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; + import junit.framework.TestCase; import org.apache.hadoop.util.ExitUtil.ExitException; @@ -51,4 +54,30 @@ public class TestNativeLibraryChecker extends TestCase { } } + @Test + public void testNativeLibraryCheckerOutput(){ + expectOutput(new String[]{"-a"}); + // no argument + expectOutput(new String[0]); + } + + private void expectOutput(String [] args) { + ExitUtil.disableSystemExit(); + ByteArrayOutputStream outContent = new ByteArrayOutputStream(); + PrintStream originalPs = System.out; + System.setOut(new PrintStream(outContent)); + try { + NativeLibraryChecker.main(args); + } catch (ExitException e) { + ExitUtil.resetFirstExitException(); + } finally { + if (Shell.WINDOWS) { + assertEquals(outContent.toString().indexOf("winutils: true") != -1, true); + } + if (NativeCodeLoader.isNativeCodeLoaded()) { + assertEquals(outContent.toString().indexOf("hadoop: true") != -1, true); + } + System.setOut(originalPs); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 33124b4d5e5..77229dd9d31 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -153,9 +153,6 @@ Trunk (Unreleased) HDFS-6609. Use DirectorySnapshottableFeature to represent a snapshottable directory. (Jing Zhao via wheat9) - HDFS-6482. Use block ID-based block layout on datanodes (James Thomas via - Colin Patrick McCabe) - OPTIMIZATIONS BUG FIXES @@ -285,6 +282,8 @@ Trunk (Unreleased) HDFS-6893. crypto subcommand is not sorted properly in hdfs's hadoop_usage (David Luo via aw) + HDFS-6981. Fix DN upgrade with layout version change. (Arpit Agarwal) + Release 2.6.0 - UNRELEASED INCOMPATIBLE CHANGES @@ -470,14 +469,15 @@ Release 2.6.0 - UNRELEASED HDFS-6376. Distcp data between two HA clusters requires another configuration. (Dave Marion and Haohui Mai via jing9) - HDFS-6940. Refactoring to allow ConsensusNode implementation. (shv) - HDFS-6943. Improve NN allocateBlock log to include replicas' datanode IPs. (Ming Ma via wheat9) HDFS-6036. Forcibly timeout misbehaving DFSClients that try to do no-checksum reads that extend too long (cmccabe) + HDFS-6482. Use block ID-based block layout on datanodes (James Thomas via + Colin Patrick McCabe) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. (wang) @@ -652,6 +652,12 @@ Release 2.6.0 - UNRELEASED HDFS-7005. DFS input streams do not timeout. + HDFS-6951. Correctly persist raw namespace xattrs to edit log and fsimage. + (clamb via wang) + + HDFS-6506. Newly moved block replica been invalidated and deleted in + TestBalancer. (Binglin Chang via cnauroth) + BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS HDFS-6387. 
HDFS CLI admin tool for creating & deleting an diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index ee49bcf488c..67994c899d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -537,9 +537,11 @@ public class Balancer { */ static int run(Collection namenodes, final Parameters p, Configuration conf) throws IOException, InterruptedException { - final long sleeptime = 2000*conf.getLong( - DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, - DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT); + final long sleeptime = + conf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, + DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 2000 + + conf.getLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, + DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000; LOG.info("namenodes = " + namenodes); LOG.info("parameters = " + p); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 9b030e732a9..956900d32fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -165,7 +165,7 @@ public class BlockManager { final BlocksMap blocksMap; /** Replication thread. */ - Daemon replicationThread; + final Daemon replicationThread = new Daemon(new ReplicationMonitor()); /** Store blocks -> datanodedescriptor(s) map of corrupt replicas */ final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap(); @@ -265,7 +265,6 @@ public class BlockManager { this.namesystem = namesystem; datanodeManager = new DatanodeManager(this, namesystem, conf); heartbeatManager = datanodeManager.getHeartbeatManager(); - setReplicationMonitor(new ReplicationMonitor()); final long pendingPeriod = conf.getLong( DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY, @@ -403,22 +402,6 @@ public class BlockManager { return storagePolicySuite.getPolicy(policyName); } - public long getReplicationRecheckInterval() { - return replicationRecheckInterval; - } - - public AtomicLong excessBlocksCount() { - return excessBlocksCount; - } - - public void clearInvalidateBlocks() { - invalidateBlocks.clear(); - } - - void setReplicationMonitor(Runnable replicationMonitor) { - replicationThread = new Daemon(replicationMonitor); - } - public void setBlockPoolId(String blockPoolId) { if (isBlockTokenEnabled()) { blockTokenSecretManager.setBlockPoolId(blockPoolId); @@ -1664,7 +1647,7 @@ public class BlockManager { * If there were any replication requests that timed out, reap them * and put them back into the neededReplication queue */ - void processPendingReplications() { + private void processPendingReplications() { Block[] timedOutItems = pendingReplications.getTimedOutBlocks(); if (timedOutItems != null) { namesystem.writeLock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 55d616f699b..709f060d237 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -1053,7 +1053,7 @@ public class DatanodeManager { * 3. Added to exclude --> start decommission. * 4. Removed from exclude --> stop decommission. */ - void refreshDatanodes() { + private void refreshDatanodes() { for(DatanodeDescriptor node : datanodeMap.values()) { // Check if not include. if (!hostFileManager.isIncluded(node)) { @@ -1586,9 +1586,5 @@ public class DatanodeManager { public void setShouldSendCachingCommands(boolean shouldSendCachingCommands) { this.shouldSendCachingCommands = shouldSendCachingCommands; } - - public HostFileManager getHostFileManager() { - return this.hostFileManager; - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java index 7db23e41505..0b8d6c5bc16 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java @@ -129,10 +129,6 @@ class HostFileManager { void refresh(String includeFile, String excludeFile) throws IOException { HostSet newIncludes = readFile("included", includeFile); HostSet newExcludes = readFile("excluded", excludeFile); - setHosts(newIncludes, newExcludes); - } - - void setHosts(HostSet newIncludes, HostSet newExcludes) { synchronized (this) { includes = newIncludes; excludes = newExcludes; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java index 822c03d8c3f..3121496f5fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java @@ -474,11 +474,14 @@ class BPOfferService { * Signal the current rolling upgrade status as indicated by the NN. 
* @param inProgress true if a rolling upgrade is in progress */ - void signalRollingUpgrade(boolean inProgress) { + void signalRollingUpgrade(boolean inProgress) throws IOException { + String bpid = getBlockPoolId(); if (inProgress) { - dn.getFSDataset().enableTrash(getBlockPoolId()); + dn.getFSDataset().enableTrash(bpid); + dn.getFSDataset().setRollingUpgradeMarker(bpid); } else { - dn.getFSDataset().restoreTrash(getBlockPoolId()); + dn.getFSDataset().restoreTrash(bpid); + dn.getFSDataset().clearRollingUpgradeMarker(bpid); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index 59ca11a5404..7d3068879b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -627,7 +627,7 @@ class BPServiceActor implements Runnable { bpos.shutdownActor(this); } - private void handleRollingUpgradeStatus(HeartbeatResponse resp) { + private void handleRollingUpgradeStatus(HeartbeatResponse resp) throws IOException { RollingUpgradeStatus rollingUpgradeStatus = resp.getRollingUpdateStatus(); if (rollingUpgradeStatus != null && rollingUpgradeStatus.getBlockPoolId().compareTo(bpos.getBlockPoolId()) != 0) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java index b7f688dca4d..8333bb4af53 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.HardLink; @@ -38,8 +39,11 @@ import java.util.ArrayList; import java.util.Collection; import java.util.HashSet; import java.util.Iterator; +import java.util.List; import java.util.Properties; import java.util.Set; +import java.util.Collections; +import java.util.concurrent.ConcurrentHashMap; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -62,6 +66,18 @@ import java.util.regex.Pattern; public class BlockPoolSliceStorage extends Storage { static final String TRASH_ROOT_DIR = "trash"; + /** + * A marker file that is created on each root directory if a rolling upgrade + * is in progress. The NN does not inform the DN when a rolling upgrade is + * finalized. All the DN can infer is whether or not a rolling upgrade is + * currently in progress. When the rolling upgrade is not in progress: + * 1. If the marker file is present, then a rolling upgrade just completed. + * If a 'previous' directory exists, it can be deleted now. + * 2. If the marker file is absent, then a regular upgrade may be in + * progress. Do not delete the 'previous' directory. 
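
Editor's note (not part of the patch): the marker-file rule described in the comment above can be summarized by a small self-contained sketch. The helper and its names below are hypothetical illustration only; in this change the real finalization path runs through clearRollingUpgradeMarkers() and doFinalize() further down.

import java.io.File;
import java.io.IOException;

/** Illustrative only: what a DN can infer once the NN reports no rolling upgrade. */
final class RollingUpgradeMarkerRule {
  static final String MARKER = "RollingUpgradeInProgress";

  static void onNoRollingUpgradeInProgress(File bpRoot, File previousDir) throws IOException {
    File marker = new File(bpRoot, MARKER);
    if (marker.exists()) {
      // Case 1: the marker is present, so a rolling upgrade just finished.
      // The 'previous' directory is no longer needed and can be removed.
      fullyDelete(previousDir);
      if (!marker.delete()) {
        throw new IOException("Failed to delete " + marker);
      }
    }
    // Case 2: no marker. A regular (non-rolling) upgrade may be in progress,
    // so 'previous' must be left untouched.
  }

  private static void fullyDelete(File f) {
    File[] children = f.listFiles();
    if (children != null) {
      for (File c : children) {
        fullyDelete(c);
      }
    }
    f.delete();
  }
}
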
+ */ + static final String ROLLING_UPGRADE_MARKER_FILE = "RollingUpgradeInProgress"; + private static final String BLOCK_POOL_ID_PATTERN_BASE = Pattern.quote(File.separator) + "BP-\\d+-\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}-\\d+" + @@ -83,6 +99,13 @@ public class BlockPoolSliceStorage extends Storage { blockpoolID = bpid; } + /** + * These maps are used as an optimization to avoid one filesystem operation + * per storage on each heartbeat response. + */ + private static Set storagesWithRollingUpgradeMarker; + private static Set storagesWithoutRollingUpgradeMarker; + BlockPoolSliceStorage(int namespaceID, String bpID, long cTime, String clusterId) { super(NodeType.DATA_NODE); @@ -90,10 +113,18 @@ public class BlockPoolSliceStorage extends Storage { this.blockpoolID = bpID; this.cTime = cTime; this.clusterID = clusterId; + storagesWithRollingUpgradeMarker = Collections.newSetFromMap( + new ConcurrentHashMap()); + storagesWithoutRollingUpgradeMarker = Collections.newSetFromMap( + new ConcurrentHashMap()); } private BlockPoolSliceStorage() { super(NodeType.DATA_NODE); + storagesWithRollingUpgradeMarker = Collections.newSetFromMap( + new ConcurrentHashMap()); + storagesWithoutRollingUpgradeMarker = Collections.newSetFromMap( + new ConcurrentHashMap()); } /** @@ -270,13 +301,9 @@ public class BlockPoolSliceStorage extends Storage { private void doTransition(DataNode datanode, StorageDirectory sd, NamespaceInfo nsInfo, StartupOption startOpt) throws IOException { if (startOpt == StartupOption.ROLLBACK && sd.getPreviousDir().exists()) { - // we will already restore everything in the trash by rolling back to - // the previous directory, so we must delete the trash to ensure - // that it's not restored by BPOfferService.signalRollingUpgrade() - if (!FileUtil.fullyDelete(getTrashRootDir(sd))) { - throw new IOException("Unable to delete trash directory prior to " + - "restoration of previous directory: " + getTrashRootDir(sd)); - } + Preconditions.checkState(!getTrashRootDir(sd).exists(), + sd.getPreviousDir() + " and " + getTrashRootDir(sd) + " should not " + + " both be present."); doRollback(sd, nsInfo); // rollback if applicable } else { // Restore all the files in the trash. The restored files are retained @@ -440,10 +467,18 @@ public class BlockPoolSliceStorage extends Storage { } final File newChild = new File(restoreDirectory, child.getName()); - if (!child.renameTo(newChild)) { + + if (newChild.exists() && newChild.length() >= child.length()) { + // Failsafe - we should not hit this case but let's make sure + // we never overwrite a newer version of a block file with an + // older version. + LOG.info("Not overwriting " + newChild + " with smaller file from " + + "trash directory. This message can be safely ignored."); + } else if (!child.renameTo(newChild)) { throw new IOException("Failed to rename " + child + " to " + newChild); + } else { + ++filesRestored; } - ++filesRestored; } FileUtil.fullyDelete(trashRoot); return filesRestored; @@ -599,6 +634,18 @@ public class BlockPoolSliceStorage extends Storage { return new File(sd.getRoot(), TRASH_ROOT_DIR); } + /** + * Determine whether we can use trash for the given blockFile. Trash + * is disallowed if a 'previous' directory exists for the + * storage directory containing the block. 
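
Editor's note (not part of the patch): combined with the getTrashDirectory() change a little further down, which now returns null whenever a 'previous' directory exists, the rule above leaves a deletion path with exactly two outcomes. The sketch below illustrates that caller-side decision with hypothetical names; it is not the DataNode's actual deletion code.

import java.io.File;
import java.io.IOException;

/** Illustrative only: acting on the result of getTrashDirectory(). */
final class BlockFileRemovalSketch {
  static void remove(File blockFile, String trashDir) throws IOException {
    if (trashDir == null) {
      // No trash directory was offered: either trash is disabled, or (with
      // this patch) a 'previous' directory already preserves the pre-upgrade
      // state. Delete the block file immediately.
      if (!blockFile.delete()) {
        throw new IOException("Failed to delete " + blockFile);
      }
      return;
    }
    // Rolling upgrade in progress and no 'previous' directory: move the file
    // under trash/ so it can be restored if the upgrade is rolled back.
    File target = new File(trashDir, blockFile.getName());
    File parent = target.getParentFile();
    if (!parent.isDirectory() && !parent.mkdirs()) {
      throw new IOException("Failed to create " + parent);
    }
    if (!blockFile.renameTo(target)) {
      throw new IOException("Failed to move " + blockFile + " to " + target);
    }
  }
}
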
+ */ + @VisibleForTesting + public boolean isTrashAllowed(File blockFile) { + Matcher matcher = BLOCK_POOL_CURRENT_PATH_PATTERN.matcher(blockFile.getParent()); + String previousDir = matcher.replaceFirst("$1$2" + STORAGE_DIR_PREVIOUS); + return !(new File(previousDir)).exists(); + } + /** * Get a target subdirectory under trash/ for a given block file that is being * deleted. @@ -609,9 +656,12 @@ public class BlockPoolSliceStorage extends Storage { * @return the trash directory for a given block file that is being deleted. */ public String getTrashDirectory(File blockFile) { - Matcher matcher = BLOCK_POOL_CURRENT_PATH_PATTERN.matcher(blockFile.getParent()); - String trashDirectory = matcher.replaceFirst("$1$2" + TRASH_ROOT_DIR + "$4"); - return trashDirectory; + if (isTrashAllowed(blockFile)) { + Matcher matcher = BLOCK_POOL_CURRENT_PATH_PATTERN.matcher(blockFile.getParent()); + String trashDirectory = matcher.replaceFirst("$1$2" + TRASH_ROOT_DIR + "$4"); + return trashDirectory; + } + return null; } /** @@ -638,6 +688,7 @@ public class BlockPoolSliceStorage extends Storage { for (StorageDirectory sd : storageDirs) { File trashRoot = getTrashRootDir(sd); try { + Preconditions.checkState(!(trashRoot.exists() && sd.getPreviousDir().exists())); restoreBlockFilesFromTrash(trashRoot); FileUtil.fullyDelete(getTrashRootDir(sd)); } catch (IOException ioe) { @@ -656,4 +707,49 @@ public class BlockPoolSliceStorage extends Storage { } return false; } + + /** + * Create a rolling upgrade marker file for each BP storage root, if it + * does not exist already. + */ + public void setRollingUpgradeMarkers(List dnStorageDirs) + throws IOException { + for (StorageDirectory sd : dnStorageDirs) { + File bpRoot = getBpRoot(blockpoolID, sd.getCurrentDir()); + File markerFile = new File(bpRoot, ROLLING_UPGRADE_MARKER_FILE); + if (!storagesWithRollingUpgradeMarker.contains(bpRoot.toString())) { + if (!markerFile.exists() && markerFile.createNewFile()) { + LOG.info("Created " + markerFile); + } else { + LOG.info(markerFile + " already exists."); + } + storagesWithRollingUpgradeMarker.add(bpRoot.toString()); + storagesWithoutRollingUpgradeMarker.remove(bpRoot.toString()); + } + } + } + + /** + * Check whether the rolling upgrade marker file exists for each BP storage + * root. If it does exist, then the marker file is cleared and more + * importantly the layout upgrade is finalized. 
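
Editor's note (not part of the patch): the two static sets introduced above (storagesWithRollingUpgradeMarker / storagesWithoutRollingUpgradeMarker) exist so that each heartbeat response does not cost one exists()/createNewFile() call per storage directory. A minimal sketch of that memoization pattern, with illustrative names rather than the patch's exact fields:

import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

/** Illustrative only: remember which BP roots already carry the marker. */
final class MarkerCache {
  private final Set<String> knownMarked =
      Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());

  void ensureMarker(File bpRoot) throws IOException {
    if (knownMarked.contains(bpRoot.toString())) {
      return; // already verified on an earlier heartbeat; skip the disk access
    }
    File marker = new File(bpRoot, "RollingUpgradeInProgress");
    if (!marker.exists() && !marker.createNewFile()) {
      throw new IOException("Could not create " + marker);
    }
    knownMarked.add(bpRoot.toString()); // later heartbeats return early
  }
}
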
+ */ + public void clearRollingUpgradeMarkers(List dnStorageDirs) + throws IOException { + for (StorageDirectory sd : dnStorageDirs) { + File bpRoot = getBpRoot(blockpoolID, sd.getCurrentDir()); + File markerFile = new File(bpRoot, ROLLING_UPGRADE_MARKER_FILE); + if (!storagesWithoutRollingUpgradeMarker.contains(bpRoot.toString())) { + if (markerFile.exists()) { + LOG.info("Deleting " + markerFile); + doFinalize(sd.getCurrentDir()); + if (!markerFile.delete()) { + LOG.warn("Failed to delete " + markerFile); + } + } + storagesWithoutRollingUpgradeMarker.add(bpRoot.toString()); + storagesWithRollingUpgradeMarker.remove(bpRoot.toString()); + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index f382a9e7eb8..4383e56153a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -83,11 +83,17 @@ public class DataStorage extends Storage { public final static String STORAGE_DIR_FINALIZED = "finalized"; public final static String STORAGE_DIR_TMP = "tmp"; - // Set of bpids for which 'trash' is currently enabled. - // When trash is enabled block files are moved under a separate - // 'trash' folder instead of being deleted right away. This can - // be useful during rolling upgrades, for example. - // The set is backed by a concurrent HashMap. + /** + * Set of bpids for which 'trash' is currently enabled. + * When trash is enabled block files are moved under a separate + * 'trash' folder instead of being deleted right away. This can + * be useful during rolling upgrades, for example. + * The set is backed by a concurrent HashMap. + * + * Even if trash is enabled, it is not used if a layout upgrade + * is in progress for a storage directory i.e. if the previous + * directory exists. + */ private Set trashEnabledBpids; /** @@ -136,7 +142,9 @@ public class DataStorage extends Storage { } /** - * Enable trash for the specified block pool storage. + * Enable trash for the specified block pool storage. Even if trash is + * enabled by the caller, it is superseded by the 'previous' directory + * if a layout upgrade is in progress. */ public void enableTrash(String bpid) { if (trashEnabledBpids.add(bpid)) { @@ -156,6 +164,14 @@ public class DataStorage extends Storage { return trashEnabledBpids.contains(bpid); } + public void setRollingUpgradeMarker(String bpid) throws IOException { + getBPStorage(bpid).setRollingUpgradeMarkers(storageDirs); + } + + public void clearRollingUpgradeMarker(String bpid) throws IOException { + getBPStorage(bpid).clearRollingUpgradeMarkers(storageDirs); + } + /** * If rolling upgrades are in progress then do not delete block files * immediately. Instead we move the block files to an intermediate @@ -688,7 +704,8 @@ public class DataStorage extends Storage { if (DataNodeLayoutVersion.supports( LayoutVersion.Feature.FEDERATION, layoutVersion)) { // The VERSION file is already read in. Override the layoutVersion - // field and overwrite the file. + // field and overwrite the file. 
The upgrade work is handled by + // {@link BlockPoolSliceStorage#doUpgrade} LOG.info("Updating layout version from " + layoutVersion + " to " + HdfsConstants.DATANODE_LAYOUT_VERSION + " for storage " + sd.getRoot()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java index 0fbfe190869..553208eeafd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java @@ -443,6 +443,17 @@ public interface FsDatasetSpi extends FSDatasetMBean { */ public boolean trashEnabled(String bpid); + /** + * Create a marker file indicating that a rolling upgrade is in progress. + */ + public void setRollingUpgradeMarker(String bpid) throws IOException; + + /** + * Delete the rolling upgrade marker file if it exists. + * @param bpid + */ + public void clearRollingUpgradeMarker(String bpid) throws IOException; + /** * submit a sync_file_range request to AsyncDiskService */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 4511f21c513..05c99143c8a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -2039,6 +2039,16 @@ class FsDatasetImpl implements FsDatasetSpi { return dataStorage.trashEnabled(bpid); } + @Override + public void setRollingUpgradeMarker(String bpid) throws IOException { + dataStorage.setRollingUpgradeMarker(bpid); + } + + @Override + public void clearRollingUpgradeMarker(String bpid) throws IOException { + dataStorage.clearRollingUpgradeMarker(bpid); + } + @Override public RollingLogs createRollingLogs(String bpid, String prefix ) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java index a0e1f0ccac2..f00f132fc8e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java @@ -111,6 +111,18 @@ public class EncryptionZoneManager { */ void addEncryptionZone(Long inodeId, String keyName) { assert dir.hasWriteLock(); + unprotectedAddEncryptionZone(inodeId, keyName); + } + + /** + * Add a new encryption zone. + *
+ * Does not assume that the FSDirectory lock is held. + * + * @param inodeId of the encryption zone + * @param keyName encryption zone key name + */ + void unprotectedAddEncryptionZone(Long inodeId, String keyName) { final EncryptionZoneInt ez = new EncryptionZoneInt(inodeId, keyName); encryptionZones.put(inodeId, ez); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index f541da50e4a..a2dde727e52 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -2158,7 +2158,7 @@ public class FSDirectory implements Closeable { for (XAttr xattr : xattrs) { final String xaName = XAttrHelper.getPrefixName(xattr); if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) { - ezManager.addEncryptionZone(inode.getId(), + ezManager.unprotectedAddEncryptionZone(inode.getId(), new String(xattr.getValue())); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java index 1ddcf392993..321a14855ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java @@ -82,7 +82,12 @@ public final class FSImageFormatPBINode { private static final int XATTR_NAMESPACE_OFFSET = 30; private static final int XATTR_NAME_MASK = (1 << 24) - 1; private static final int XATTR_NAME_OFFSET = 6; - private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES = + + /* See the comments in fsimage.proto for an explanation of the following. */ + private static final int XATTR_NAMESPACE_EXT_OFFSET = 5; + private static final int XATTR_NAMESPACE_EXT_MASK = 1; + + private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES = XAttr.NameSpace.values(); @@ -122,6 +127,8 @@ public final class FSImageFormatPBINode { int v = xAttrCompactProto.getName(); int nid = (v >> XATTR_NAME_OFFSET) & XATTR_NAME_MASK; int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK; + ns |= + ((v >> XATTR_NAMESPACE_EXT_OFFSET) & XATTR_NAMESPACE_EXT_MASK) << 2; String name = stringTable[nid]; byte[] value = null; if (xAttrCompactProto.getValue() != null) { @@ -371,10 +378,13 @@ public final class FSImageFormatPBINode { for (XAttr a : f.getXAttrs()) { XAttrCompactProto.Builder xAttrCompactBuilder = XAttrCompactProto. 
newBuilder(); - int v = ((a.getNameSpace().ordinal() & XATTR_NAMESPACE_MASK) << - XATTR_NAMESPACE_OFFSET) - | ((stringMap.getId(a.getName()) & XATTR_NAME_MASK) << + int nsOrd = a.getNameSpace().ordinal(); + Preconditions.checkArgument(nsOrd < 8, "Too many namespaces."); + int v = ((nsOrd & XATTR_NAMESPACE_MASK) << XATTR_NAMESPACE_OFFSET) + | ((stringMap.getId(a.getName()) & XATTR_NAME_MASK) << XATTR_NAME_OFFSET); + v |= (((nsOrd >> 2) & XATTR_NAMESPACE_EXT_MASK) << + XATTR_NAMESPACE_EXT_OFFSET); xAttrCompactBuilder.setName(v); if (a.getValue() != null) { xAttrCompactBuilder.setValue(PBHelper.getByteString(a.getValue())); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 3eea83dd1f6..b90e2d518a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -978,7 +978,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, return Collections.unmodifiableList(auditLoggers); } - protected void loadFSImage(StartupOption startOpt) throws IOException { + private void loadFSImage(StartupOption startOpt) throws IOException { final FSImage fsImage = getFSImage(); // format before starting up if requested @@ -1026,7 +1026,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, imageLoadComplete(); } - protected void startSecretManager() { + private void startSecretManager() { if (dtSecretManager != null) { try { dtSecretManager.startThreads(); @@ -1038,7 +1038,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } } - protected void startSecretManagerIfNecessary() { + private void startSecretManagerIfNecessary() { boolean shouldRun = shouldUseDelegationTokens() && !isInSafeMode() && getEditLog().isOpenForWrite(); boolean running = dtSecretManager.isRunning(); @@ -1188,7 +1188,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, return haEnabled && inActiveState() && startingActiveService; } - protected boolean shouldUseDelegationTokens() { + private boolean shouldUseDelegationTokens() { return UserGroupInformation.isSecurityEnabled() || alwaysUseDelegationTokensForTests; } @@ -2775,7 +2775,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats, * @throws UnresolvedLinkException * @throws IOException */ - protected LocatedBlock prepareFileForWrite(String src, INodeFile file, String leaseHolder, String clientMachine, boolean writeToEditLog, @@ -3235,7 +3234,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats, return new FileState(pendingFile, src); } - protected LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs, long offset) throws IOException { LocatedBlock lBlk = new LocatedBlock( @@ -3354,8 +3352,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats, return true; } - protected INodeFile checkLease(String src, String holder, INode inode, - long fileId) + private INodeFile checkLease(String src, String holder, INode inode, + long fileId) throws LeaseExpiredException, FileNotFoundException { assert hasReadLock(); final String ident = src + " (inode " + fileId + ")"; @@ -4474,7 +4472,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, return leaseManager.reassignLease(lease, src, newHolder); } - protected void 
commitOrCompleteLastBlock(final INodeFile fileINode, + private void commitOrCompleteLastBlock(final INodeFile fileINode, final Block commitBlock) throws IOException { assert hasWriteLock(); Preconditions.checkArgument(fileINode.isUnderConstruction()); @@ -4872,7 +4870,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats, * @return an array of datanode commands * @throws IOException */ - protected HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg, StorageReport[] reports, long cacheCapacity, long cacheUsed, int xceiverCount, int xmitsInProgress, int failedVolumes) @@ -4922,8 +4919,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats, * @param file * @param logRetryCache */ - protected void persistBlocks(String path, INodeFile file, - boolean logRetryCache) { + private void persistBlocks(String path, INodeFile file, + boolean logRetryCache) { assert hasWriteLock(); Preconditions.checkArgument(file.isUnderConstruction()); getEditLog().logUpdateBlocks(path, file, logRetryCache); @@ -5348,7 +5345,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, * @param path * @param file */ - protected void persistNewBlock(String path, INodeFile file) { + private void persistNewBlock(String path, INodeFile file) { Preconditions.checkArgument(file.isUnderConstruction()); getEditLog().logAddBlock(path, file); if (NameNode.stateChangeLog.isDebugEnabled()) { @@ -7226,7 +7223,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, * * @return true if delegation token operation is allowed */ - protected boolean isAllowedDelegationTokenOp() throws IOException { + private boolean isAllowedDelegationTokenOp() throws IOException { AuthenticationMethod authMethod = getConnectionAuthenticationMethod(); if (UserGroupInformation.isSecurityEnabled() && (authMethod != AuthenticationMethod.KERBEROS) @@ -7393,13 +7390,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, final List live = new ArrayList(); blockManager.getDatanodeManager().fetchDatanodes(live, null, true); for (DatanodeDescriptor node : live) { - info.put(node.getHostName(), getLiveNodeInfo(node)); - } - return JSON.toString(info); - } - - protected Map getLiveNodeInfo(DatanodeDescriptor node) { - return ImmutableMap.builder() + Map innerinfo = ImmutableMap.builder() .put("infoAddr", node.getInfoAddr()) .put("infoSecureAddr", node.getInfoSecureAddr()) .put("xferaddr", node.getXferAddr()) @@ -7417,6 +7408,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats, .put("blockPoolUsedPercent", node.getBlockPoolUsedPercent()) .put("volfails", node.getVolumeFailures()) .build(); + + info.put(node.getHostName(), innerinfo); + } + return JSON.toString(info); } /** @@ -7701,16 +7696,17 @@ public class FSNamesystem implements Namesystem, FSClusterStats, public ReentrantLock getLongReadLockForTests() { return fsLock.longReadLock; } + + @VisibleForTesting + public SafeModeInfo getSafeModeInfoForTests() { + return safeMode; + } @VisibleForTesting public void setNNResourceChecker(NameNodeResourceChecker nnResourceChecker) { this.nnResourceChecker = nnResourceChecker; } - public SafeModeInfo getSafeModeInfo() { - return safeMode; - } - @Override public boolean isAvoidingStaleDataNodesForWrite() { return this.blockManager.getDatanodeManager() diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java index c2896cfe9d0..512913b3acc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java @@ -67,8 +67,9 @@ public class NameNodeLayoutVersion { EDITLOG_LENGTH(-56, "Add length field to every edit log op"), XATTRS(-57, "Extended attributes"), CREATE_OVERWRITE(-58, "Use single editlog record for " + - "creating file with overwrite"), - BLOCK_STORAGE_POLICY(-59, "Block Storage policy"); + "creating file with overwrite"), + XATTRS_NAMESPACE_EXT(-59, "Increase number of xattr namespaces"), + BLOCK_STORAGE_POLICY(-60, "Block Storage policy"); private final FeatureInfo info; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto index a4b1af73097..588f6c86122 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto @@ -113,8 +113,12 @@ message INodeSection { * * [0:2) -- the namespace of XAttr (XAttrNamespaceProto) * [2:26) -- the name of the entry, which is an ID that points to a - * string in the StringTableSection. - * [26:32) -- reserved for future uses. + * string in the StringTableSection. + * [26:27) -- namespace extension. Originally there were only 4 namespaces + * so only 2 bits were needed. At that time, this bit was reserved. When a + * 5th namespace was created (raw) this bit became used as a 3rd namespace + * bit. + * [27:32) -- reserved for future uses. */ required fixed32 name = 1; optional bytes value = 2; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index a1aa7d64ac4..e8a2484f2f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion; import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -83,6 +84,8 @@ import org.apache.hadoop.util.VersionInfo; import org.junit.Assume; import java.io.*; +import java.lang.reflect.Field; +import java.lang.reflect.Modifier; import java.net.*; import java.nio.ByteBuffer; import java.security.NoSuchAlgorithmException; @@ -1508,4 +1511,38 @@ public class DFSTestUtil { throws Exception { FsShellRun(cmd, 0, null, conf); } + + public static void addDataNodeLayoutVersion(final int lv, final String description) + throws NoSuchFieldException, IllegalAccessException { + Preconditions.checkState(lv < DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION); + + // Override {@link DataNodeLayoutVersion#CURRENT_LAYOUT_VERSION} via reflection. 
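
Editor's note (not part of the patch): the FSImageFormatPBINode and fsimage.proto changes above squeeze a third namespace bit into the formerly reserved position ([26:27) in the proto comment's MSB-first numbering, bit 5 from the least-significant end in the Java constants), so a fifth xattr namespace such as raw still round-trips through the packed 32-bit name field. The standalone sketch below mirrors that arithmetic for illustration; XATTR_NAMESPACE_MASK is assumed to be 3 and the raw namespace is assumed to have ordinal 4, neither of which is spelled out in the hunks above.

public final class XAttrPackingSketch {
  // Constants mirror FSImageFormatPBINode; the mask below is an assumed value.
  static final int XATTR_NAMESPACE_MASK = 3;
  static final int XATTR_NAMESPACE_OFFSET = 30;
  static final int XATTR_NAME_MASK = (1 << 24) - 1;
  static final int XATTR_NAME_OFFSET = 6;
  static final int XATTR_NAMESPACE_EXT_OFFSET = 5;
  static final int XATTR_NAMESPACE_EXT_MASK = 1;

  static int pack(int nsOrdinal, int nameId) {
    int v = ((nsOrdinal & XATTR_NAMESPACE_MASK) << XATTR_NAMESPACE_OFFSET)
        | ((nameId & XATTR_NAME_MASK) << XATTR_NAME_OFFSET);
    // The third namespace bit goes into the previously reserved position.
    v |= ((nsOrdinal >> 2) & XATTR_NAMESPACE_EXT_MASK) << XATTR_NAMESPACE_EXT_OFFSET;
    return v;
  }

  static int unpackNamespace(int v) {
    int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
    return ns | (((v >> XATTR_NAMESPACE_EXT_OFFSET) & XATTR_NAMESPACE_EXT_MASK) << 2);
  }

  public static void main(String[] args) {
    int rawOrdinal = 4; // assumed ordinal of the 5th ("raw") namespace
    int v = pack(rawOrdinal, 7);
    // Ordinal 4 is binary 100: bits 31-30 hold 00 and bit 5 holds the leading 1,
    // so the old 2-bit decode alone would wrongly report namespace 0.
    if (unpackNamespace(v) != rawOrdinal
        || ((v >> XATTR_NAME_OFFSET) & XATTR_NAME_MASK) != 7) {
      throw new AssertionError("round trip failed");
    }
    System.out.println("packed=0x" + Integer.toHexString(v)
        + " namespace=" + unpackNamespace(v));
  }
}
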
+ Field modifiersField = Field.class.getDeclaredField("modifiers"); + modifiersField.setAccessible(true); + Field field = DataNodeLayoutVersion.class.getField("CURRENT_LAYOUT_VERSION"); + field.setAccessible(true); + modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); + field.setInt(null, lv); + + // Override {@link HdfsConstants#DATANODE_LAYOUT_VERSION} + field = HdfsConstants.class.getField("DATANODE_LAYOUT_VERSION"); + field.setAccessible(true); + modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); + field.setInt(null, lv); + + // Inject the feature into the FEATURES map. + final LayoutVersion.FeatureInfo featureInfo = + new LayoutVersion.FeatureInfo(lv, lv + 1, description, false); + final LayoutVersion.LayoutFeature feature = + new LayoutVersion.LayoutFeature() { + @Override + public LayoutVersion.FeatureInfo getInfo() { + return featureInfo; + } + }; + + // Update the FEATURES map with the new layout version. + LayoutVersion.updateMap(DataNodeLayoutVersion.FEATURES, + new LayoutVersion.LayoutFeature[] { feature }); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index 0ef538d80b7..d0d29ea6d79 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -49,6 +49,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.client.HdfsAdmin; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector; import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager; import org.apache.hadoop.security.AccessControlException; @@ -314,6 +315,13 @@ public class TestEncryptionZones { assertNumZones(numZones); assertZonePresent(null, zonePath.toString()); } + + fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); + fs.saveNamespace(); + fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); + cluster.restartNameNode(true); + assertNumZones(numZones); + assertZonePresent(null, zone1.toString()); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index d509668bdc3..72597d2b759 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -72,7 +72,7 @@ public class TestBalancer { ((Log4JLogger)Balancer.LOG).getLogger().setLevel(Level.ALL); } - final static long CAPACITY = 500L; + final static long CAPACITY = 5000L; final static String RACK0 = "/rack0"; final static String RACK1 = "/rack1"; final static String RACK2 = "/rack2"; @@ -85,7 +85,7 @@ public class TestBalancer { static final long TIMEOUT = 40000L; //msec static final double CAPACITY_ALLOWED_VARIANCE = 0.005; // 0.5% static final double BALANCE_ALLOWED_VARIANCE = 0.11; // 10%+delta - static final int DEFAULT_BLOCK_SIZE = 10; + static final int DEFAULT_BLOCK_SIZE = 100; private static final Random r = new Random(); static { @@ -96,6 +96,7 @@ public class 
TestBalancer { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE); conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE); conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L); + conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L); SimulatedFSDataset.setFactory(conf); conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index a51342ea751..8ad4510d0d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -1072,6 +1072,14 @@ public class SimulatedFSDataset implements FsDatasetSpi { return false; } + @Override + public void setRollingUpgradeMarker(String bpid) { + } + + @Override + public void clearRollingUpgradeMarker(String bpid) { + } + @Override public void checkAndUpdate(String bpid, long blockId, File diskFile, File diskMetaFile, FsVolumeSpi vol) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java index f58f4710825..befb298e3b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode; import static org.hamcrest.core.Is.is; +import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; @@ -28,6 +29,8 @@ import static org.junit.Assert.assertTrue; import java.io.File; import java.io.IOException; import java.util.Random; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -47,6 +50,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.tools.DFSAdmin; +import org.apache.hadoop.test.GenericTestUtils; import org.junit.Test; /** @@ -64,7 +68,7 @@ public class TestDataNodeRollingUpgrade { Configuration conf; MiniDFSCluster cluster = null; DistributedFileSystem fs = null; - DataNode dn = null; + DataNode dn0 = null; NameNode nn = null; String blockPoolId = null; @@ -76,8 +80,8 @@ public class TestDataNodeRollingUpgrade { fs = cluster.getFileSystem(); nn = cluster.getNameNode(0); assertNotNull(nn); - dn = cluster.getDataNodes().get(0); - assertNotNull(dn); + dn0 = cluster.getDataNodes().get(0); + assertNotNull(dn0); blockPoolId = cluster.getNameNode(0).getNamesystem().getBlockPoolId(); } @@ -88,7 +92,7 @@ public class TestDataNodeRollingUpgrade { } fs = null; nn = null; - dn = null; + dn0 = null; blockPoolId = null; } @@ -103,9 +107,10 @@ public class TestDataNodeRollingUpgrade { private File getBlockForFile(Path path, boolean exists) throws IOException { LocatedBlocks blocks = 
nn.getRpcServer().getBlockLocations(path.toString(), 0, Long.MAX_VALUE); - assertEquals(1, blocks.getLocatedBlocks().size()); + assertEquals("The test helper functions assume that each file has a single block", + 1, blocks.getLocatedBlocks().size()); ExtendedBlock block = blocks.getLocatedBlocks().get(0).getBlock(); - BlockLocalPathInfo bInfo = dn.getFSDataset().getBlockLocalPathInfo(block); + BlockLocalPathInfo bInfo = dn0.getFSDataset().getBlockLocalPathInfo(block); File blockFile = new File(bInfo.getBlockPath()); assertEquals(exists, blockFile.exists()); return blockFile; @@ -113,7 +118,7 @@ public class TestDataNodeRollingUpgrade { private File getTrashFileForBlock(File blockFile, boolean exists) { File trashFile = new File( - dn.getStorage().getTrashDirectoryForBlockFile(blockPoolId, blockFile)); + dn0.getStorage().getTrashDirectoryForBlockFile(blockPoolId, blockFile)); assertEquals(exists, trashFile.exists()); return trashFile; } @@ -135,11 +140,10 @@ public class TestDataNodeRollingUpgrade { assertFalse(blockFile.exists()); } - private void ensureTrashDisabled() { + private boolean isTrashRootPresent() { // Trash is disabled; trash root does not exist - assertFalse(dn.getFSDataset().trashEnabled(blockPoolId)); - BlockPoolSliceStorage bps = dn.getStorage().getBPStorage(blockPoolId); - assertFalse(bps.trashEnabled()); + BlockPoolSliceStorage bps = dn0.getStorage().getBPStorage(blockPoolId); + return bps.trashEnabled(); } /** @@ -149,17 +153,25 @@ public class TestDataNodeRollingUpgrade { throws Exception { assertTrue(blockFile.exists()); assertFalse(trashFile.exists()); - ensureTrashDisabled(); + assertFalse(isTrashRootPresent()); + } + + private boolean isBlockFileInPrevious(File blockFile) { + Pattern blockFilePattern = Pattern.compile("^(.*/current/.*/)(current)(/.*)$"); + Matcher matcher = blockFilePattern.matcher(blockFile.toString()); + String previousFileName = matcher.replaceFirst("$1" + "previous" + "$3"); + return ((new File(previousFileName)).exists()); } private void startRollingUpgrade() throws Exception { LOG.info("Starting rolling upgrade"); + fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); final DFSAdmin dfsadmin = new DFSAdmin(conf); TestRollingUpgrade.runCmd(dfsadmin, true, "-rollingUpgrade", "prepare"); triggerHeartBeats(); // Ensure datanode rolling upgrade is started - assertTrue(dn.getFSDataset().trashEnabled(blockPoolId)); + assertTrue(dn0.getFSDataset().trashEnabled(blockPoolId)); } private void finalizeRollingUpgrade() throws Exception { @@ -169,8 +181,8 @@ public class TestDataNodeRollingUpgrade { triggerHeartBeats(); // Ensure datanode rolling upgrade is started - assertFalse(dn.getFSDataset().trashEnabled(blockPoolId)); - BlockPoolSliceStorage bps = dn.getStorage().getBPStorage(blockPoolId); + assertFalse(dn0.getFSDataset().trashEnabled(blockPoolId)); + BlockPoolSliceStorage bps = dn0.getStorage().getBPStorage(blockPoolId); assertFalse(bps.trashEnabled()); } @@ -179,13 +191,15 @@ public class TestDataNodeRollingUpgrade { // Restart the namenode with rolling upgrade rollback LOG.info("Starting rollback of the rolling upgrade"); MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0); + dnprop.setDnArgs("-rollback"); cluster.shutdownNameNodes(); cluster.restartNameNode("-rollingupgrade", "rollback"); cluster.restartDataNode(dnprop); cluster.waitActive(); nn = cluster.getNameNode(0); - dn = cluster.getDataNodes().get(0); + dn0 = cluster.getDataNodes().get(0); triggerHeartBeats(); + LOG.info("The cluster is active after rollback"); } @Test 
(timeout=600000) @@ -194,12 +208,11 @@ public class TestDataNodeRollingUpgrade { startCluster(); // Create files in DFS. - Path testFile1 = new Path("/TestDataNodeRollingUpgrade1.dat"); - Path testFile2 = new Path("/TestDataNodeRollingUpgrade2.dat"); + Path testFile1 = new Path("/" + GenericTestUtils.getMethodName() + ".01.dat"); + Path testFile2 = new Path("/" + GenericTestUtils.getMethodName() + ".02.dat"); DFSTestUtil.createFile(fs, testFile1, FILE_SIZE, REPL_FACTOR, SEED); DFSTestUtil.createFile(fs, testFile2, FILE_SIZE, REPL_FACTOR, SEED); - fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); startRollingUpgrade(); File blockFile = getBlockForFile(testFile2, true); File trashFile = getTrashFileForBlock(blockFile, false); @@ -207,7 +220,7 @@ public class TestDataNodeRollingUpgrade { finalizeRollingUpgrade(); // Ensure that delete file testFile2 stays deleted after finalize - ensureTrashDisabled(); + assertFalse(isTrashRootPresent()); assert(!fs.exists(testFile2)); assert(fs.exists(testFile1)); @@ -222,11 +235,10 @@ public class TestDataNodeRollingUpgrade { startCluster(); // Create files in DFS. - Path testFile1 = new Path("/TestDataNodeRollingUpgrade1.dat"); + Path testFile1 = new Path("/" + GenericTestUtils.getMethodName() + ".01.dat"); DFSTestUtil.createFile(fs, testFile1, FILE_SIZE, REPL_FACTOR, SEED); String fileContents1 = DFSTestUtil.readFile(fs, testFile1); - fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); startRollingUpgrade(); File blockFile = getBlockForFile(testFile1, true); @@ -255,9 +267,9 @@ public class TestDataNodeRollingUpgrade { startCluster(); // Create files in DFS. - String testFile1 = "/TestDataNodeXceiver1.dat"; - String testFile2 = "/TestDataNodeXceiver2.dat"; - String testFile3 = "/TestDataNodeXceiver3.dat"; + String testFile1 = "/" + GenericTestUtils.getMethodName() + ".01.dat"; + String testFile2 = "/" + GenericTestUtils.getMethodName() + ".02.dat"; + String testFile3 = "/" + GenericTestUtils.getMethodName() + ".03.dat"; DFSClient client1 = new DFSClient(NameNode.getAddress(conf), conf); DFSClient client2 = new DFSClient(NameNode.getAddress(conf), conf); @@ -277,12 +289,12 @@ public class TestDataNodeRollingUpgrade { s3.write(toWrite, 0, 1024*1024*8); s3.flush(); - assertTrue(dn.getXferServer().getNumPeersXceiver() == dn.getXferServer() + assertTrue(dn0.getXferServer().getNumPeersXceiver() == dn0.getXferServer() .getNumPeersXceiver()); s1.close(); s2.close(); s3.close(); - assertTrue(dn.getXferServer().getNumPeersXceiver() == dn.getXferServer() + assertTrue(dn0.getXferServer().getNumPeersXceiver() == dn0.getXferServer() .getNumPeersXceiver()); client1.close(); client2.close(); @@ -291,4 +303,143 @@ public class TestDataNodeRollingUpgrade { shutdownCluster(); } } + + /** + * Support for layout version change with rolling upgrade was + * added by HDFS-6800 and HDFS-6981. + */ + @Test(timeout=300000) + public void testWithLayoutChangeAndFinalize() throws Exception { + final long seed = 0x600DF00D; + try { + startCluster(); + + Path[] paths = new Path[3]; + File[] blockFiles = new File[3]; + + // Create two files in DFS. + for (int i = 0; i < 2; ++i) { + paths[i] = new Path("/" + GenericTestUtils.getMethodName() + "." + i + ".dat"); + DFSTestUtil.createFile(fs, paths[i], BLOCK_SIZE, (short) 2, seed); + } + + startRollingUpgrade(); + + // Delete the first file. The DN will save its block files in trash. 
+ blockFiles[0] = getBlockForFile(paths[0], true); + File trashFile0 = getTrashFileForBlock(blockFiles[0], false); + deleteAndEnsureInTrash(paths[0], blockFiles[0], trashFile0); + + // Restart the DN with a new layout version to trigger layout upgrade. + LOG.info("Shutting down the Datanode"); + MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0); + DFSTestUtil.addDataNodeLayoutVersion( + DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1, + "Test Layout for TestDataNodeRollingUpgrade"); + LOG.info("Restarting the DataNode"); + cluster.restartDataNode(dnprop, true); + cluster.waitActive(); + + dn0 = cluster.getDataNodes().get(0); + LOG.info("The DN has been restarted"); + assertFalse(trashFile0.exists()); + assertFalse(dn0.getStorage().getBPStorage(blockPoolId).isTrashAllowed(blockFiles[0])); + + // Ensure that the block file for the first file was moved from 'trash' to 'previous'. + assertTrue(isBlockFileInPrevious(blockFiles[0])); + assertFalse(isTrashRootPresent()); + + // Delete the second file. Ensure that its block file is in previous. + blockFiles[1] = getBlockForFile(paths[1], true); + fs.delete(paths[1], false); + assertTrue(isBlockFileInPrevious(blockFiles[1])); + assertFalse(isTrashRootPresent()); + + // Rollback and ensure that neither block file exists in trash or previous. + finalizeRollingUpgrade(); + assertFalse(isTrashRootPresent()); + assertFalse(isBlockFileInPrevious(blockFiles[0])); + assertFalse(isBlockFileInPrevious(blockFiles[1])); + } finally { + shutdownCluster(); + } + } + + /** + * Support for layout version change with rolling upgrade was + * added by HDFS-6800 and HDFS-6981. + */ + @Test(timeout=300000) + public void testWithLayoutChangeAndRollback() throws Exception { + final long seed = 0x600DF00D; + try { + startCluster(); + + Path[] paths = new Path[3]; + File[] blockFiles = new File[3]; + + // Create two files in DFS. + for (int i = 0; i < 2; ++i) { + paths[i] = new Path("/" + GenericTestUtils.getMethodName() + "." + i + ".dat"); + DFSTestUtil.createFile(fs, paths[i], BLOCK_SIZE, (short) 1, seed); + } + + startRollingUpgrade(); + + // Delete the first file. The DN will save its block files in trash. + blockFiles[0] = getBlockForFile(paths[0], true); + File trashFile0 = getTrashFileForBlock(blockFiles[0], false); + deleteAndEnsureInTrash(paths[0], blockFiles[0], trashFile0); + + // Restart the DN with a new layout version to trigger layout upgrade. + LOG.info("Shutting down the Datanode"); + MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0); + DFSTestUtil.addDataNodeLayoutVersion( + DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1, + "Test Layout for TestDataNodeRollingUpgrade"); + LOG.info("Restarting the DataNode"); + cluster.restartDataNode(dnprop, true); + cluster.waitActive(); + + dn0 = cluster.getDataNodes().get(0); + LOG.info("The DN has been restarted"); + assertFalse(trashFile0.exists()); + assertFalse(dn0.getStorage().getBPStorage(blockPoolId).isTrashAllowed(blockFiles[0])); + + // Ensure that the block file for the first file was moved from 'trash' to 'previous'. + assertTrue(isBlockFileInPrevious(blockFiles[0])); + assertFalse(isTrashRootPresent()); + + // Delete the second file. Ensure that its block file is in previous. + blockFiles[1] = getBlockForFile(paths[1], true); + fs.delete(paths[1], false); + assertTrue(isBlockFileInPrevious(blockFiles[1])); + assertFalse(isTrashRootPresent()); + + // Create and delete a third file. Its block file should not be + // in either trash or previous after deletion. 
+ paths[2] = new Path("/" + GenericTestUtils.getMethodName() + ".2.dat"); + DFSTestUtil.createFile(fs, paths[2], BLOCK_SIZE, (short) 1, seed); + blockFiles[2] = getBlockForFile(paths[2], true); + fs.delete(paths[2], false); + assertFalse(isBlockFileInPrevious(blockFiles[2])); + assertFalse(isTrashRootPresent()); + + // Rollback and ensure that the first two file contents were restored. + rollbackRollingUpgrade(); + for (int i = 0; i < 2; ++i) { + byte[] actual = DFSTestUtil.readFileBuffer(fs, paths[i]); + byte[] calculated = DFSTestUtil.calculateFileContentsFromSeed(seed, BLOCK_SIZE); + assertArrayEquals(actual, calculated); + } + + // And none of the block files must be in previous or trash. + assertFalse(isTrashRootPresent()); + for (int i = 0; i < 3; ++i) { + assertFalse(isBlockFileInPrevious(blockFiles[i])); + } + } finally { + shutdownCluster(); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java index 0c7b8070b44..9c484006247 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java @@ -56,6 +56,7 @@ import org.junit.BeforeClass; import org.junit.Test; import com.google.common.collect.Lists; +import com.google.common.collect.Maps; /** * Tests NameNode interaction for all XAttr APIs. @@ -129,51 +130,73 @@ public class FSXAttrBaseTest { */ @Test(timeout = 120000) public void testCreateXAttr() throws Exception { - FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); - fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); + Map expectedXAttrs = Maps.newHashMap(); + expectedXAttrs.put(name1, value1); + expectedXAttrs.put(name2, null); + doTestCreateXAttr(path, expectedXAttrs); + expectedXAttrs.put(raw1, value1); + doTestCreateXAttr(rawPath, expectedXAttrs); + } + + private void doTestCreateXAttr(Path usePath, Map expectedXAttrs) throws Exception { + FileSystem.mkdirs(fs, usePath, FsPermission.createImmutable((short)0750)); + fs.setXAttr(usePath, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); - Map xattrs = fs.getXAttrs(path); + Map xattrs = fs.getXAttrs(usePath); Assert.assertEquals(xattrs.size(), 1); Assert.assertArrayEquals(value1, xattrs.get(name1)); - fs.removeXAttr(path, name1); + fs.removeXAttr(usePath, name1); - xattrs = fs.getXAttrs(path); + xattrs = fs.getXAttrs(usePath); Assert.assertEquals(xattrs.size(), 0); // Create xattr which already exists. 
- fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); + fs.setXAttr(usePath, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); try { - fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); + fs.setXAttr(usePath, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); Assert.fail("Creating xattr which already exists should fail."); } catch (IOException e) { } - fs.removeXAttr(path, name1); + fs.removeXAttr(usePath, name1); - // Create two xattrs - fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); - fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.CREATE)); - xattrs = fs.getXAttrs(path); - Assert.assertEquals(xattrs.size(), 2); - Assert.assertArrayEquals(value1, xattrs.get(name1)); - Assert.assertArrayEquals(new byte[0], xattrs.get(name2)); + // Create the xattrs + for (Map.Entry ent : expectedXAttrs.entrySet()) { + fs.setXAttr(usePath, ent.getKey(), ent.getValue(), + EnumSet.of(XAttrSetFlag.CREATE)); + } + xattrs = fs.getXAttrs(usePath); + Assert.assertEquals(xattrs.size(), expectedXAttrs.size()); + for (Map.Entry ent : expectedXAttrs.entrySet()) { + final byte[] val = + (ent.getValue() == null) ? new byte[0] : ent.getValue(); + Assert.assertArrayEquals(val, xattrs.get(ent.getKey())); + } restart(false); initFileSystem(); - xattrs = fs.getXAttrs(path); - Assert.assertEquals(xattrs.size(), 2); - Assert.assertArrayEquals(value1, xattrs.get(name1)); - Assert.assertArrayEquals(new byte[0], xattrs.get(name2)); + xattrs = fs.getXAttrs(usePath); + Assert.assertEquals(xattrs.size(), expectedXAttrs.size()); + for (Map.Entry ent : expectedXAttrs.entrySet()) { + final byte[] val = + (ent.getValue() == null) ? new byte[0] : ent.getValue(); + Assert.assertArrayEquals(val, xattrs.get(ent.getKey())); + } restart(true); initFileSystem(); - xattrs = fs.getXAttrs(path); - Assert.assertEquals(xattrs.size(), 2); - Assert.assertArrayEquals(value1, xattrs.get(name1)); - Assert.assertArrayEquals(new byte[0], xattrs.get(name2)); - - fs.removeXAttr(path, name1); - fs.removeXAttr(path, name2); + xattrs = fs.getXAttrs(usePath); + Assert.assertEquals(xattrs.size(), expectedXAttrs.size()); + for (Map.Entry ent : expectedXAttrs.entrySet()) { + final byte[] val = + (ent.getValue() == null) ? new byte[0] : ent.getValue(); + Assert.assertArrayEquals(val, xattrs.get(ent.getKey())); + } + + for (Map.Entry ent : expectedXAttrs.entrySet()) { + fs.removeXAttr(usePath, ent.getKey()); + } } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java index d65d1ff5be1..c32ed67d6e4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java @@ -223,7 +223,7 @@ public class NameNodeAdapter { * if safemode is not running. 
*/ public static int getSafeModeSafeBlocks(NameNode nn) { - SafeModeInfo smi = nn.getNamesystem().getSafeModeInfo(); + SafeModeInfo smi = nn.getNamesystem().getSafeModeInfoForTests(); if (smi == null) { return -1; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored index 3b66d43e8d0..ecfbb9f5fca 100644 Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored and b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored differ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml index c8eecc30006..8cafa9f2fa0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml @@ -1,6 +1,6 @@ - -59 + -60 OP_START_LOG_SEGMENT @@ -13,8 +13,8 @@ 2 1 - 1410634509611 - 201e2572e03d089c + 1410915997709 + 309e81e09dc6c75a @@ -24,8 +24,8 @@ 3 2 - 1410634509614 - ef4542e27fb38301 + 1410915997711 + 8a2399843e754bee @@ -37,10 +37,10 @@ 16386 /file_create 1 - 1409943310217 - 1409943310217 + 1410224798292 + 1410224798292 512 - DFSClient_NONMAPREDUCE_588677218_1 + DFSClient_NONMAPREDUCE_1374813776_1 127.0.0.1 true @@ -48,7 +48,7 @@ supergroup 420 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 5 @@ -60,8 +60,8 @@ 0 /file_create 1 - 1409943310240 - 1409943310217 + 1410224798315 + 1410224798292 512 @@ -88,8 +88,8 @@ 0 /file_create /file_moved - 1409943310247 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + 1410224798322 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 8 @@ -99,8 +99,8 @@ 8 0 /file_moved - 1409943310253 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + 1410224798328 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 9 @@ -111,7 +111,7 @@ 0 16387 /directory_mkdir - 1409943310260 + 1410224798335 jing supergroup @@ -146,7 +146,7 @@ 13 /directory_mkdir snapshot1 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 14 @@ -157,7 +157,7 @@ /directory_mkdir snapshot1 snapshot2 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 15 @@ -167,7 +167,7 @@ 15 /directory_mkdir snapshot2 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 16 @@ -179,10 +179,10 @@ 16388 /file_create 1 - 1409943310284 - 1409943310284 + 1410224798359 + 1410224798359 512 - DFSClient_NONMAPREDUCE_588677218_1 + DFSClient_NONMAPREDUCE_1374813776_1 127.0.0.1 true @@ -190,7 +190,7 @@ supergroup 420 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 17 @@ -202,8 +202,8 @@ 0 /file_create 1 - 1409943310286 - 1409943310284 + 1410224798361 + 1410224798359 512 @@ -265,9 +265,9 @@ 0 /file_create /file_moved - 1409943310302 + 1410224798379 NONE - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 24 @@ -279,10 +279,10 @@ 16389 /file_concat_target 1 - 1409943310306 - 1409943310306 + 1410224798382 + 1410224798382 512 - DFSClient_NONMAPREDUCE_588677218_1 + DFSClient_NONMAPREDUCE_1374813776_1 127.0.0.1 true @@ -290,7 +290,7 @@ supergroup 420 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 26 @@ -396,8 +396,8 @@ 0 /file_concat_target 1 - 1409943310402 - 1409943310306 + 1410224798476 + 1410224798382 512 @@ -432,10 +432,10 @@ 16390 /file_concat_0 1 - 1409943310407 - 1409943310407 + 1410224798479 + 1410224798479 512 - DFSClient_NONMAPREDUCE_588677218_1 + DFSClient_NONMAPREDUCE_1374813776_1 127.0.0.1 true @@ 
-443,7 +443,7 @@ supergroup 420 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 39 @@ -549,8 +549,8 @@ 0 /file_concat_0 1 - 1409943310430 - 1409943310407 + 1410224798501 + 1410224798479 512 @@ -585,10 +585,10 @@ 16391 /file_concat_1 1 - 1409943310434 - 1409943310434 + 1410224798504 + 1410224798504 512 - DFSClient_NONMAPREDUCE_588677218_1 + DFSClient_NONMAPREDUCE_1374813776_1 127.0.0.1 true @@ -596,7 +596,7 @@ supergroup 420 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 51 @@ -702,8 +702,8 @@ 0 /file_concat_1 1 - 1409943310457 - 1409943310434 + 1410224798530 + 1410224798504 512 @@ -736,12 +736,12 @@ 57 0 /file_concat_target - 1409943310460 + 1410224798533 /file_concat_0 /file_concat_1 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 62 @@ -753,14 +753,14 @@ 16392 /file_symlink /file_concat_target - 1409943310463 - 1409943310463 + 1410224798537 + 1410224798537 jing supergroup 511 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 63 @@ -772,10 +772,10 @@ 16393 /hard-lease-recovery-test 1 - 1409943310466 - 1409943310466 + 1410224798540 + 1410224798540 512 - DFSClient_NONMAPREDUCE_588677218_1 + DFSClient_NONMAPREDUCE_1374813776_1 127.0.0.1 true @@ -783,7 +783,7 @@ supergroup 420 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 64 @@ -840,7 +840,7 @@ OP_REASSIGN_LEASE 65 - DFSClient_NONMAPREDUCE_588677218_1 + DFSClient_NONMAPREDUCE_1374813776_1 /hard-lease-recovery-test HDFS_NameNode @@ -853,8 +853,8 @@ 0 /hard-lease-recovery-test 1 - 1409943313176 - 1409943310466 + 1410224801265 + 1410224798540 512 @@ -881,7 +881,7 @@ 493 9223372036854775807 2305843009213693951 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 71 @@ -891,7 +891,7 @@ 68 pool1 99 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 72 @@ -903,8 +903,8 @@ /path 1 pool1 - 2305844419157007447 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + 2305844419438495525 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 73 @@ -914,7 +914,7 @@ 70 1 2 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 74 @@ -923,7 +923,7 @@ 71 1 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 75 @@ -932,7 +932,7 @@ 72 pool1 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 76 @@ -979,7 +979,7 @@ a1 0x313233 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 78 @@ -993,7 +993,7 @@ a2 0x373839 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 79 @@ -1006,7 +1006,7 @@ USER a2 - e33c0cc9-12b1-49a2-80eb-4d35d3b79960 + b53e8d0a-8d92-4067-b8c8-637ac951bac2 80 @@ -1014,14 +1014,14 @@ OP_ROLLING_UPGRADE_START 77 - 1409943313537 + 1410224801616 OP_ROLLING_UPGRADE_FINALIZE 78 - 1409943313537 + 1410224801616 diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java index fb702059ade..da267a1c1b5 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java @@ -179,26 +179,8 @@ public class MRAMSimulator extends AMSimulator { return rm.getApplicationMasterService().allocate(request); } }); - - // waiting until the AM container is allocated - while (true) { - if (response != 
null && ! response.getAllocatedContainers().isEmpty()) { - // get AM container - Container container = response.getAllocatedContainers().get(0); - se.getNmMap().get(container.getNodeId()) - .addNewContainer(container, -1L); - // start AM container - amContainer = container; - LOG.debug(MessageFormat.format("Application {0} starts its " + - "AM container ({1}).", appId, amContainer.getId())); - isAMContainerRunning = true; - break; - } - // this sleep time is different from HeartBeat - Thread.sleep(1000); - // send out empty request - sendContainerRequest(); - response = responseQueue.take(); + if (response != null) { + responseQueue.put(response); } } @@ -206,6 +188,26 @@ public class MRAMSimulator extends AMSimulator { @SuppressWarnings("unchecked") protected void processResponseQueue() throws InterruptedException, YarnException, IOException { + // Check whether receive the am container + if (!isAMContainerRunning) { + if (!responseQueue.isEmpty()) { + AllocateResponse response = responseQueue.take(); + if (response != null + && !response.getAllocatedContainers().isEmpty()) { + // Get AM container + Container container = response.getAllocatedContainers().get(0); + se.getNmMap().get(container.getNodeId()) + .addNewContainer(container, -1L); + // Start AM container + amContainer = container; + LOG.debug(MessageFormat.format("Application {0} starts its " + + "AM container ({1}).", appId, amContainer.getId())); + isAMContainerRunning = true; + } + } + return; + } + while (! responseQueue.isEmpty()) { AllocateResponse response = responseQueue.take(); @@ -262,6 +264,7 @@ public class MRAMSimulator extends AMSimulator { LOG.debug(MessageFormat.format("Application {0} sends out event " + "to clean up its AM container.", appId)); isFinished = true; + break; } // check allocated containers diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index d54fcd6fef1..7eaf1c805d0 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -15,6 +15,9 @@ Trunk - Unreleased YARN-524 TestYarnVersionInfo failing if generated properties doesn't include an SVN URL. (stevel) + YARN-1471. The SLS simulator is not running the preemption policy + for CapacityScheduler (Carlo Curino via cdouglas) + YARN-2216 TestRMApplicationHistoryWriter sometimes fails in trunk. (Zhijie Shen via xgong) @@ -299,6 +302,9 @@ Release 2.6.0 - UNRELEASED YARN-2519. Credential Provider related unit tests failed on Windows. (Xiaoyu Yao via cnauroth) + YARN-2526. SLS can deadlock when all the threads are taken by AMSimulators. + (Wei Yan via kasha) + Release 2.5.1 - UNRELEASED INCOMPATIBLE CHANGES
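
Editor's note (not part of the patch): the TestDataNodeRollingUpgrade changes above replace fixed
literal paths such as /TestDataNodeRollingUpgrade1.dat with paths derived from
GenericTestUtils.getMethodName(). The sketch below is a minimal illustration of that convention;
the class name TestUniqueTestPaths, the fixture field fs, and the constants are illustrative
assumptions rather than code from this patch, and a real test would obtain fs from a running
MiniDFSCluster as the patched tests do.

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.test.GenericTestUtils;
    import org.junit.Test;

    public class TestUniqueTestPaths {
      private FileSystem fs;                      // hypothetical fixture, e.g. from a MiniDFSCluster
      private static final int FILE_SIZE = 1024;  // illustrative constants, not from the patch
      private static final short REPL_FACTOR = 1;
      private static final long SEED = 0x1BADF00DL;

      @Test
      public void testSomething() throws Exception {
        // getMethodName() returns the name of the calling test method, so this
        // resolves to "/testSomething.01.dat" and cannot collide with files
        // created by other test methods in the same suite or by reruns.
        Path testFile1 = new Path("/" + GenericTestUtils.getMethodName() + ".01.dat");
        DFSTestUtil.createFile(fs, testFile1, FILE_SIZE, REPL_FACTOR, SEED);
      }
    }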