Merge changes from trunk

This commit is contained in: commit db41a1b7b9

hadoop-common-project/hadoop-common/CHANGES.txt

@@ -319,9 +319,6 @@ Trunk (Unreleased)
     HADOOP-10840. Fix OutOfMemoryError caused by metrics system in Azure File
     System. (Shanyu Zhao via cnauroth)

-    HADOOP-10925. Compilation fails in native link0 function on Windows.
-    (cnauroth)
-
     HADOOP-11002. shell escapes are incompatible with previous releases (aw)

     HADOOP-10996. Stop violence in the *_HOME (aw)

@@ -509,6 +506,9 @@ Release 2.6.0 - UNRELEASED
     HADOOP-11070. Create MiniKMS for testing. (tucu)

+    HADOOP-11057. checknative command to probe for winutils.exe on windows.
+    (Xiaoyu Yao via cnauroth)
+
   OPTIMIZATIONS

     HADOOP-10838. Byte array native checksumming. (James Thomas via todd)

@@ -774,6 +774,9 @@ Release 2.6.0 - UNRELEASED
     HADOOP-11071. KMSClientProvider should drain the local generated EEK cache
     on key rollover. (tucu)

+    HADOOP-10925. Compilation fails in native link0 function on Windows.
+    (cnauroth)
+
 Release 2.5.1 - UNRELEASED

   INCOMPATIBLE CHANGES

NativeLibraryChecker.java

@@ -37,7 +37,8 @@ public class NativeLibraryChecker {
  public static void main(String[] args) {
    String usage = "NativeLibraryChecker [-a|-h]\n"
        + " -a use -a to check all libraries are available\n"
-       + " by default just check hadoop library is available\n"
+       + " by default just check hadoop library (and\n"
+       + " winutils.exe on Windows OS) is available\n"
        + " exit with error code 1 if check failed\n"
        + " -h print this message\n";
    if (args.length > 1 ||

@@ -62,12 +63,16 @@ public class NativeLibraryChecker {
    boolean lz4Loaded = nativeHadoopLoaded;
    boolean bzip2Loaded = Bzip2Factory.isNativeBzip2Loaded(conf);
    boolean openSslLoaded = false;
+   boolean winutilsExists = false;
+
    String openSslDetail = "";
    String hadoopLibraryName = "";
    String zlibLibraryName = "";
    String snappyLibraryName = "";
    String lz4LibraryName = "";
    String bzip2LibraryName = "";
+   String winutilsPath = null;
+
    if (nativeHadoopLoaded) {
      hadoopLibraryName = NativeCodeLoader.getLibraryName();
      zlibLoaded = ZlibFactory.isNativeZlibLoaded(conf);

@@ -93,6 +98,15 @@ public class NativeLibraryChecker {
        bzip2LibraryName = Bzip2Factory.getLibraryName(conf);
      }
    }
+
+   // winutils.exe is required on Windows
+   winutilsPath = Shell.getWinUtilsPath();
+   if (winutilsPath != null) {
+     winutilsExists = true;
+   } else {
+     winutilsPath = "";
+   }
+
    System.out.println("Native library checking:");
    System.out.printf("hadoop: %b %s\n", nativeHadoopLoaded, hadoopLibraryName);
    System.out.printf("zlib: %b %s\n", zlibLoaded, zlibLibraryName);

@@ -100,7 +114,11 @@ public class NativeLibraryChecker {
    System.out.printf("lz4: %b %s\n", lz4Loaded, lz4LibraryName);
    System.out.printf("bzip2: %b %s\n", bzip2Loaded, bzip2LibraryName);
    System.out.printf("openssl: %b %s\n", openSslLoaded, openSslDetail);
-   if ((!nativeHadoopLoaded) ||
+   if (Shell.WINDOWS) {
+     System.out.printf("winutils: %b %s\n", winutilsExists, winutilsPath);
+   }
+
+   if ((!nativeHadoopLoaded) || (Shell.WINDOWS && (!winutilsExists)) ||
        (checkAll && !(zlibLoaded && snappyLoaded && lz4Loaded && bzip2Loaded))) {
      // return 1 to indicate check failed
      ExitUtil.terminate(1);

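For context, the printf calls above emit one line per probed library, and the new winutils line appears only on Windows. On a Windows host where everything is found, `hadoop checknative` output would look roughly like this sketch (the paths are hypothetical):

    Native library checking:
    hadoop: true C:\hadoop\bin\hadoop.dll
    zlib: true zlib1.dll
    snappy: false
    lz4: true
    bzip2: false
    openssl: false
    winutils: true C:\hadoop\bin\winutils.exe

Without -a, only a missing hadoop library (or, after this change, a missing winutils.exe on Windows) drives the exit code to 1; with -a, any missing compression library does as well.
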
TestNativeLibraryChecker.java

@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.util;

+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+
 import junit.framework.TestCase;

 import org.apache.hadoop.util.ExitUtil.ExitException;

@@ -51,4 +54,30 @@ public class TestNativeLibraryChecker extends TestCase {
    }
  }

+ @Test
+ public void testNativeLibraryCheckerOutput(){
+   expectOutput(new String[]{"-a"});
+   // no argument
+   expectOutput(new String[0]);
+ }
+
+ private void expectOutput(String [] args) {
+   ExitUtil.disableSystemExit();
+   ByteArrayOutputStream outContent = new ByteArrayOutputStream();
+   PrintStream originalPs = System.out;
+   System.setOut(new PrintStream(outContent));
+   try {
+     NativeLibraryChecker.main(args);
+   } catch (ExitException e) {
+     ExitUtil.resetFirstExitException();
+   } finally {
+     if (Shell.WINDOWS) {
+       assertEquals(outContent.toString().indexOf("winutils: true") != -1, true);
+     }
+     if (NativeCodeLoader.isNativeCodeLoaded()) {
+       assertEquals(outContent.toString().indexOf("hadoop: true") != -1, true);
+     }
+     System.setOut(originalPs);
+   }
+ }
}

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -153,9 +153,6 @@ Trunk (Unreleased)
     HDFS-6609. Use DirectorySnapshottableFeature to represent a snapshottable
     directory. (Jing Zhao via wheat9)

-    HDFS-6482. Use block ID-based block layout on datanodes (James Thomas via
-    Colin Patrick McCabe)
-
   OPTIMIZATIONS

   BUG FIXES

@@ -285,6 +282,8 @@ Trunk (Unreleased)
     HDFS-6893. crypto subcommand is not sorted properly in hdfs's hadoop_usage
     (David Luo via aw)

+    HDFS-6981. Fix DN upgrade with layout version change. (Arpit Agarwal)
+
 Release 2.6.0 - UNRELEASED

   INCOMPATIBLE CHANGES

@@ -470,14 +469,15 @@ Release 2.6.0 - UNRELEASED
     HDFS-6376. Distcp data between two HA clusters requires another configuration.
     (Dave Marion and Haohui Mai via jing9)

-    HDFS-6940. Refactoring to allow ConsensusNode implementation. (shv)
-
     HDFS-6943. Improve NN allocateBlock log to include replicas' datanode IPs.
     (Ming Ma via wheat9)

     HDFS-6036. Forcibly timeout misbehaving DFSClients that try to do
     no-checksum reads that extend too long (cmccabe)

+    HDFS-6482. Use block ID-based block layout on datanodes (James Thomas via
+    Colin Patrick McCabe)
+
   OPTIMIZATIONS

     HDFS-6690. Deduplicate xattr names in memory. (wang)

@@ -652,6 +652,12 @@ Release 2.6.0 - UNRELEASED

     HDFS-7005. DFS input streams do not timeout.

+    HDFS-6951. Correctly persist raw namespace xattrs to edit log and fsimage.
+    (clamb via wang)
+
+    HDFS-6506. Newly moved block replica been invalidated and deleted in
+    TestBalancer. (Binglin Chang via cnauroth)
+
   BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS

     HDFS-6387. HDFS CLI admin tool for creating & deleting an

Balancer.java

@@ -537,9 +537,11 @@ public class Balancer {
   */
  static int run(Collection<URI> namenodes, final Parameters p,
      Configuration conf) throws IOException, InterruptedException {
-   final long sleeptime = 2000*conf.getLong(
-       DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
-       DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT);
+   final long sleeptime =
+       conf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
+           DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 2000 +
+       conf.getLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
+           DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000;
    LOG.info("namenodes = " + namenodes);
    LOG.info("parameters = " + p);

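To put numbers on the new formula: assuming the stock defaults of 3 seconds for both dfs.heartbeat.interval and dfs.namenode.replication.interval (check DFSConfigKeys for the build in question), the old delay was 3 * 2000 = 6000 ms per iteration, while the new one is 3 * 2000 + 3 * 1000 = 9000 ms, i.e. two heartbeat periods plus one replication-monitor period, so the balancer now also waits out a NameNode replication pass before re-querying block locations.
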
BlockManager.java

@@ -165,7 +165,7 @@ public class BlockManager {
  final BlocksMap blocksMap;

  /** Replication thread. */
- Daemon replicationThread;
+ final Daemon replicationThread = new Daemon(new ReplicationMonitor());

  /** Store blocks -> datanodedescriptor(s) map of corrupt replicas */
  final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();

@@ -265,7 +265,6 @@ public class BlockManager {
    this.namesystem = namesystem;
    datanodeManager = new DatanodeManager(this, namesystem, conf);
    heartbeatManager = datanodeManager.getHeartbeatManager();
-   setReplicationMonitor(new ReplicationMonitor());

    final long pendingPeriod = conf.getLong(
        DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,

@@ -403,22 +402,6 @@ public class BlockManager {
    return storagePolicySuite.getPolicy(policyName);
  }

- public long getReplicationRecheckInterval() {
-   return replicationRecheckInterval;
- }
-
- public AtomicLong excessBlocksCount() {
-   return excessBlocksCount;
- }
-
- public void clearInvalidateBlocks() {
-   invalidateBlocks.clear();
- }
-
- void setReplicationMonitor(Runnable replicationMonitor) {
-   replicationThread = new Daemon(replicationMonitor);
- }
-
  public void setBlockPoolId(String blockPoolId) {
    if (isBlockTokenEnabled()) {
      blockTokenSecretManager.setBlockPoolId(blockPoolId);

@@ -1664,7 +1647,7 @@ public class BlockManager {
   * If there were any replication requests that timed out, reap them
   * and put them back into the neededReplication queue
   */
- void processPendingReplications() {
+ private void processPendingReplications() {
    Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
    if (timedOutItems != null) {
      namesystem.writeLock();

DatanodeManager.java

@@ -1053,7 +1053,7 @@ public class DatanodeManager {
   * 3. Added to exclude --> start decommission.
   * 4. Removed from exclude --> stop decommission.
   */
- void refreshDatanodes() {
+ private void refreshDatanodes() {
    for(DatanodeDescriptor node : datanodeMap.values()) {
      // Check if not include.
      if (!hostFileManager.isIncluded(node)) {

@@ -1586,9 +1586,5 @@ public class DatanodeManager {
  public void setShouldSendCachingCommands(boolean shouldSendCachingCommands) {
    this.shouldSendCachingCommands = shouldSendCachingCommands;
  }
-
- public HostFileManager getHostFileManager() {
-   return this.hostFileManager;
- }
}

HostFileManager.java

@@ -129,10 +129,6 @@ class HostFileManager {
  void refresh(String includeFile, String excludeFile) throws IOException {
    HostSet newIncludes = readFile("included", includeFile);
    HostSet newExcludes = readFile("excluded", excludeFile);
-   setHosts(newIncludes, newExcludes);
- }
-
- void setHosts(HostSet newIncludes, HostSet newExcludes) {
    synchronized (this) {
      includes = newIncludes;
      excludes = newExcludes;

BPOfferService.java

@@ -474,11 +474,14 @@ class BPOfferService {
   * Signal the current rolling upgrade status as indicated by the NN.
   * @param inProgress true if a rolling upgrade is in progress
   */
- void signalRollingUpgrade(boolean inProgress) {
+ void signalRollingUpgrade(boolean inProgress) throws IOException {
+   String bpid = getBlockPoolId();
    if (inProgress) {
-     dn.getFSDataset().enableTrash(getBlockPoolId());
+     dn.getFSDataset().enableTrash(bpid);
+     dn.getFSDataset().setRollingUpgradeMarker(bpid);
    } else {
-     dn.getFSDataset().restoreTrash(getBlockPoolId());
+     dn.getFSDataset().restoreTrash(bpid);
+     dn.getFSDataset().clearRollingUpgradeMarker(bpid);
    }
  }

BPServiceActor.java

@@ -627,7 +627,7 @@ class BPServiceActor implements Runnable {
    bpos.shutdownActor(this);
  }

- private void handleRollingUpgradeStatus(HeartbeatResponse resp) {
+ private void handleRollingUpgradeStatus(HeartbeatResponse resp) throws IOException {
    RollingUpgradeStatus rollingUpgradeStatus = resp.getRollingUpdateStatus();
    if (rollingUpgradeStatus != null &&
        rollingUpgradeStatus.getBlockPoolId().compareTo(bpos.getBlockPoolId()) != 0) {

BlockPoolSliceStorage.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs.server.datanode;

+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;

@@ -38,8 +39,11 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Properties;
+import java.util.Set;
+import java.util.Collections;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;

@@ -62,6 +66,18 @@ import java.util.regex.Pattern;
 public class BlockPoolSliceStorage extends Storage {
  static final String TRASH_ROOT_DIR = "trash";

+ /**
+  * A marker file that is created on each root directory if a rolling upgrade
+  * is in progress. The NN does not inform the DN when a rolling upgrade is
+  * finalized. All the DN can infer is whether or not a rolling upgrade is
+  * currently in progress. When the rolling upgrade is not in progress:
+  *   1. If the marker file is present, then a rolling upgrade just completed.
+  *      If a 'previous' directory exists, it can be deleted now.
+  *   2. If the marker file is absent, then a regular upgrade may be in
+  *      progress. Do not delete the 'previous' directory.
+  */
+ static final String ROLLING_UPGRADE_MARKER_FILE = "RollingUpgradeInProgress";
+
  private static final String BLOCK_POOL_ID_PATTERN_BASE =
      Pattern.quote(File.separator) +
      "BP-\\d+-\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}-\\d+" +

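The marker-file inference documented above can be read as a tiny decision procedure. A minimal sketch (not HDFS source; the names and the elided deletion call are illustrative only) of what a DN could do for one storage root once the NN reports that no rolling upgrade is in progress:

    import java.io.File;

    class RollingUpgradeMarkerSketch {
      static final String MARKER = "RollingUpgradeInProgress";

      /** Invoked when the NN heartbeat says no rolling upgrade is running. */
      static void onNoRollingUpgrade(File bpRoot, File previousDir) {
        File marker = new File(bpRoot, MARKER);
        if (marker.exists()) {
          // Case 1: marker present => a rolling upgrade just finished, so a
          // leftover 'previous' directory may be discarded (finalized).
          // (Recursive deletion elided here.)
          marker.delete();
        }
        // Case 2: marker absent => a regular upgrade may be underway;
        // 'previous' must be left alone so a rollback stays possible.
      }
    }
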
@@ -83,6 +99,13 @@ public class BlockPoolSliceStorage extends Storage {
    blockpoolID = bpid;
  }

+ /**
+  * These maps are used as an optimization to avoid one filesystem operation
+  * per storage on each heartbeat response.
+  */
+ private static Set<String> storagesWithRollingUpgradeMarker;
+ private static Set<String> storagesWithoutRollingUpgradeMarker;
+
  BlockPoolSliceStorage(int namespaceID, String bpID, long cTime,
      String clusterId) {
    super(NodeType.DATA_NODE);

@@ -90,10 +113,18 @@ public class BlockPoolSliceStorage extends Storage {
    this.blockpoolID = bpID;
    this.cTime = cTime;
    this.clusterID = clusterId;
+   storagesWithRollingUpgradeMarker = Collections.newSetFromMap(
+       new ConcurrentHashMap<String, Boolean>());
+   storagesWithoutRollingUpgradeMarker = Collections.newSetFromMap(
+       new ConcurrentHashMap<String, Boolean>());
  }

  private BlockPoolSliceStorage() {
    super(NodeType.DATA_NODE);
+   storagesWithRollingUpgradeMarker = Collections.newSetFromMap(
+       new ConcurrentHashMap<String, Boolean>());
+   storagesWithoutRollingUpgradeMarker = Collections.newSetFromMap(
+       new ConcurrentHashMap<String, Boolean>());
  }

  /**

@@ -270,13 +301,9 @@ public class BlockPoolSliceStorage extends Storage {
  private void doTransition(DataNode datanode, StorageDirectory sd,
      NamespaceInfo nsInfo, StartupOption startOpt) throws IOException {
    if (startOpt == StartupOption.ROLLBACK && sd.getPreviousDir().exists()) {
-     // we will already restore everything in the trash by rolling back to
-     // the previous directory, so we must delete the trash to ensure
-     // that it's not restored by BPOfferService.signalRollingUpgrade()
-     if (!FileUtil.fullyDelete(getTrashRootDir(sd))) {
-       throw new IOException("Unable to delete trash directory prior to " +
-           "restoration of previous directory: " + getTrashRootDir(sd));
-     }
+     Preconditions.checkState(!getTrashRootDir(sd).exists(),
+         sd.getPreviousDir() + " and " + getTrashRootDir(sd) + " should not " +
+         " both be present.");
      doRollback(sd, nsInfo); // rollback if applicable
    } else {
      // Restore all the files in the trash. The restored files are retained

@@ -440,10 +467,18 @@ public class BlockPoolSliceStorage extends Storage {
      }

      final File newChild = new File(restoreDirectory, child.getName());
-     if (!child.renameTo(newChild)) {
+
+     if (newChild.exists() && newChild.length() >= child.length()) {
+       // Failsafe - we should not hit this case but let's make sure
+       // we never overwrite a newer version of a block file with an
+       // older version.
+       LOG.info("Not overwriting " + newChild + " with smaller file from " +
+           "trash directory. This message can be safely ignored.");
+     } else if (!child.renameTo(newChild)) {
        throw new IOException("Failed to rename " + child + " to " + newChild);
+     } else {
+       ++filesRestored;
      }
-     ++filesRestored;
    }
    FileUtil.fullyDelete(trashRoot);
    return filesRestored;

@@ -599,6 +634,18 @@ public class BlockPoolSliceStorage extends Storage {
    return new File(sd.getRoot(), TRASH_ROOT_DIR);
  }

+ /**
+  * Determine whether we can use trash for the given blockFile. Trash
+  * is disallowed if a 'previous' directory exists for the
+  * storage directory containing the block.
+  */
+ @VisibleForTesting
+ public boolean isTrashAllowed(File blockFile) {
+   Matcher matcher = BLOCK_POOL_CURRENT_PATH_PATTERN.matcher(blockFile.getParent());
+   String previousDir = matcher.replaceFirst("$1$2" + STORAGE_DIR_PREVIOUS);
+   return !(new File(previousDir)).exists();
+ }
+
  /**
   * Get a target subdirectory under trash/ for a given block file that is being
   * deleted.

@@ -609,9 +656,12 @@ public class BlockPoolSliceStorage extends Storage {
   * @return the trash directory for a given block file that is being deleted.
   */
  public String getTrashDirectory(File blockFile) {
-   Matcher matcher = BLOCK_POOL_CURRENT_PATH_PATTERN.matcher(blockFile.getParent());
-   String trashDirectory = matcher.replaceFirst("$1$2" + TRASH_ROOT_DIR + "$4");
-   return trashDirectory;
+   if (isTrashAllowed(blockFile)) {
+     Matcher matcher = BLOCK_POOL_CURRENT_PATH_PATTERN.matcher(blockFile.getParent());
+     String trashDirectory = matcher.replaceFirst("$1$2" + TRASH_ROOT_DIR + "$4");
+     return trashDirectory;
+   }
+   return null;
  }

  /**

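To make the path rewriting above concrete with a hypothetical layout (the exact components come from the regex groups of BLOCK_POOL_CURRENT_PATH_PATTERN): for a block file such as

    /data/dn/current/BP-1-127.0.0.1-1000/current/finalized/subdir0/blk_100

getTrashDirectory() swaps the block pool's inner 'current' component for 'trash', yielding

    /data/dn/current/BP-1-127.0.0.1-1000/trash/finalized/subdir0

while isTrashAllowed() probes the sibling

    /data/dn/current/BP-1-127.0.0.1-1000/previous

and, when that directory exists (a layout upgrade is pending for the storage), getTrashDirectory() now returns null so the caller deletes the block outright instead of trashing it.
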
@@ -638,6 +688,7 @@ public class BlockPoolSliceStorage extends Storage {
    for (StorageDirectory sd : storageDirs) {
      File trashRoot = getTrashRootDir(sd);
      try {
+       Preconditions.checkState(!(trashRoot.exists() && sd.getPreviousDir().exists()));
        restoreBlockFilesFromTrash(trashRoot);
        FileUtil.fullyDelete(getTrashRootDir(sd));
      } catch (IOException ioe) {

@@ -656,4 +707,49 @@ public class BlockPoolSliceStorage extends Storage {
    }
    return false;
  }
+
+ /**
+  * Create a rolling upgrade marker file for each BP storage root, if it
+  * does not exist already.
+  */
+ public void setRollingUpgradeMarkers(List<StorageDirectory> dnStorageDirs)
+     throws IOException {
+   for (StorageDirectory sd : dnStorageDirs) {
+     File bpRoot = getBpRoot(blockpoolID, sd.getCurrentDir());
+     File markerFile = new File(bpRoot, ROLLING_UPGRADE_MARKER_FILE);
+     if (!storagesWithRollingUpgradeMarker.contains(bpRoot.toString())) {
+       if (!markerFile.exists() && markerFile.createNewFile()) {
+         LOG.info("Created " + markerFile);
+       } else {
+         LOG.info(markerFile + " already exists.");
+       }
+       storagesWithRollingUpgradeMarker.add(bpRoot.toString());
+       storagesWithoutRollingUpgradeMarker.remove(bpRoot.toString());
+     }
+   }
+ }
+
+ /**
+  * Check whether the rolling upgrade marker file exists for each BP storage
+  * root. If it does exist, then the marker file is cleared and more
+  * importantly the layout upgrade is finalized.
+  */
+ public void clearRollingUpgradeMarkers(List<StorageDirectory> dnStorageDirs)
+     throws IOException {
+   for (StorageDirectory sd : dnStorageDirs) {
+     File bpRoot = getBpRoot(blockpoolID, sd.getCurrentDir());
+     File markerFile = new File(bpRoot, ROLLING_UPGRADE_MARKER_FILE);
+     if (!storagesWithoutRollingUpgradeMarker.contains(bpRoot.toString())) {
+       if (markerFile.exists()) {
+         LOG.info("Deleting " + markerFile);
+         doFinalize(sd.getCurrentDir());
+         if (!markerFile.delete()) {
+           LOG.warn("Failed to delete " + markerFile);
+         }
+       }
+       storagesWithoutRollingUpgradeMarker.add(bpRoot.toString());
+       storagesWithRollingUpgradeMarker.remove(bpRoot.toString());
+     }
+   }
+ }
}

DataStorage.java

@@ -83,11 +83,17 @@ public class DataStorage extends Storage {
  public final static String STORAGE_DIR_FINALIZED = "finalized";
  public final static String STORAGE_DIR_TMP = "tmp";

- // Set of bpids for which 'trash' is currently enabled.
- // When trash is enabled block files are moved under a separate
- // 'trash' folder instead of being deleted right away. This can
- // be useful during rolling upgrades, for example.
- // The set is backed by a concurrent HashMap.
+ /**
+  * Set of bpids for which 'trash' is currently enabled.
+  * When trash is enabled block files are moved under a separate
+  * 'trash' folder instead of being deleted right away. This can
+  * be useful during rolling upgrades, for example.
+  * The set is backed by a concurrent HashMap.
+  *
+  * Even if trash is enabled, it is not used if a layout upgrade
+  * is in progress for a storage directory i.e. if the previous
+  * directory exists.
+  */
  private Set<String> trashEnabledBpids;

  /**

@@ -136,7 +142,9 @@ public class DataStorage extends Storage {
  }

  /**
-  * Enable trash for the specified block pool storage.
+  * Enable trash for the specified block pool storage. Even if trash is
+  * enabled by the caller, it is superseded by the 'previous' directory
+  * if a layout upgrade is in progress.
   */
  public void enableTrash(String bpid) {
    if (trashEnabledBpids.add(bpid)) {

@@ -156,6 +164,14 @@ public class DataStorage extends Storage {
    return trashEnabledBpids.contains(bpid);
  }

+ public void setRollingUpgradeMarker(String bpid) throws IOException {
+   getBPStorage(bpid).setRollingUpgradeMarkers(storageDirs);
+ }
+
+ public void clearRollingUpgradeMarker(String bpid) throws IOException {
+   getBPStorage(bpid).clearRollingUpgradeMarkers(storageDirs);
+ }
+
  /**
   * If rolling upgrades are in progress then do not delete block files
   * immediately. Instead we move the block files to an intermediate

@@ -688,7 +704,8 @@ public class DataStorage extends Storage {
    if (DataNodeLayoutVersion.supports(
        LayoutVersion.Feature.FEDERATION, layoutVersion)) {
      // The VERSION file is already read in. Override the layoutVersion
-     // field and overwrite the file.
+     // field and overwrite the file. The upgrade work is handled by
+     // {@link BlockPoolSliceStorage#doUpgrade}
      LOG.info("Updating layout version from " + layoutVersion + " to "
          + HdfsConstants.DATANODE_LAYOUT_VERSION + " for storage "
          + sd.getRoot());

FsDatasetSpi.java

@@ -443,6 +443,17 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
   */
  public boolean trashEnabled(String bpid);

+ /**
+  * Create a marker file indicating that a rolling upgrade is in progress.
+  */
+ public void setRollingUpgradeMarker(String bpid) throws IOException;
+
+ /**
+  * Delete the rolling upgrade marker file if it exists.
+  * @param bpid
+  */
+ public void clearRollingUpgradeMarker(String bpid) throws IOException;
+
  /**
   * submit a sync_file_range request to AsyncDiskService
   */

FsDatasetImpl.java

@@ -2039,6 +2039,16 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    return dataStorage.trashEnabled(bpid);
  }

+ @Override
+ public void setRollingUpgradeMarker(String bpid) throws IOException {
+   dataStorage.setRollingUpgradeMarker(bpid);
+ }
+
+ @Override
+ public void clearRollingUpgradeMarker(String bpid) throws IOException {
+   dataStorage.clearRollingUpgradeMarker(bpid);
+ }
+
  @Override
  public RollingLogs createRollingLogs(String bpid, String prefix
      ) throws IOException {

EncryptionZoneManager.java

@@ -111,6 +111,18 @@ public class EncryptionZoneManager {
   */
  void addEncryptionZone(Long inodeId, String keyName) {
    assert dir.hasWriteLock();
+   unprotectedAddEncryptionZone(inodeId, keyName);
+ }
+
+ /**
+  * Add a new encryption zone.
+  * <p/>
+  * Does not assume that the FSDirectory lock is held.
+  *
+  * @param inodeId of the encryption zone
+  * @param keyName encryption zone key name
+  */
+ void unprotectedAddEncryptionZone(Long inodeId, String keyName) {
    final EncryptionZoneInt ez = new EncryptionZoneInt(inodeId, keyName);
    encryptionZones.put(inodeId, ez);
  }

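A brief aside on the pattern: the addEncryptionZone/unprotectedAddEncryptionZone split follows the usual namenode convention in which the plain method asserts the FSDirectory write lock while the 'unprotected' variant leaves locking to the caller; that is what lets the fsimage/edit-log loading path in FSDirectory (next hunk) call unprotectedAddEncryptionZone directly during startup, when the lock discipline differs.
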
FSDirectory.java

@@ -2158,7 +2158,7 @@ public class FSDirectory implements Closeable {
    for (XAttr xattr : xattrs) {
      final String xaName = XAttrHelper.getPrefixName(xattr);
      if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) {
-       ezManager.addEncryptionZone(inode.getId(),
+       ezManager.unprotectedAddEncryptionZone(inode.getId(),
            new String(xattr.getValue()));
      }
    }

FSImageFormatPBINode.java

@@ -82,7 +82,12 @@ public final class FSImageFormatPBINode {
  private static final int XATTR_NAMESPACE_OFFSET = 30;
  private static final int XATTR_NAME_MASK = (1 << 24) - 1;
  private static final int XATTR_NAME_OFFSET = 6;
- private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES =
+
+ /* See the comments in fsimage.proto for an explanation of the following. */
+ private static final int XATTR_NAMESPACE_EXT_OFFSET = 5;
+ private static final int XATTR_NAMESPACE_EXT_MASK = 1;
+
+ private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES =
      XAttr.NameSpace.values();

@@ -122,6 +127,8 @@ public final class FSImageFormatPBINode {
      int v = xAttrCompactProto.getName();
      int nid = (v >> XATTR_NAME_OFFSET) & XATTR_NAME_MASK;
      int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
+     ns |=
+         ((v >> XATTR_NAMESPACE_EXT_OFFSET) & XATTR_NAMESPACE_EXT_MASK) << 2;
      String name = stringTable[nid];
      byte[] value = null;
      if (xAttrCompactProto.getValue() != null) {

@@ -371,10 +378,13 @@ public final class FSImageFormatPBINode {
      for (XAttr a : f.getXAttrs()) {
        XAttrCompactProto.Builder xAttrCompactBuilder = XAttrCompactProto.
            newBuilder();
-       int v = ((a.getNameSpace().ordinal() & XATTR_NAMESPACE_MASK) <<
-           XATTR_NAMESPACE_OFFSET)
-           | ((stringMap.getId(a.getName()) & XATTR_NAME_MASK) <<
+       int nsOrd = a.getNameSpace().ordinal();
+       Preconditions.checkArgument(nsOrd < 8, "Too many namespaces.");
+       int v = ((nsOrd & XATTR_NAMESPACE_MASK) << XATTR_NAMESPACE_OFFSET)
+           | ((stringMap.getId(a.getName()) & XATTR_NAME_MASK) <<
            XATTR_NAME_OFFSET);
+       v |= (((nsOrd >> 2) & XATTR_NAMESPACE_EXT_MASK) <<
+           XATTR_NAMESPACE_EXT_OFFSET);
        xAttrCompactBuilder.setName(v);
        if (a.getValue() != null) {
          xAttrCompactBuilder.setValue(PBHelper.getByteString(a.getValue()));

FSNamesystem.java

@@ -978,7 +978,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    return Collections.unmodifiableList(auditLoggers);
  }

- protected void loadFSImage(StartupOption startOpt) throws IOException {
+ private void loadFSImage(StartupOption startOpt) throws IOException {
    final FSImage fsImage = getFSImage();

    // format before starting up if requested

@@ -1026,7 +1026,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    imageLoadComplete();
  }

- protected void startSecretManager() {
+ private void startSecretManager() {
    if (dtSecretManager != null) {
      try {
        dtSecretManager.startThreads();

@@ -1038,7 +1038,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      }
    }
  }

- protected void startSecretManagerIfNecessary() {
+ private void startSecretManagerIfNecessary() {
    boolean shouldRun = shouldUseDelegationTokens() &&
      !isInSafeMode() && getEditLog().isOpenForWrite();
    boolean running = dtSecretManager.isRunning();

@@ -1188,7 +1188,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    return haEnabled && inActiveState() && startingActiveService;
  }

- protected boolean shouldUseDelegationTokens() {
+ private boolean shouldUseDelegationTokens() {
    return UserGroupInformation.isSecurityEnabled() ||
      alwaysUseDelegationTokensForTests;
  }

@@ -2775,7 +2775,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   * @throws UnresolvedLinkException
   * @throws IOException
   */
- protected
  LocatedBlock prepareFileForWrite(String src, INodeFile file,
      String leaseHolder, String clientMachine,
      boolean writeToEditLog,

@@ -3235,7 +3234,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    return new FileState(pendingFile, src);
  }

- protected
  LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs,
      long offset) throws IOException {
    LocatedBlock lBlk = new LocatedBlock(

@@ -3354,8 +3352,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    return true;
  }

- protected INodeFile checkLease(String src, String holder, INode inode,
-     long fileId)
+ private INodeFile checkLease(String src, String holder, INode inode,
+     long fileId)
      throws LeaseExpiredException, FileNotFoundException {
    assert hasReadLock();
    final String ident = src + " (inode " + fileId + ")";

@@ -4474,7 +4472,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    return leaseManager.reassignLease(lease, src, newHolder);
  }

- protected void commitOrCompleteLastBlock(final INodeFile fileINode,
+ private void commitOrCompleteLastBlock(final INodeFile fileINode,
      final Block commitBlock) throws IOException {
    assert hasWriteLock();
    Preconditions.checkArgument(fileINode.isUnderConstruction());

@@ -4872,7 +4870,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   * @return an array of datanode commands
   * @throws IOException
   */
- protected
  HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
      StorageReport[] reports, long cacheCapacity, long cacheUsed,
      int xceiverCount, int xmitsInProgress, int failedVolumes)

@@ -4922,8 +4919,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   * @param file
   * @param logRetryCache
   */
- protected void persistBlocks(String path, INodeFile file,
-     boolean logRetryCache) {
+ private void persistBlocks(String path, INodeFile file,
+     boolean logRetryCache) {
    assert hasWriteLock();
    Preconditions.checkArgument(file.isUnderConstruction());
    getEditLog().logUpdateBlocks(path, file, logRetryCache);

@@ -5348,7 +5345,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   * @param path
   * @param file
   */
- protected void persistNewBlock(String path, INodeFile file) {
+ private void persistNewBlock(String path, INodeFile file) {
    Preconditions.checkArgument(file.isUnderConstruction());
    getEditLog().logAddBlock(path, file);
    if (NameNode.stateChangeLog.isDebugEnabled()) {

@@ -7226,7 +7223,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   *
   * @return true if delegation token operation is allowed
   */
- protected boolean isAllowedDelegationTokenOp() throws IOException {
+ private boolean isAllowedDelegationTokenOp() throws IOException {
    AuthenticationMethod authMethod = getConnectionAuthenticationMethod();
    if (UserGroupInformation.isSecurityEnabled()
        && (authMethod != AuthenticationMethod.KERBEROS)

@@ -7393,13 +7390,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    blockManager.getDatanodeManager().fetchDatanodes(live, null, true);
    for (DatanodeDescriptor node : live) {
-     info.put(node.getHostName(), getLiveNodeInfo(node));
-   }
-   return JSON.toString(info);
- }
-
- protected Map<String, Object> getLiveNodeInfo(DatanodeDescriptor node) {
-   return ImmutableMap.<String, Object>builder()
+     Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
          .put("infoAddr", node.getInfoAddr())
          .put("infoSecureAddr", node.getInfoSecureAddr())
          .put("xferaddr", node.getXferAddr())

@@ -7417,6 +7408,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
          .put("blockPoolUsedPercent", node.getBlockPoolUsedPercent())
          .put("volfails", node.getVolumeFailures())
          .build();
+
+     info.put(node.getHostName(), innerinfo);
+   }
+   return JSON.toString(info);
  }

  /**

@@ -7701,16 +7696,17 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
  public ReentrantLock getLongReadLockForTests() {
    return fsLock.longReadLock;
  }

- @VisibleForTesting
- public SafeModeInfo getSafeModeInfoForTests() {
-   return safeMode;
- }
-
  @VisibleForTesting
  public void setNNResourceChecker(NameNodeResourceChecker nnResourceChecker) {
    this.nnResourceChecker = nnResourceChecker;
  }

+ public SafeModeInfo getSafeModeInfo() {
+   return safeMode;
+ }
+
  @Override
  public boolean isAvoidingStaleDataNodesForWrite() {
    return this.blockManager.getDatanodeManager()

NameNodeLayoutVersion.java

@@ -67,8 +67,9 @@ public class NameNodeLayoutVersion {
    EDITLOG_LENGTH(-56, "Add length field to every edit log op"),
    XATTRS(-57, "Extended attributes"),
    CREATE_OVERWRITE(-58, "Use single editlog record for " +
-     "creating file with overwrite"),
-   BLOCK_STORAGE_POLICY(-59, "Block Storage policy");
+     "creating file with overwrite"),
+   XATTRS_NAMESPACE_EXT(-59, "Increase number of xattr namespaces"),
+   BLOCK_STORAGE_POLICY(-60, "Block Storage policy");

    private final FeatureInfo info;

fsimage.proto

@@ -113,8 +113,12 @@ message INodeSection {
     *
     * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
     * [2:26) -- the name of the entry, which is an ID that points to a
-    * string in the StringTableSection.
-    * [26:32) -- reserved for future uses.
+    * string in the StringTableSection.
+    * [26:27) -- namespace extension. Originally there were only 4 namespaces
+    * so only 2 bits were needed. At that time, this bit was reserved. When a
+    * 5th namespace was created (raw) this bit became used as a 3rd namespace
+    * bit.
+    * [27:32) -- reserved for future uses.
     */
    required fixed32 name = 1;
    optional bytes value = 2;

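To make the bit layout above concrete, here is a minimal standalone sketch (not HDFS source) of packing and unpacking the fixed32 name field. The constants mirror the FSImageFormatPBINode hunk earlier; XATTR_NAMESPACE_MASK is assumed to be (1 << 2) - 1, matching the two-bit field described above:

    class XAttrNamePacking {
      static final int XATTR_NAMESPACE_MASK = (1 << 2) - 1;
      static final int XATTR_NAMESPACE_OFFSET = 30;
      static final int XATTR_NAME_MASK = (1 << 24) - 1;
      static final int XATTR_NAME_OFFSET = 6;
      static final int XATTR_NAMESPACE_EXT_MASK = 1;
      static final int XATTR_NAMESPACE_EXT_OFFSET = 5;

      /** Low two namespace bits, 24-bit string-table id, plus the extension bit. */
      static int pack(int nsOrd, int nameId) {
        int v = ((nsOrd & XATTR_NAMESPACE_MASK) << XATTR_NAMESPACE_OFFSET)
            | ((nameId & XATTR_NAME_MASK) << XATTR_NAME_OFFSET);
        v |= ((nsOrd >> 2) & XATTR_NAMESPACE_EXT_MASK) << XATTR_NAMESPACE_EXT_OFFSET;
        return v;
      }

      static int unpackNamespace(int v) {
        int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
        ns |= ((v >> XATTR_NAMESPACE_EXT_OFFSET) & XATTR_NAMESPACE_EXT_MASK) << 2;
        return ns;
      }
    }

The fifth namespace ordinal (raw, ordinal 4) round-trips as intended: its low two bits are 00, so only the extension bit is set, and unpackNamespace() reassembles 4.
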
DFSTestUtil.java

@@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

@@ -83,6 +84,8 @@ import org.apache.hadoop.util.VersionInfo;
 import org.junit.Assume;

 import java.io.*;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
 import java.net.*;
 import java.nio.ByteBuffer;
 import java.security.NoSuchAlgorithmException;

@@ -1508,4 +1511,38 @@ public class DFSTestUtil {
      throws Exception {
    FsShellRun(cmd, 0, null, conf);
  }
+
+ public static void addDataNodeLayoutVersion(final int lv, final String description)
+     throws NoSuchFieldException, IllegalAccessException {
+   Preconditions.checkState(lv < DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
+
+   // Override {@link DataNodeLayoutVersion#CURRENT_LAYOUT_VERSION} via reflection.
+   Field modifiersField = Field.class.getDeclaredField("modifiers");
+   modifiersField.setAccessible(true);
+   Field field = DataNodeLayoutVersion.class.getField("CURRENT_LAYOUT_VERSION");
+   field.setAccessible(true);
+   modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
+   field.setInt(null, lv);
+
+   // Override {@link HdfsConstants#DATANODE_LAYOUT_VERSION}
+   field = HdfsConstants.class.getField("DATANODE_LAYOUT_VERSION");
+   field.setAccessible(true);
+   modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
+   field.setInt(null, lv);
+
+   // Inject the feature into the FEATURES map.
+   final LayoutVersion.FeatureInfo featureInfo =
+       new LayoutVersion.FeatureInfo(lv, lv + 1, description, false);
+   final LayoutVersion.LayoutFeature feature =
+       new LayoutVersion.LayoutFeature() {
+         @Override
+         public LayoutVersion.FeatureInfo getInfo() {
+           return featureInfo;
+         }
+       };
+
+   // Update the FEATURES map with the new layout version.
+   LayoutVersion.updateMap(DataNodeLayoutVersion.FEATURES,
+       new LayoutVersion.LayoutFeature[] { feature });
+ }
}

TestEncryptionZones.java

@@ -49,6 +49,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
 import org.apache.hadoop.security.AccessControlException;

@@ -314,6 +315,13 @@ public class TestEncryptionZones {
      assertNumZones(numZones);
      assertZonePresent(null, zonePath.toString());
    }
+
+   fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+   fs.saveNamespace();
+   fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+   cluster.restartNameNode(true);
+   assertNumZones(numZones);
+   assertZonePresent(null, zone1.toString());
  }

  /**

TestBalancer.java

@@ -72,7 +72,7 @@ public class TestBalancer {
    ((Log4JLogger)Balancer.LOG).getLogger().setLevel(Level.ALL);
  }

- final static long CAPACITY = 500L;
+ final static long CAPACITY = 5000L;
  final static String RACK0 = "/rack0";
  final static String RACK1 = "/rack1";
  final static String RACK2 = "/rack2";

@@ -85,7 +85,7 @@ public class TestBalancer {
  static final long TIMEOUT = 40000L; //msec
  static final double CAPACITY_ALLOWED_VARIANCE = 0.005; // 0.5%
  static final double BALANCE_ALLOWED_VARIANCE = 0.11; // 10%+delta
- static final int DEFAULT_BLOCK_SIZE = 10;
+ static final int DEFAULT_BLOCK_SIZE = 100;
  private static final Random r = new Random();

  static {

@@ -96,6 +96,7 @@ public class TestBalancer {
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+   conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
    SimulatedFSDataset.setFactory(conf);
    conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
  }

SimulatedFSDataset.java

@@ -1072,6 +1072,14 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
    return false;
  }

+ @Override
+ public void setRollingUpgradeMarker(String bpid) {
+ }
+
+ @Override
+ public void clearRollingUpgradeMarker(String bpid) {
+ }
+
  @Override
  public void checkAndUpdate(String bpid, long blockId, File diskFile,
      File diskMetaFile, FsVolumeSpi vol) {

TestDataNodeRollingUpgrade.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs.server.datanode;

 import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;

@@ -28,6 +29,8 @@ import static org.junit.Assert.assertTrue;
 import java.io.File;
 import java.io.IOException;
 import java.util.Random;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;

@@ -47,6 +50,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;

 /**

@@ -64,7 +68,7 @@ public class TestDataNodeRollingUpgrade {
  Configuration conf;
  MiniDFSCluster cluster = null;
  DistributedFileSystem fs = null;
- DataNode dn = null;
+ DataNode dn0 = null;
  NameNode nn = null;
  String blockPoolId = null;

@@ -76,8 +80,8 @@ public class TestDataNodeRollingUpgrade {
    fs = cluster.getFileSystem();
    nn = cluster.getNameNode(0);
    assertNotNull(nn);
-   dn = cluster.getDataNodes().get(0);
-   assertNotNull(dn);
+   dn0 = cluster.getDataNodes().get(0);
+   assertNotNull(dn0);
    blockPoolId = cluster.getNameNode(0).getNamesystem().getBlockPoolId();
  }

@@ -88,7 +92,7 @@ public class TestDataNodeRollingUpgrade {
    }
    fs = null;
    nn = null;
-   dn = null;
+   dn0 = null;
    blockPoolId = null;
  }

@@ -103,9 +107,10 @@ public class TestDataNodeRollingUpgrade {
  private File getBlockForFile(Path path, boolean exists) throws IOException {
    LocatedBlocks blocks = nn.getRpcServer().getBlockLocations(path.toString(),
        0, Long.MAX_VALUE);
-   assertEquals(1, blocks.getLocatedBlocks().size());
+   assertEquals("The test helper functions assume that each file has a single block",
+       1, blocks.getLocatedBlocks().size());
    ExtendedBlock block = blocks.getLocatedBlocks().get(0).getBlock();
-   BlockLocalPathInfo bInfo = dn.getFSDataset().getBlockLocalPathInfo(block);
+   BlockLocalPathInfo bInfo = dn0.getFSDataset().getBlockLocalPathInfo(block);
    File blockFile = new File(bInfo.getBlockPath());
    assertEquals(exists, blockFile.exists());
    return blockFile;

@@ -113,7 +118,7 @@ public class TestDataNodeRollingUpgrade {

  private File getTrashFileForBlock(File blockFile, boolean exists) {
    File trashFile = new File(
-       dn.getStorage().getTrashDirectoryForBlockFile(blockPoolId, blockFile));
+       dn0.getStorage().getTrashDirectoryForBlockFile(blockPoolId, blockFile));
    assertEquals(exists, trashFile.exists());
    return trashFile;
  }

@@ -135,11 +140,10 @@ public class TestDataNodeRollingUpgrade {
    assertFalse(blockFile.exists());
  }

- private void ensureTrashDisabled() {
+ private boolean isTrashRootPresent() {
    // Trash is disabled; trash root does not exist
-   assertFalse(dn.getFSDataset().trashEnabled(blockPoolId));
-   BlockPoolSliceStorage bps = dn.getStorage().getBPStorage(blockPoolId);
-   assertFalse(bps.trashEnabled());
+   BlockPoolSliceStorage bps = dn0.getStorage().getBPStorage(blockPoolId);
+   return bps.trashEnabled();
  }

  /**

@@ -149,17 +153,25 @@ public class TestDataNodeRollingUpgrade {
      throws Exception {
    assertTrue(blockFile.exists());
    assertFalse(trashFile.exists());
-   ensureTrashDisabled();
+   assertFalse(isTrashRootPresent());
  }

+ private boolean isBlockFileInPrevious(File blockFile) {
+   Pattern blockFilePattern = Pattern.compile("^(.*/current/.*/)(current)(/.*)$");
+   Matcher matcher = blockFilePattern.matcher(blockFile.toString());
+   String previousFileName = matcher.replaceFirst("$1" + "previous" + "$3");
+   return ((new File(previousFileName)).exists());
+ }
+
  private void startRollingUpgrade() throws Exception {
    LOG.info("Starting rolling upgrade");
+   fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    final DFSAdmin dfsadmin = new DFSAdmin(conf);
    TestRollingUpgrade.runCmd(dfsadmin, true, "-rollingUpgrade", "prepare");
    triggerHeartBeats();

    // Ensure datanode rolling upgrade is started
-   assertTrue(dn.getFSDataset().trashEnabled(blockPoolId));
+   assertTrue(dn0.getFSDataset().trashEnabled(blockPoolId));
  }

  private void finalizeRollingUpgrade() throws Exception {

@@ -169,8 +181,8 @@ public class TestDataNodeRollingUpgrade {
    triggerHeartBeats();

    // Ensure datanode rolling upgrade is started
-   assertFalse(dn.getFSDataset().trashEnabled(blockPoolId));
-   BlockPoolSliceStorage bps = dn.getStorage().getBPStorage(blockPoolId);
+   assertFalse(dn0.getFSDataset().trashEnabled(blockPoolId));
+   BlockPoolSliceStorage bps = dn0.getStorage().getBPStorage(blockPoolId);
    assertFalse(bps.trashEnabled());
  }

@@ -179,13 +191,15 @@ public class TestDataNodeRollingUpgrade {
    // Restart the namenode with rolling upgrade rollback
    LOG.info("Starting rollback of the rolling upgrade");
    MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0);
+   dnprop.setDnArgs("-rollback");
+   cluster.shutdownNameNodes();
    cluster.restartNameNode("-rollingupgrade", "rollback");
    cluster.restartDataNode(dnprop);
    cluster.waitActive();
    nn = cluster.getNameNode(0);
-   dn = cluster.getDataNodes().get(0);
+   dn0 = cluster.getDataNodes().get(0);
    triggerHeartBeats();
    LOG.info("The cluster is active after rollback");
  }

  @Test (timeout=600000)

@@ -194,12 +208,11 @@ public class TestDataNodeRollingUpgrade {
    startCluster();

    // Create files in DFS.
-   Path testFile1 = new Path("/TestDataNodeRollingUpgrade1.dat");
-   Path testFile2 = new Path("/TestDataNodeRollingUpgrade2.dat");
+   Path testFile1 = new Path("/" + GenericTestUtils.getMethodName() + ".01.dat");
+   Path testFile2 = new Path("/" + GenericTestUtils.getMethodName() + ".02.dat");
    DFSTestUtil.createFile(fs, testFile1, FILE_SIZE, REPL_FACTOR, SEED);
    DFSTestUtil.createFile(fs, testFile2, FILE_SIZE, REPL_FACTOR, SEED);

-   fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    startRollingUpgrade();
    File blockFile = getBlockForFile(testFile2, true);
    File trashFile = getTrashFileForBlock(blockFile, false);

@@ -207,7 +220,7 @@ public class TestDataNodeRollingUpgrade {
    finalizeRollingUpgrade();

    // Ensure that deleted file testFile2 stays deleted after finalize
-   ensureTrashDisabled();
+   assertFalse(isTrashRootPresent());
    assert(!fs.exists(testFile2));
    assert(fs.exists(testFile1));

@@ -222,11 +235,10 @@ public class TestDataNodeRollingUpgrade {
    startCluster();

    // Create files in DFS.
-   Path testFile1 = new Path("/TestDataNodeRollingUpgrade1.dat");
+   Path testFile1 = new Path("/" + GenericTestUtils.getMethodName() + ".01.dat");
    DFSTestUtil.createFile(fs, testFile1, FILE_SIZE, REPL_FACTOR, SEED);
    String fileContents1 = DFSTestUtil.readFile(fs, testFile1);

-   fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    startRollingUpgrade();

    File blockFile = getBlockForFile(testFile1, true);

@@ -255,9 +267,9 @@ public class TestDataNodeRollingUpgrade {
    startCluster();

    // Create files in DFS.
-   String testFile1 = "/TestDataNodeXceiver1.dat";
-   String testFile2 = "/TestDataNodeXceiver2.dat";
-   String testFile3 = "/TestDataNodeXceiver3.dat";
+   String testFile1 = "/" + GenericTestUtils.getMethodName() + ".01.dat";
+   String testFile2 = "/" + GenericTestUtils.getMethodName() + ".02.dat";
+   String testFile3 = "/" + GenericTestUtils.getMethodName() + ".03.dat";

    DFSClient client1 = new DFSClient(NameNode.getAddress(conf), conf);
    DFSClient client2 = new DFSClient(NameNode.getAddress(conf), conf);

@@ -277,12 +289,12 @@ public class TestDataNodeRollingUpgrade {
    s3.write(toWrite, 0, 1024*1024*8);
    s3.flush();

-   assertTrue(dn.getXferServer().getNumPeersXceiver() == dn.getXferServer()
+   assertTrue(dn0.getXferServer().getNumPeersXceiver() == dn0.getXferServer()
        .getNumPeersXceiver());
    s1.close();
    s2.close();
    s3.close();
-   assertTrue(dn.getXferServer().getNumPeersXceiver() == dn.getXferServer()
+   assertTrue(dn0.getXferServer().getNumPeersXceiver() == dn0.getXferServer()
        .getNumPeersXceiver());
    client1.close();
    client2.close();

@@ -291,4 +303,143 @@ public class TestDataNodeRollingUpgrade {
      shutdownCluster();
    }
  }
+
+ /**
+  * Support for layout version change with rolling upgrade was
+  * added by HDFS-6800 and HDFS-6981.
+  */
+ @Test(timeout=300000)
+ public void testWithLayoutChangeAndFinalize() throws Exception {
+   final long seed = 0x600DF00D;
+   try {
+     startCluster();
+
+     Path[] paths = new Path[3];
+     File[] blockFiles = new File[3];
+
+     // Create two files in DFS.
+     for (int i = 0; i < 2; ++i) {
+       paths[i] = new Path("/" + GenericTestUtils.getMethodName() + "." + i + ".dat");
+       DFSTestUtil.createFile(fs, paths[i], BLOCK_SIZE, (short) 2, seed);
+     }
+
+     startRollingUpgrade();
+
+     // Delete the first file. The DN will save its block files in trash.
+     blockFiles[0] = getBlockForFile(paths[0], true);
+     File trashFile0 = getTrashFileForBlock(blockFiles[0], false);
+     deleteAndEnsureInTrash(paths[0], blockFiles[0], trashFile0);
+
+     // Restart the DN with a new layout version to trigger layout upgrade.
+     LOG.info("Shutting down the Datanode");
+     MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0);
+     DFSTestUtil.addDataNodeLayoutVersion(
+         DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1,
+         "Test Layout for TestDataNodeRollingUpgrade");
+     LOG.info("Restarting the DataNode");
+     cluster.restartDataNode(dnprop, true);
+     cluster.waitActive();
+
+     dn0 = cluster.getDataNodes().get(0);
+     LOG.info("The DN has been restarted");
+     assertFalse(trashFile0.exists());
+     assertFalse(dn0.getStorage().getBPStorage(blockPoolId).isTrashAllowed(blockFiles[0]));
+
+     // Ensure that the block file for the first file was moved from 'trash' to 'previous'.
+     assertTrue(isBlockFileInPrevious(blockFiles[0]));
+     assertFalse(isTrashRootPresent());
+
+     // Delete the second file. Ensure that its block file is in previous.
+     blockFiles[1] = getBlockForFile(paths[1], true);
+     fs.delete(paths[1], false);
+     assertTrue(isBlockFileInPrevious(blockFiles[1]));
+     assertFalse(isTrashRootPresent());
+
+     // Finalize and ensure that neither block file exists in trash or previous.
+     finalizeRollingUpgrade();
+     assertFalse(isTrashRootPresent());
+     assertFalse(isBlockFileInPrevious(blockFiles[0]));
+     assertFalse(isBlockFileInPrevious(blockFiles[1]));
+   } finally {
+     shutdownCluster();
+   }
+ }
+
+ /**
+  * Support for layout version change with rolling upgrade was
+  * added by HDFS-6800 and HDFS-6981.
+  */
+ @Test(timeout=300000)
+ public void testWithLayoutChangeAndRollback() throws Exception {
+   final long seed = 0x600DF00D;
+   try {
+     startCluster();
+
+     Path[] paths = new Path[3];
+     File[] blockFiles = new File[3];
+
+     // Create two files in DFS.
+     for (int i = 0; i < 2; ++i) {
+       paths[i] = new Path("/" + GenericTestUtils.getMethodName() + "." + i + ".dat");
+       DFSTestUtil.createFile(fs, paths[i], BLOCK_SIZE, (short) 1, seed);
+     }
+
+     startRollingUpgrade();
+
+     // Delete the first file. The DN will save its block files in trash.
+     blockFiles[0] = getBlockForFile(paths[0], true);
+     File trashFile0 = getTrashFileForBlock(blockFiles[0], false);
+     deleteAndEnsureInTrash(paths[0], blockFiles[0], trashFile0);
+
+     // Restart the DN with a new layout version to trigger layout upgrade.
+     LOG.info("Shutting down the Datanode");
+     MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0);
+     DFSTestUtil.addDataNodeLayoutVersion(
+         DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1,
+         "Test Layout for TestDataNodeRollingUpgrade");
+     LOG.info("Restarting the DataNode");
+     cluster.restartDataNode(dnprop, true);
+     cluster.waitActive();
+
+     dn0 = cluster.getDataNodes().get(0);
+     LOG.info("The DN has been restarted");
+     assertFalse(trashFile0.exists());
+     assertFalse(dn0.getStorage().getBPStorage(blockPoolId).isTrashAllowed(blockFiles[0]));
+
+     // Ensure that the block file for the first file was moved from 'trash' to 'previous'.
+     assertTrue(isBlockFileInPrevious(blockFiles[0]));
+     assertFalse(isTrashRootPresent());
+
+     // Delete the second file. Ensure that its block file is in previous.
+     blockFiles[1] = getBlockForFile(paths[1], true);
+     fs.delete(paths[1], false);
+     assertTrue(isBlockFileInPrevious(blockFiles[1]));
+     assertFalse(isTrashRootPresent());
+
+     // Create and delete a third file. Its block file should not be
+     // in either trash or previous after deletion.
+     paths[2] = new Path("/" + GenericTestUtils.getMethodName() + ".2.dat");
+     DFSTestUtil.createFile(fs, paths[2], BLOCK_SIZE, (short) 1, seed);
+     blockFiles[2] = getBlockForFile(paths[2], true);
+     fs.delete(paths[2], false);
+     assertFalse(isBlockFileInPrevious(blockFiles[2]));
+     assertFalse(isTrashRootPresent());
+
+     // Rollback and ensure that the first two file contents were restored.
+     rollbackRollingUpgrade();
+     for (int i = 0; i < 2; ++i) {
+       byte[] actual = DFSTestUtil.readFileBuffer(fs, paths[i]);
+       byte[] calculated = DFSTestUtil.calculateFileContentsFromSeed(seed, BLOCK_SIZE);
+       assertArrayEquals(actual, calculated);
+     }
+
+     // And none of the block files must be in previous or trash.
+     assertFalse(isTrashRootPresent());
+     for (int i = 0; i < 3; ++i) {
+       assertFalse(isBlockFileInPrevious(blockFiles[i]));
+     }
+   } finally {
+     shutdownCluster();
+   }
+ }
}

@@ -56,6 +56,7 @@ import org.junit.BeforeClass;
import org.junit.Test;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;

/**
 * Tests NameNode interaction for all XAttr APIs.

@@ -129,51 +130,73 @@ public class FSXAttrBaseTest {
   */
  @Test(timeout = 120000)
  public void testCreateXAttr() throws Exception {
    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    Map<String, byte[]> expectedXAttrs = Maps.newHashMap();
    expectedXAttrs.put(name1, value1);
    expectedXAttrs.put(name2, null);
    doTestCreateXAttr(path, expectedXAttrs);
    expectedXAttrs.put(raw1, value1);
    doTestCreateXAttr(rawPath, expectedXAttrs);
  }

  private void doTestCreateXAttr(Path usePath, Map<String,
      byte[]> expectedXAttrs) throws Exception {
    FileSystem.mkdirs(fs, usePath, FsPermission.createImmutable((short)0750));
    fs.setXAttr(usePath, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));

    Map<String, byte[]> xattrs = fs.getXAttrs(path);
    Map<String, byte[]> xattrs = fs.getXAttrs(usePath);
    Assert.assertEquals(xattrs.size(), 1);
    Assert.assertArrayEquals(value1, xattrs.get(name1));

    fs.removeXAttr(path, name1);
    fs.removeXAttr(usePath, name1);

    xattrs = fs.getXAttrs(path);
    xattrs = fs.getXAttrs(usePath);
    Assert.assertEquals(xattrs.size(), 0);

    // Create xattr which already exists.
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(usePath, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    try {
      fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
      fs.setXAttr(usePath, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
      Assert.fail("Creating xattr which already exists should fail.");
    } catch (IOException e) {
    }
    fs.removeXAttr(path, name1);
    fs.removeXAttr(usePath, name1);

    // Create two xattrs
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.CREATE));
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
    // Create the xattrs
    for (Map.Entry<String, byte[]> ent : expectedXAttrs.entrySet()) {
      fs.setXAttr(usePath, ent.getKey(), ent.getValue(),
          EnumSet.of(XAttrSetFlag.CREATE));
    }
    xattrs = fs.getXAttrs(usePath);
    Assert.assertEquals(xattrs.size(), expectedXAttrs.size());
    for (Map.Entry<String, byte[]> ent : expectedXAttrs.entrySet()) {
      final byte[] val =
          (ent.getValue() == null) ? new byte[0] : ent.getValue();
      Assert.assertArrayEquals(val, xattrs.get(ent.getKey()));
    }

    restart(false);
    initFileSystem();
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
    xattrs = fs.getXAttrs(usePath);
    Assert.assertEquals(xattrs.size(), expectedXAttrs.size());
    for (Map.Entry<String, byte[]> ent : expectedXAttrs.entrySet()) {
      final byte[] val =
          (ent.getValue() == null) ? new byte[0] : ent.getValue();
      Assert.assertArrayEquals(val, xattrs.get(ent.getKey()));
    }

    restart(true);
    initFileSystem();
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

    fs.removeXAttr(path, name1);
    fs.removeXAttr(path, name2);
    xattrs = fs.getXAttrs(usePath);
    Assert.assertEquals(xattrs.size(), expectedXAttrs.size());
    for (Map.Entry<String, byte[]> ent : expectedXAttrs.entrySet()) {
      final byte[] val =
          (ent.getValue() == null) ? new byte[0] : ent.getValue();
      Assert.assertArrayEquals(val, xattrs.get(ent.getKey()));
    }

    for (Map.Entry<String, byte[]> ent : expectedXAttrs.entrySet()) {
      fs.removeXAttr(usePath, ent.getKey());
    }
  }

  /**
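
To make the flag semantics that doTestCreateXAttr exercises concrete, here is a minimal standalone sketch of the public XAttr API; the path and values are illustrative, but the CREATE/REPLACE behavior described in the comments is what the test asserts:

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;

public class XAttrExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/xattr-demo");

    // CREATE succeeds only if "user.a1" does not exist yet; a second
    // CREATE of the same name throws an IOException, as the test expects.
    fs.setXAttr(p, "user.a1", new byte[] { 0x31 }, EnumSet.of(XAttrSetFlag.CREATE));

    // A null value is legal and is read back as an empty byte[] -- the
    // round-trip that the expectedXAttrs map checks.
    fs.setXAttr(p, "user.a2", null, EnumSet.of(XAttrSetFlag.CREATE));

    // REPLACE succeeds only if the attribute already exists.
    fs.setXAttr(p, "user.a1", new byte[] { 0x32 }, EnumSet.of(XAttrSetFlag.REPLACE));

    byte[] value = fs.getXAttr(p, "user.a1");
    fs.removeXAttr(p, "user.a2");
  }
}
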
@@ -223,7 +223,7 @@ public class NameNodeAdapter {
   * if safemode is not running.
   */
  public static int getSafeModeSafeBlocks(NameNode nn) {
    SafeModeInfo smi = nn.getNamesystem().getSafeModeInfo();
    SafeModeInfo smi = nn.getNamesystem().getSafeModeInfoForTests();
    if (smi == null) {
      return -1;
    }
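
Because getSafeModeSafeBlocks returns -1 whenever safemode is not running, callers can probe it without special-casing the safemode state. A hypothetical usage sketch, not code from this commit:

  // Poll until the NameNode reports the expected number of safe blocks,
  // relying only on the contract that -1 means "safemode is off".
  static void waitForSafeBlocks(NameNode nn, int expected)
      throws InterruptedException {
    int safe;
    while ((safe = NameNodeAdapter.getSafeModeSafeBlocks(nn)) != expected) {
      if (safe == -1) {
        throw new IllegalStateException("safemode is not running");
      }
      Thread.sleep(100);
    }
  }
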
Binary file not shown.
@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<EDITS>
  <EDITS_VERSION>-59</EDITS_VERSION>
  <EDITS_VERSION>-60</EDITS_VERSION>
  <RECORD>
    <OPCODE>OP_START_LOG_SEGMENT</OPCODE>
    <DATA>

@@ -13,8 +13,8 @@
      <TXID>2</TXID>
      <DELEGATION_KEY>
        <KEY_ID>1</KEY_ID>
        <EXPIRY_DATE>1410634509611</EXPIRY_DATE>
        <KEY>201e2572e03d089c</KEY>
        <EXPIRY_DATE>1410915997709</EXPIRY_DATE>
        <KEY>309e81e09dc6c75a</KEY>
      </DELEGATION_KEY>
    </DATA>
  </RECORD>

@@ -24,8 +24,8 @@
      <TXID>3</TXID>
      <DELEGATION_KEY>
        <KEY_ID>2</KEY_ID>
        <EXPIRY_DATE>1410634509614</EXPIRY_DATE>
        <KEY>ef4542e27fb38301</KEY>
        <EXPIRY_DATE>1410915997711</EXPIRY_DATE>
        <KEY>8a2399843e754bee</KEY>
      </DELEGATION_KEY>
    </DATA>
  </RECORD>

@@ -37,10 +37,10 @@
      <INODEID>16386</INODEID>
      <PATH>/file_create</PATH>
      <REPLICATION>1</REPLICATION>
      <MTIME>1409943310217</MTIME>
      <ATIME>1409943310217</ATIME>
      <MTIME>1410224798292</MTIME>
      <ATIME>1410224798292</ATIME>
      <BLOCKSIZE>512</BLOCKSIZE>
      <CLIENT_NAME>DFSClient_NONMAPREDUCE_588677218_1</CLIENT_NAME>
      <CLIENT_NAME>DFSClient_NONMAPREDUCE_1374813776_1</CLIENT_NAME>
      <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
      <OVERWRITE>true</OVERWRITE>
      <PERMISSION_STATUS>

@@ -48,7 +48,7 @@
        <GROUPNAME>supergroup</GROUPNAME>
        <MODE>420</MODE>
      </PERMISSION_STATUS>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>5</RPC_CALLID>
    </DATA>
  </RECORD>
@@ -60,8 +60,8 @@
      <INODEID>0</INODEID>
      <PATH>/file_create</PATH>
      <REPLICATION>1</REPLICATION>
      <MTIME>1409943310240</MTIME>
      <ATIME>1409943310217</ATIME>
      <MTIME>1410224798315</MTIME>
      <ATIME>1410224798292</ATIME>
      <BLOCKSIZE>512</BLOCKSIZE>
      <CLIENT_NAME></CLIENT_NAME>
      <CLIENT_MACHINE></CLIENT_MACHINE>

@@ -88,8 +88,8 @@
      <LENGTH>0</LENGTH>
      <SRC>/file_create</SRC>
      <DST>/file_moved</DST>
      <TIMESTAMP>1409943310247</TIMESTAMP>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <TIMESTAMP>1410224798322</TIMESTAMP>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>8</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -99,8 +99,8 @@
      <TXID>8</TXID>
      <LENGTH>0</LENGTH>
      <PATH>/file_moved</PATH>
      <TIMESTAMP>1409943310253</TIMESTAMP>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <TIMESTAMP>1410224798328</TIMESTAMP>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>9</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -111,7 +111,7 @@
      <LENGTH>0</LENGTH>
      <INODEID>16387</INODEID>
      <PATH>/directory_mkdir</PATH>
      <TIMESTAMP>1409943310260</TIMESTAMP>
      <TIMESTAMP>1410224798335</TIMESTAMP>
      <PERMISSION_STATUS>
        <USERNAME>jing</USERNAME>
        <GROUPNAME>supergroup</GROUPNAME>

@@ -146,7 +146,7 @@
      <TXID>13</TXID>
      <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
      <SNAPSHOTNAME>snapshot1</SNAPSHOTNAME>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>14</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -157,7 +157,7 @@
      <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
      <SNAPSHOTOLDNAME>snapshot1</SNAPSHOTOLDNAME>
      <SNAPSHOTNEWNAME>snapshot2</SNAPSHOTNEWNAME>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>15</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -167,7 +167,7 @@
      <TXID>15</TXID>
      <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
      <SNAPSHOTNAME>snapshot2</SNAPSHOTNAME>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>16</RPC_CALLID>
    </DATA>
  </RECORD>
@@ -179,10 +179,10 @@
      <INODEID>16388</INODEID>
      <PATH>/file_create</PATH>
      <REPLICATION>1</REPLICATION>
      <MTIME>1409943310284</MTIME>
      <ATIME>1409943310284</ATIME>
      <MTIME>1410224798359</MTIME>
      <ATIME>1410224798359</ATIME>
      <BLOCKSIZE>512</BLOCKSIZE>
      <CLIENT_NAME>DFSClient_NONMAPREDUCE_588677218_1</CLIENT_NAME>
      <CLIENT_NAME>DFSClient_NONMAPREDUCE_1374813776_1</CLIENT_NAME>
      <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
      <OVERWRITE>true</OVERWRITE>
      <PERMISSION_STATUS>

@@ -190,7 +190,7 @@
        <GROUPNAME>supergroup</GROUPNAME>
        <MODE>420</MODE>
      </PERMISSION_STATUS>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>17</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -202,8 +202,8 @@
      <INODEID>0</INODEID>
      <PATH>/file_create</PATH>
      <REPLICATION>1</REPLICATION>
      <MTIME>1409943310286</MTIME>
      <ATIME>1409943310284</ATIME>
      <MTIME>1410224798361</MTIME>
      <ATIME>1410224798359</ATIME>
      <BLOCKSIZE>512</BLOCKSIZE>
      <CLIENT_NAME></CLIENT_NAME>
      <CLIENT_MACHINE></CLIENT_MACHINE>

@@ -265,9 +265,9 @@
      <LENGTH>0</LENGTH>
      <SRC>/file_create</SRC>
      <DST>/file_moved</DST>
      <TIMESTAMP>1409943310302</TIMESTAMP>
      <TIMESTAMP>1410224798379</TIMESTAMP>
      <OPTIONS>NONE</OPTIONS>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>24</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -279,10 +279,10 @@
      <INODEID>16389</INODEID>
      <PATH>/file_concat_target</PATH>
      <REPLICATION>1</REPLICATION>
      <MTIME>1409943310306</MTIME>
      <ATIME>1409943310306</ATIME>
      <MTIME>1410224798382</MTIME>
      <ATIME>1410224798382</ATIME>
      <BLOCKSIZE>512</BLOCKSIZE>
      <CLIENT_NAME>DFSClient_NONMAPREDUCE_588677218_1</CLIENT_NAME>
      <CLIENT_NAME>DFSClient_NONMAPREDUCE_1374813776_1</CLIENT_NAME>
      <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
      <OVERWRITE>true</OVERWRITE>
      <PERMISSION_STATUS>

@@ -290,7 +290,7 @@
        <GROUPNAME>supergroup</GROUPNAME>
        <MODE>420</MODE>
      </PERMISSION_STATUS>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>26</RPC_CALLID>
    </DATA>
  </RECORD>
@@ -396,8 +396,8 @@
      <INODEID>0</INODEID>
      <PATH>/file_concat_target</PATH>
      <REPLICATION>1</REPLICATION>
      <MTIME>1409943310402</MTIME>
      <ATIME>1409943310306</ATIME>
      <MTIME>1410224798476</MTIME>
      <ATIME>1410224798382</ATIME>
      <BLOCKSIZE>512</BLOCKSIZE>
      <CLIENT_NAME></CLIENT_NAME>
      <CLIENT_MACHINE></CLIENT_MACHINE>

@@ -432,10 +432,10 @@
      <INODEID>16390</INODEID>
      <PATH>/file_concat_0</PATH>
      <REPLICATION>1</REPLICATION>
      <MTIME>1409943310407</MTIME>
      <ATIME>1409943310407</ATIME>
      <MTIME>1410224798479</MTIME>
      <ATIME>1410224798479</ATIME>
      <BLOCKSIZE>512</BLOCKSIZE>
      <CLIENT_NAME>DFSClient_NONMAPREDUCE_588677218_1</CLIENT_NAME>
      <CLIENT_NAME>DFSClient_NONMAPREDUCE_1374813776_1</CLIENT_NAME>
      <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
      <OVERWRITE>true</OVERWRITE>
      <PERMISSION_STATUS>

@@ -443,7 +443,7 @@
        <GROUPNAME>supergroup</GROUPNAME>
        <MODE>420</MODE>
      </PERMISSION_STATUS>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>39</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -549,8 +549,8 @@
      <INODEID>0</INODEID>
      <PATH>/file_concat_0</PATH>
      <REPLICATION>1</REPLICATION>
      <MTIME>1409943310430</MTIME>
      <ATIME>1409943310407</ATIME>
      <MTIME>1410224798501</MTIME>
      <ATIME>1410224798479</ATIME>
      <BLOCKSIZE>512</BLOCKSIZE>
      <CLIENT_NAME></CLIENT_NAME>
      <CLIENT_MACHINE></CLIENT_MACHINE>

@@ -585,10 +585,10 @@
      <INODEID>16391</INODEID>
      <PATH>/file_concat_1</PATH>
      <REPLICATION>1</REPLICATION>
      <MTIME>1409943310434</MTIME>
      <ATIME>1409943310434</ATIME>
      <MTIME>1410224798504</MTIME>
      <ATIME>1410224798504</ATIME>
      <BLOCKSIZE>512</BLOCKSIZE>
      <CLIENT_NAME>DFSClient_NONMAPREDUCE_588677218_1</CLIENT_NAME>
      <CLIENT_NAME>DFSClient_NONMAPREDUCE_1374813776_1</CLIENT_NAME>
      <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
      <OVERWRITE>true</OVERWRITE>
      <PERMISSION_STATUS>

@@ -596,7 +596,7 @@
        <GROUPNAME>supergroup</GROUPNAME>
        <MODE>420</MODE>
      </PERMISSION_STATUS>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>51</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -702,8 +702,8 @@
      <INODEID>0</INODEID>
      <PATH>/file_concat_1</PATH>
      <REPLICATION>1</REPLICATION>
      <MTIME>1409943310457</MTIME>
      <ATIME>1409943310434</ATIME>
      <MTIME>1410224798530</MTIME>
      <ATIME>1410224798504</ATIME>
      <BLOCKSIZE>512</BLOCKSIZE>
      <CLIENT_NAME></CLIENT_NAME>
      <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -736,12 +736,12 @@
      <TXID>57</TXID>
      <LENGTH>0</LENGTH>
      <TRG>/file_concat_target</TRG>
      <TIMESTAMP>1409943310460</TIMESTAMP>
      <TIMESTAMP>1410224798533</TIMESTAMP>
      <SOURCES>
        <SOURCE1>/file_concat_0</SOURCE1>
        <SOURCE2>/file_concat_1</SOURCE2>
      </SOURCES>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>62</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -753,14 +753,14 @@
      <INODEID>16392</INODEID>
      <PATH>/file_symlink</PATH>
      <VALUE>/file_concat_target</VALUE>
      <MTIME>1409943310463</MTIME>
      <ATIME>1409943310463</ATIME>
      <MTIME>1410224798537</MTIME>
      <ATIME>1410224798537</ATIME>
      <PERMISSION_STATUS>
        <USERNAME>jing</USERNAME>
        <GROUPNAME>supergroup</GROUPNAME>
        <MODE>511</MODE>
      </PERMISSION_STATUS>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>63</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -772,10 +772,10 @@
      <INODEID>16393</INODEID>
      <PATH>/hard-lease-recovery-test</PATH>
      <REPLICATION>1</REPLICATION>
      <MTIME>1409943310466</MTIME>
      <ATIME>1409943310466</ATIME>
      <MTIME>1410224798540</MTIME>
      <ATIME>1410224798540</ATIME>
      <BLOCKSIZE>512</BLOCKSIZE>
      <CLIENT_NAME>DFSClient_NONMAPREDUCE_588677218_1</CLIENT_NAME>
      <CLIENT_NAME>DFSClient_NONMAPREDUCE_1374813776_1</CLIENT_NAME>
      <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
      <OVERWRITE>true</OVERWRITE>
      <PERMISSION_STATUS>

@@ -783,7 +783,7 @@
        <GROUPNAME>supergroup</GROUPNAME>
        <MODE>420</MODE>
      </PERMISSION_STATUS>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>64</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -840,7 +840,7 @@
    <OPCODE>OP_REASSIGN_LEASE</OPCODE>
    <DATA>
      <TXID>65</TXID>
      <LEASEHOLDER>DFSClient_NONMAPREDUCE_588677218_1</LEASEHOLDER>
      <LEASEHOLDER>DFSClient_NONMAPREDUCE_1374813776_1</LEASEHOLDER>
      <PATH>/hard-lease-recovery-test</PATH>
      <NEWHOLDER>HDFS_NameNode</NEWHOLDER>
    </DATA>

@@ -853,8 +853,8 @@
      <INODEID>0</INODEID>
      <PATH>/hard-lease-recovery-test</PATH>
      <REPLICATION>1</REPLICATION>
      <MTIME>1409943313176</MTIME>
      <ATIME>1409943310466</ATIME>
      <MTIME>1410224801265</MTIME>
      <ATIME>1410224798540</ATIME>
      <BLOCKSIZE>512</BLOCKSIZE>
      <CLIENT_NAME></CLIENT_NAME>
      <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -881,7 +881,7 @@
      <MODE>493</MODE>
      <LIMIT>9223372036854775807</LIMIT>
      <MAXRELATIVEEXPIRY>2305843009213693951</MAXRELATIVEEXPIRY>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>71</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -891,7 +891,7 @@
      <TXID>68</TXID>
      <POOLNAME>pool1</POOLNAME>
      <LIMIT>99</LIMIT>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>72</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -903,8 +903,8 @@
      <PATH>/path</PATH>
      <REPLICATION>1</REPLICATION>
      <POOL>pool1</POOL>
      <EXPIRATION>2305844419157007447</EXPIRATION>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <EXPIRATION>2305844419438495525</EXPIRATION>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>73</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -914,7 +914,7 @@
      <TXID>70</TXID>
      <ID>1</ID>
      <REPLICATION>2</REPLICATION>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>74</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -923,7 +923,7 @@
    <DATA>
      <TXID>71</TXID>
      <ID>1</ID>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>75</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -932,7 +932,7 @@
    <DATA>
      <TXID>72</TXID>
      <POOLNAME>pool1</POOLNAME>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>76</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -979,7 +979,7 @@
        <NAME>a1</NAME>
        <VALUE>0x313233</VALUE>
      </XATTR>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>78</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -993,7 +993,7 @@
        <NAME>a2</NAME>
        <VALUE>0x373839</VALUE>
      </XATTR>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>79</RPC_CALLID>
    </DATA>
  </RECORD>

@@ -1006,7 +1006,7 @@
        <NAMESPACE>USER</NAMESPACE>
        <NAME>a2</NAME>
      </XATTR>
      <RPC_CLIENTID>e33c0cc9-12b1-49a2-80eb-4d35d3b79960</RPC_CLIENTID>
      <RPC_CLIENTID>b53e8d0a-8d92-4067-b8c8-637ac951bac2</RPC_CLIENTID>
      <RPC_CALLID>80</RPC_CALLID>
    </DATA>
  </RECORD>
@@ -1014,14 +1014,14 @@
    <OPCODE>OP_ROLLING_UPGRADE_START</OPCODE>
    <DATA>
      <TXID>77</TXID>
      <STARTTIME>1409943313537</STARTTIME>
      <STARTTIME>1410224801616</STARTTIME>
    </DATA>
  </RECORD>
  <RECORD>
    <OPCODE>OP_ROLLING_UPGRADE_FINALIZE</OPCODE>
    <DATA>
      <TXID>78</TXID>
      <FINALIZETIME>1409943313537</FINALIZETIME>
      <FINALIZETIME>1410224801616</FINALIZETIME>
    </DATA>
  </RECORD>
  <RECORD>
@@ -179,26 +179,8 @@ public class MRAMSimulator extends AMSimulator {
        return rm.getApplicationMasterService().allocate(request);
      }
    });

    // waiting until the AM container is allocated
    while (true) {
      if (response != null && ! response.getAllocatedContainers().isEmpty()) {
        // get AM container
        Container container = response.getAllocatedContainers().get(0);
        se.getNmMap().get(container.getNodeId())
            .addNewContainer(container, -1L);
        // start AM container
        amContainer = container;
        LOG.debug(MessageFormat.format("Application {0} starts its " +
            "AM container ({1}).", appId, amContainer.getId()));
        isAMContainerRunning = true;
        break;
      }
      // this sleep time is different from HeartBeat
      Thread.sleep(1000);
      // send out empty request
      sendContainerRequest();
      response = responseQueue.take();
      if (response != null) {
        responseQueue.put(response);
      }
    }

@@ -206,6 +188,26 @@ public class MRAMSimulator extends AMSimulator {
  @SuppressWarnings("unchecked")
  protected void processResponseQueue()
      throws InterruptedException, YarnException, IOException {
    // Check whether the AM container has been allocated yet
    if (!isAMContainerRunning) {
      if (!responseQueue.isEmpty()) {
        AllocateResponse response = responseQueue.take();
        if (response != null
            && !response.getAllocatedContainers().isEmpty()) {
          // Get AM container
          Container container = response.getAllocatedContainers().get(0);
          se.getNmMap().get(container.getNodeId())
              .addNewContainer(container, -1L);
          // Start AM container
          amContainer = container;
          LOG.debug(MessageFormat.format("Application {0} starts its " +
              "AM container ({1}).", appId, amContainer.getId()));
          isAMContainerRunning = true;
        }
      }
      return;
    }

    while (! responseQueue.isEmpty()) {
      AllocateResponse response = responseQueue.take();

@@ -262,6 +264,7 @@ public class MRAMSimulator extends AMSimulator {
          LOG.debug(MessageFormat.format("Application {0} sends out event " +
              "to clean up its AM container.", appId));
          isFinished = true;
          break;
        }

        // check allocated containers
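
The two hunks above are the substance of YARN-2526, listed in the CHANGES section below: the blocking "wait for the AM container" loop is removed and re-expressed as a non-blocking check inside the heartbeat-driven processResponseQueue. A reduced sketch of the two shapes follows; the class and field names are hypothetical, not the SLS code:

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

class HeartbeatDrivenAm {
  private final Queue<Object> responses = new ConcurrentLinkedQueue<>();
  private volatile boolean amContainerRunning = false;

  // Deadlock-prone shape: runs on a pooled thread and loops until the AM
  // container arrives. If every pool thread is parked here, no thread is
  // left to deliver responses, and the simulation hangs.
  void blockingWait() throws InterruptedException {
    while (!amContainerRunning) {
      Thread.sleep(1000);
      amContainerRunning = (responses.poll() != null);
    }
  }

  // Deadlock-free shape: each heartbeat does one bounded, non-blocking
  // check and returns, so the pool thread is always released back to
  // serve the other simulators.
  void onHeartbeat() {
    if (!amContainerRunning) {
      if (responses.poll() != null) {
        amContainerRunning = true;
      }
      return; // try again on the next heartbeat
    }
    // ... normal container processing ...
  }
}
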
@@ -15,6 +15,9 @@ Trunk - Unreleased
    YARN-524 TestYarnVersionInfo failing if generated properties doesn't
    include an SVN URL. (stevel)

    YARN-1471. The SLS simulator is not running the preemption policy
    for CapacityScheduler (Carlo Curino via cdouglas)

    YARN-2216 TestRMApplicationHistoryWriter sometimes fails in trunk.
    (Zhijie Shen via xgong)

@@ -299,6 +302,9 @@ Release 2.6.0 - UNRELEASED
    YARN-2519. Credential Provider related unit tests failed on Windows.
    (Xiaoyu Yao via cnauroth)

    YARN-2526. SLS can deadlock when all the threads are taken by AMSimulators.
    (Wei Yan via kasha)

Release 2.5.1 - UNRELEASED

  INCOMPATIBLE CHANGES