diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 36fe52b7b5d..5461a34d379 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -328,6 +328,8 @@ Trunk (Unreleased) HADOOP-10583. bin/hadoop key throws NPE with no args and assorted other fixups. (clamb via tucu) + HADOOP-10586. KeyShell doesn't allow setting Options via CLI. (clamb via tucu) + OPTIMIZATIONS HADOOP-7761. Improve the performance of raw comparisons. (todd) @@ -476,6 +478,9 @@ Release 2.5.0 - UNRELEASED HADOOP-10585. Retry polices ignore interrupted exceptions (Daryn Sharp via jeagles) + HADOOP-10401. ShellBasedUnixGroupsMapping#getGroups does not always return + primary group first (Akira AJISAKA via Colin Patrick McCabe) + Release 2.4.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java index 3d56640e11e..cd6109161b2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java @@ -89,6 +89,8 @@ public int run(String[] args) throws Exception { * @throws IOException */ private int init(String[] args) throws IOException { + final Options options = KeyProvider.options(getConf()); + for (int i = 0; i < args.length; i++) { // parse command line boolean moreTokens = (i < args.length - 1); if (args[i].equals("create")) { @@ -97,7 +99,7 @@ private int init(String[] args) throws IOException { keyName = args[++i]; } - command = new CreateCommand(keyName); + command = new CreateCommand(keyName, options); if ("--help".equals(keyName)) { printKeyShellUsage(); return -1; @@ -127,9 +129,11 @@ private int init(String[] args) throws IOException { } else if ("list".equals(args[i])) { command = new ListCommand(); } else if ("--size".equals(args[i]) && moreTokens) { - getConf().set(KeyProvider.DEFAULT_BITLENGTH_NAME, args[++i]); + options.setBitLength(Integer.parseInt(args[++i])); } else if ("--cipher".equals(args[i]) && moreTokens) { - getConf().set(KeyProvider.DEFAULT_CIPHER_NAME, args[++i]); + options.setCipher(args[++i]); + } else if ("--description".equals(args[i]) && moreTokens) { + options.setDescription(args[++i]); } else if ("--provider".equals(args[i]) && moreTokens) { userSuppliedProvider = true; getConf().set(KeyProviderFactory.KEY_PROVIDER_PATH, args[++i]); @@ -399,6 +403,7 @@ public String getUsage() { private class CreateCommand extends Command { public static final String USAGE = "create [--cipher ] [--size ]\n" + + " [--description ]\n" + " [--provider ] [--help]"; public static final String DESC = "The create subcommand creates a new key for the name specified\n" + @@ -408,10 +413,12 @@ private class CreateCommand extends Command { "The default keysize is 256. 
You may specify the requested key\n" + "length using the --size argument.\n"; - String keyName = null; + final String keyName; + final Options options; - public CreateCommand(String keyName) { + public CreateCommand(String keyName, Options options) { this.keyName = keyName; + this.options = options; } public boolean validate() { @@ -434,7 +441,6 @@ public boolean validate() { public void execute() throws IOException, NoSuchAlgorithmException { warnIfTransientProvider(); try { - Options options = KeyProvider.options(getConf()); provider.createKey(keyName, options); out.println(keyName + " has been successfully created."); provider.flush(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java index c20f054d5e4..b261f7fdedf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java @@ -99,6 +99,21 @@ public FileStatus(long length, boolean isdir, assert (isdir && symlink == null) || !isdir; } + /** + * Copy constructor. + * + * @param other FileStatus to copy + */ + public FileStatus(FileStatus other) throws IOException { + // It's important to call the getters here instead of directly accessing the + // members. Subclasses like ViewFsFileStatus can override the getters. + this(other.getLen(), other.isDirectory(), other.getReplication(), + other.getBlockSize(), other.getModificationTime(), other.getAccessTime(), + other.getPermission(), other.getOwner(), other.getGroup(), + (other.isSymlink() ? other.getSymlink() : null), + other.getPath()); + } + /** * Get the length of this file, in bytes. * @return the length of this file, in bytes. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java new file mode 100644 index 00000000000..2811a89f24d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java @@ -0,0 +1,134 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.permission; + +import java.util.Iterator; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import com.google.common.collect.Lists; + +/** + * AclUtil contains utility methods for manipulating ACLs. 
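+ *
+ * Illustrative usage sketch, not part of this patch; the surrounding
+ * variables (fs, path, fileStatus) are hypothetical:
+ *
+ *   FsPermission perm = fileStatus.getPermission();
+ *   List<AclEntry> extended = fs.getAclStatus(path).getEntries();
+ *   List<AclEntry> fullAcl = AclUtil.getAclFromPermAndEntries(perm, extended);
+ *   boolean minimal = AclUtil.isMinimalAcl(fullAcl);  // true iff exactly 3 entries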
+ */ +@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) +@InterfaceStability.Unstable +public final class AclUtil { + + /** + * Given permissions and extended ACL entries, returns the full logical ACL. + * + * @param perm FsPermission containing permissions + * @param entries List containing extended ACL entries + * @return List containing full logical ACL + */ + public static List getAclFromPermAndEntries(FsPermission perm, + List entries) { + List acl = Lists.newArrayListWithCapacity(entries.size() + 3); + + // Owner entry implied by owner permission bits. + acl.add(new AclEntry.Builder() + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.USER) + .setPermission(perm.getUserAction()) + .build()); + + // All extended access ACL entries. + boolean hasAccessAcl = false; + Iterator entryIter = entries.iterator(); + AclEntry curEntry = null; + while (entryIter.hasNext()) { + curEntry = entryIter.next(); + if (curEntry.getScope() == AclEntryScope.DEFAULT) { + break; + } + hasAccessAcl = true; + acl.add(curEntry); + } + + // Mask entry implied by group permission bits, or group entry if there is + // no access ACL (only default ACL). + acl.add(new AclEntry.Builder() + .setScope(AclEntryScope.ACCESS) + .setType(hasAccessAcl ? AclEntryType.MASK : AclEntryType.GROUP) + .setPermission(perm.getGroupAction()) + .build()); + + // Other entry implied by other bits. + acl.add(new AclEntry.Builder() + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.OTHER) + .setPermission(perm.getOtherAction()) + .build()); + + // Default ACL entries. + if (curEntry != null && curEntry.getScope() == AclEntryScope.DEFAULT) { + acl.add(curEntry); + while (entryIter.hasNext()) { + acl.add(entryIter.next()); + } + } + + return acl; + } + + /** + * Translates the given permission bits to the equivalent minimal ACL. + * + * @param perm FsPermission to translate + * @return List containing exactly 3 entries representing the owner, + * group and other permissions + */ + public static List getMinimalAcl(FsPermission perm) { + return Lists.newArrayList( + new AclEntry.Builder() + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.USER) + .setPermission(perm.getUserAction()) + .build(), + new AclEntry.Builder() + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.GROUP) + .setPermission(perm.getGroupAction()) + .build(), + new AclEntry.Builder() + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.OTHER) + .setPermission(perm.getOtherAction()) + .build()); + } + + /** + * Checks if the given entries represent a minimal ACL (contains exactly 3 + * entries). + * + * @param entries List entries to check + * @return boolean true if the entries represent a minimal ACL + */ + public static boolean isMinimalAcl(List entries) { + return entries.size() == 3; + } + + /** + * There is no reason to instantiate this class. + */ + private AclUtil() { + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java index 28956098c79..ee84437d8e1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java @@ -158,6 +158,17 @@ public short toShort() { return (short)s; } + /** + * Encodes the object to a short. 
Unlike {@link #toShort()}, this method may + * return values outside the fixed range 00000 - 01777 if extended features + * are encoded into this permission, such as the ACL bit. + * + * @return short extended short representation of this permission + */ + public short toExtendedShort() { + return toShort(); + } + @Override public boolean equals(Object obj) { if (obj instanceof FsPermission) { @@ -273,6 +284,16 @@ public boolean getStickyBit() { return stickyBit; } + /** + * Returns true if there is also an ACL (access control list). + * + * @return boolean true if there is also an ACL (access control list). + */ + public boolean getAclBit() { + // File system subclasses that support the ACL bit would override this. + return false; + } + /** Set the user file creation mask (umask) */ public static void setUMask(Configuration conf, FsPermission umask) { conf.set(UMASK_LABEL, String.format("%1$03o", umask.toShort())); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ScopedAclEntries.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/ScopedAclEntries.java similarity index 93% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ScopedAclEntries.java rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/ScopedAclEntries.java index d841d3689a4..a16f4393652 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ScopedAclEntries.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/ScopedAclEntries.java @@ -15,12 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.server.namenode; +package org.apache.hadoop.fs.permission; import java.util.Collections; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclEntryScope; @@ -28,8 +29,9 @@ * Groups a list of ACL entries into separate lists for access entries vs. * default entries. 
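 *
 * Illustrative usage sketch, not part of this patch; assumes a correctly
 * sorted ACL in which access entries precede default entries:
 *
 *   ScopedAclEntries scoped = new ScopedAclEntries(aclEntries);
 *   List<AclEntry> accessEntries = scoped.getAccessEntries();
 *   List<AclEntry> defaultEntries = scoped.getDefaultEntries();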
*/ -@InterfaceAudience.Private -final class ScopedAclEntries { +@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) +@InterfaceStability.Unstable +public final class ScopedAclEntries { private static final int PIVOT_NOT_FOUND = -1; private final List accessEntries; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java index f17457cafde..5aa285c2965 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.shell; import java.io.IOException; -import java.util.Iterator; +import java.util.Collections; import java.util.LinkedList; import java.util.List; @@ -31,8 +31,10 @@ import org.apache.hadoop.fs.permission.AclEntryScope; import org.apache.hadoop.fs.permission.AclEntryType; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.AclUtil; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.ScopedAclEntries; /** * Acl related operations @@ -75,84 +77,43 @@ protected void processOptions(LinkedList args) throws IOException { @Override protected void processPath(PathData item) throws IOException { - AclStatus aclStatus = item.fs.getAclStatus(item.path); out.println("# file: " + item); - out.println("# owner: " + aclStatus.getOwner()); - out.println("# group: " + aclStatus.getGroup()); - List entries = aclStatus.getEntries(); - if (aclStatus.isStickyBit()) { - String stickyFlag = "T"; - for (AclEntry aclEntry : entries) { - if (aclEntry.getType() == AclEntryType.OTHER - && aclEntry.getScope() == AclEntryScope.ACCESS - && aclEntry.getPermission().implies(FsAction.EXECUTE)) { - stickyFlag = "t"; - break; - } - } - out.println("# flags: --" + stickyFlag); - } - + out.println("# owner: " + item.stat.getOwner()); + out.println("# group: " + item.stat.getGroup()); FsPermission perm = item.stat.getPermission(); - if (entries.isEmpty()) { - printMinimalAcl(perm); - } else { - printExtendedAcl(perm, entries); + if (perm.getStickyBit()) { + out.println("# flags: --" + + (perm.getOtherAction().implies(FsAction.EXECUTE) ? "t" : "T")); } + List entries = perm.getAclBit() ? + item.fs.getAclStatus(item.path).getEntries() : + Collections.emptyList(); + ScopedAclEntries scopedEntries = new ScopedAclEntries( + AclUtil.getAclFromPermAndEntries(perm, entries)); + printAclEntriesForSingleScope(scopedEntries.getAccessEntries()); + printAclEntriesForSingleScope(scopedEntries.getDefaultEntries()); out.println(); } /** - * Prints an extended ACL, including all extended ACL entries and also the - * base entries implied by the permission bits. + * Prints all the ACL entries in a single scope. * - * @param perm FsPermission of file * @param entries List containing ACL entries of file */ - private void printExtendedAcl(FsPermission perm, List entries) { - // Print owner entry implied by owner permission bits. - out.println(new AclEntry.Builder() - .setScope(AclEntryScope.ACCESS) - .setType(AclEntryType.USER) - .setPermission(perm.getUserAction()) - .build()); - - // Print all extended access ACL entries. 
- boolean hasAccessAcl = false; - Iterator entryIter = entries.iterator(); - AclEntry curEntry = null; - while (entryIter.hasNext()) { - curEntry = entryIter.next(); - if (curEntry.getScope() == AclEntryScope.DEFAULT) { - break; - } - hasAccessAcl = true; - printExtendedAclEntry(curEntry, perm.getGroupAction()); + private void printAclEntriesForSingleScope(List entries) { + if (entries.isEmpty()) { + return; } - - // Print mask entry implied by group permission bits, or print group entry - // if there is no access ACL (only default ACL). - out.println(new AclEntry.Builder() - .setScope(AclEntryScope.ACCESS) - .setType(hasAccessAcl ? AclEntryType.MASK : AclEntryType.GROUP) - .setPermission(perm.getGroupAction()) - .build()); - - // Print other entry implied by other bits. - out.println(new AclEntry.Builder() - .setScope(AclEntryScope.ACCESS) - .setType(AclEntryType.OTHER) - .setPermission(perm.getOtherAction()) - .build()); - - // Print default ACL entries. - if (curEntry != null && curEntry.getScope() == AclEntryScope.DEFAULT) { - out.println(curEntry); - // ACL sort order guarantees default mask is the second-to-last entry. + if (AclUtil.isMinimalAcl(entries)) { + for (AclEntry entry: entries) { + out.println(entry); + } + } else { + // ACL sort order guarantees mask is the second-to-last entry. FsAction maskPerm = entries.get(entries.size() - 2).getPermission(); - while (entryIter.hasNext()) { - printExtendedAclEntry(entryIter.next(), maskPerm); + for (AclEntry entry: entries) { + printExtendedAclEntry(entry, maskPerm); } } } @@ -180,30 +141,6 @@ private void printExtendedAclEntry(AclEntry entry, FsAction maskPerm) { out.println(entry); } } - - /** - * Prints a minimal ACL, consisting of exactly 3 ACL entries implied by the - * permission bits. - * - * @param perm FsPermission of file - */ - private void printMinimalAcl(FsPermission perm) { - out.println(new AclEntry.Builder() - .setScope(AclEntryScope.ACCESS) - .setType(AclEntryType.USER) - .setPermission(perm.getUserAction()) - .build()); - out.println(new AclEntry.Builder() - .setScope(AclEntryScope.ACCESS) - .setType(AclEntryType.GROUP) - .setPermission(perm.getGroupAction()) - .build()); - out.println(new AclEntry.Builder() - .setScope(AclEntryScope.ACCESS) - .setType(AclEntryType.OTHER) - .setPermission(perm.getOtherAction()) - .build()); - } } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java index dff7719afe0..b2a1fbd806f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java @@ -31,8 +31,6 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.ipc.RpcNoSuchMethodException; import com.google.common.collect.Sets; @@ -116,7 +114,7 @@ protected void processPath(PathData item) throws IOException { FileStatus stat = item.stat; String line = String.format(lineFormat, (stat.isDirectory() ? "d" : "-"), - stat.getPermission() + (hasAcl(item) ? "+" : " "), + stat.getPermission() + (stat.getPermission().getAclBit() ? "+" : " "), (stat.isFile() ? 
stat.getReplication() : "-"), stat.getOwner(), stat.getGroup(), @@ -153,49 +151,6 @@ private void adjustColumnWidths(PathData items[]) { lineFormat = fmt.toString(); } - /** - * Calls getAclStatus to determine if the given item has an ACL. For - * compatibility, this method traps errors caused by the RPC method missing - * from the server side. This would happen if the client was connected to an - * old NameNode that didn't have the ACL APIs. This method also traps the - * case of the client-side FileSystem not implementing the ACL APIs. - * FileSystem instances that do not support ACLs are remembered. This - * prevents the client from sending multiple failing RPC calls during a - * recursive ls. - * - * @param item PathData item to check - * @return boolean true if item has an ACL - * @throws IOException if there is a failure - */ - private boolean hasAcl(PathData item) throws IOException { - FileSystem fs = item.fs; - if (aclNotSupportedFsSet.contains(fs.getUri())) { - // This FileSystem failed to run the ACL API in an earlier iteration. - return false; - } - try { - return !fs.getAclStatus(item.path).getEntries().isEmpty(); - } catch (RemoteException e) { - // If this is a RpcNoSuchMethodException, then the client is connected to - // an older NameNode that doesn't support ACLs. Keep going. - IOException e2 = e.unwrapRemoteException(RpcNoSuchMethodException.class); - if (!(e2 instanceof RpcNoSuchMethodException)) { - throw e; - } - } catch (IOException e) { - // The NameNode supports ACLs, but they are not enabled. Keep going. - String message = e.getMessage(); - if (message != null && !message.contains("ACLs has been disabled")) { - throw e; - } - } catch (UnsupportedOperationException e) { - // The underlying FileSystem doesn't implement ACLs. Keep going. - } - // Remember that this FileSystem cannot support ACLs. - aclNotSupportedFsSet.add(fs.getUri()); - return false; - } - private int maxLength(int n, Object value) { return Math.max(n, (value != null) ? 
String.valueOf(value).length() : 0); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java index d3a291c4a46..8555ad7d2c6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.io.PrintStream; import java.util.Arrays; +import java.util.Collection; import java.util.Map; import org.apache.commons.cli.Options; @@ -33,6 +34,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; import org.apache.hadoop.ha.HAServiceProtocol.RequestSource; import org.apache.hadoop.util.Tool; @@ -66,7 +68,7 @@ public abstract class HAAdmin extends Configured implements Tool { protected final static Map USAGE = ImmutableMap.builder() .put("-transitionToActive", - new UsageInfo("", "Transitions the service into Active state")) + new UsageInfo(" [--"+FORCEACTIVE+"]", "Transitions the service into Active state")) .put("-transitionToStandby", new UsageInfo("", "Transitions the service into Standby state")) .put("-failover", @@ -100,6 +102,10 @@ protected HAAdmin(Configuration conf) { } protected abstract HAServiceTarget resolveTarget(String string); + + protected Collection getTargetIds(String targetNodeToActivate) { + return Arrays.asList(new String[]{targetNodeToActivate}); + } protected String getUsageString() { return "Usage: HAAdmin"; @@ -133,6 +139,11 @@ private int transitionToActive(final CommandLine cmd) printUsage(errOut, "-transitionToActive"); return -1; } + /* returns true if other target node is active or some exception occurred + and forceActive was not set */ + if(isOtherTargetNodeActive(argv[0], cmd.hasOption(FORCEACTIVE))) { + return -1; + } HAServiceTarget target = resolveTarget(argv[0]); if (!checkManualStateManagementOK(target)) { return -1; @@ -142,7 +153,48 @@ private int transitionToActive(final CommandLine cmd) HAServiceProtocolHelper.transitionToActive(proto, createReqInfo()); return 0; } - + + /** + * Checks whether other target node is active or not + * @param targetNodeToActivate + * @return true if other target node is active or some other exception + * occurred and forceActive was set otherwise false + * @throws IOException + */ + private boolean isOtherTargetNodeActive(String targetNodeToActivate, boolean forceActive) + throws IOException { + Collection targetIds = getTargetIds(targetNodeToActivate); + if(targetIds == null) { + errOut.println("transitionToActive: No target node in the " + + "current configuration"); + printUsage(errOut, "-transitionToActive"); + return true; + } + targetIds.remove(targetNodeToActivate); + for(String targetId : targetIds) { + HAServiceTarget target = resolveTarget(targetId); + if (!checkManualStateManagementOK(target)) { + return true; + } + try { + HAServiceProtocol proto = target.getProxy(getConf(), 5000); + if(proto.getServiceStatus().getState() == HAServiceState.ACTIVE) { + errOut.println("transitionToActive: Node " + targetId +" is already active"); + printUsage(errOut, "-transitionToActive"); + return true; + } + } catch (Exception e) { + //If forceActive switch is false then return true + if(!forceActive) { + 
errOut.println("Unexpected error occurred " + e.getMessage()); + printUsage(errOut, "-transitionToActive"); + return true; + } + } + } + return false; + } + private int transitionToStandby(final CommandLine cmd) throws IOException, ServiceFailedException { String[] argv = cmd.getArgs(); @@ -364,6 +416,9 @@ protected int runCmd(String[] argv) throws Exception { if ("-failover".equals(cmd)) { addFailoverCliOpts(opts); } + if("-transitionToActive".equals(cmd)) { + addTransitionToActiveCliOpts(opts); + } // Mutative commands take FORCEMANUAL option if ("-transitionToActive".equals(cmd) || "-transitionToStandby".equals(cmd) || @@ -433,6 +488,14 @@ private void addFailoverCliOpts(Options failoverOpts) { // that change state. } + /** + * Add CLI options which are specific to the transitionToActive command and + * no others. + */ + private void addTransitionToActiveCliOpts(Options transitionToActiveCliOpts) { + transitionToActiveCliOpts.addOption(FORCEACTIVE, false, "force active"); + } + private CommandLine parseOpts(String cmdName, Options opts, String[] argv) { try { // Strip off the first arg, since that's just the command name diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java index 3689ebaa06e..11056eb00f6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java @@ -74,7 +74,8 @@ public void cacheGroupsAdd(List groups) throws IOException { * Get the current user's group list from Unix by running the command 'groups' * NOTE. For non-existing user it will return EMPTY list * @param user user name - * @return the groups list that the user belongs to + * @return the groups list that the user belongs to. The primary + * group is returned first. * @throws IOException if encounter any error when running the command */ private static List getUnixGroups(final String user) throws IOException { @@ -84,6 +85,7 @@ private static List getUnixGroups(final String user) throws IOException } catch (ExitCodeException e) { // if we didn't get the group - just return empty list; LOG.warn("got exception trying to get groups for user " + user, e); + return new LinkedList(); } StringTokenizer tokenizer = @@ -92,6 +94,17 @@ private static List getUnixGroups(final String user) throws IOException while (tokenizer.hasMoreTokens()) { groups.add(tokenizer.nextToken()); } + + // remove duplicated primary group + if (!Shell.WINDOWS) { + for (int i = 1; i < groups.size(); i++) { + if (groups.get(i).equals(groups.get(0))) { + groups.remove(i); + break; + } + } + } + return groups; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java index 61c0e8ddc31..0117fe5f117 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java @@ -132,11 +132,17 @@ public static String[] getGroupsCommand() { : new String[]{"bash", "-c", "groups"}; } - /** a Unix command to get a given user's groups list */ + /** + * a Unix command to get a given user's groups list. 
+ * If the OS is not WINDOWS, the command will get the user's primary group + * first and finally get the groups list which includes the primary group. + * i.e. the user's primary group will be included twice. + */ public static String[] getGroupsForUserCommand(final String user) { //'groups username' command return is non-consistent across different unixes return (WINDOWS)? new String[] { WINUTILS, "groups", "-F", "\"" + user + "\""} - : new String [] {"bash", "-c", "id -Gn " + user}; + : new String [] {"bash", "-c", "id -gn " + user + + "&& id -Gn " + user}; } /** a Unix command to get a given netgroup's user list */ diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java index ae6938eb68c..50f68f4dd38 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java @@ -111,6 +111,30 @@ public void testKeySuccessfulKeyLifecycle() throws Exception { assertFalse(outContent.toString(), outContent.toString().contains("key1")); } + /* HADOOP-10586 KeyShell didn't allow -description. */ + @Test + public void testKeySuccessfulCreationWithDescription() throws Exception { + outContent.reset(); + String[] args1 = {"create", "key1", "--provider", + "jceks://file" + tmpDir + "/keystore.jceks", + "--description", "someDescription"}; + int rc = 0; + KeyShell ks = new KeyShell(); + ks.setConf(new Configuration()); + rc = ks.run(args1); + assertEquals(0, rc); + assertTrue(outContent.toString().contains("key1 has been successfully " + + "created.")); + + outContent.reset(); + String[] args2a = {"list", "--metadata", "--provider", + "jceks://file" + tmpDir + "/keystore.jceks"}; + rc = ks.run(args2a); + assertEquals(0, rc); + assertTrue(outContent.toString().contains("description")); + assertTrue(outContent.toString().contains("someDescription")); + } + @Test public void testInvalidKeySize() throws Exception { String[] args1 = {"create", "key1", "--size", "56", "--provider", diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java index 34ed1c14fd1..1abea8efa59 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java @@ -113,7 +113,23 @@ private static void reportDuplicateEntry(final String header, "The new entry is to be ignored for the following reason.", DUPLICATE_NAME_ID_DEBUG_INFO)); } - + + /** + * uid and gid are defined as uint32 in linux. Some systems create + * (intended or unintended) kind of + * mapping, where 4294967294 is 2**32-2 as unsigned int32. As an example, + * https://bugzilla.redhat.com/show_bug.cgi?id=511876. + * Because user or group id are treated as Integer (signed integer or int32) + * here, the number 4294967294 is out of range. The solution is to convert + * uint32 to int32, so to map the out-of-range ID to the negative side of + * Integer, e.g. 4294967294 maps to -2 and 4294967295 maps to -1. 
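+ *
+ * A minimal sketch of the conversion, illustrative only and mirroring the
+ * parseId method below:
+ *
+ *   (int) Long.parseLong("4294967294")  // evaluates to -2
+ *   (int) Long.parseLong("4294967295")  // evaluates to -1
+ *   (int) Long.parseLong("2147483648")  // evaluates to Integer.MIN_VALUE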
+ */ + private static Integer parseId(final String idStr) { + Long longVal = Long.parseLong(idStr); + int intVal = longVal.intValue(); + return Integer.valueOf(intVal); + } + /** * Get the whole list of users and groups and save them in the maps. * @throws IOException @@ -134,8 +150,8 @@ public static void updateMapInternal(BiMap map, String mapName, } LOG.debug("add to " + mapName + "map:" + nameId[0] + " id:" + nameId[1]); // HDFS can't differentiate duplicate names with simple authentication - final Integer key = Integer.valueOf(nameId[1]); - final String value = nameId[0]; + final Integer key = parseId(nameId[1]); + final String value = nameId[0]; if (map.containsKey(key)) { final String prevValue = map.get(key); if (value.equals(prevValue)) { diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java index c1aba856c55..77477ff1693 100644 --- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java +++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java @@ -66,6 +66,51 @@ public void testDuplicates() throws IOException { assertEquals("mapred3", gMap.get(498)); } + @Test + public void testIdOutOfIntegerRange() throws IOException { + String GET_ALL_USERS_CMD = "echo \"" + + "nfsnobody:x:4294967294:4294967294:Anonymous NFS User:/var/lib/nfs:/sbin/nologin\n" + + "nfsnobody1:x:4294967295:4294967295:Anonymous NFS User:/var/lib/nfs1:/sbin/nologin\n" + + "maxint:x:2147483647:2147483647:Grid Distributed File System:/home/maxint:/bin/bash\n" + + "minint:x:2147483648:2147483648:Grid Distributed File System:/home/minint:/bin/bash\n" + + "archivebackup:*:1031:4294967294:Archive Backup:/home/users/archivebackup:/bin/sh\n" + + "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n" + + "daemon:x:2:2:daemon:/sbin:/sbin/nologin\"" + + " | cut -d: -f1,3"; + String GET_ALL_GROUPS_CMD = "echo \"" + + "hdfs:*:11501:hrt_hdfs\n" + + "rpcuser:*:29:\n" + + "nfsnobody:*:4294967294:\n" + + "nfsnobody1:*:4294967295:\n" + + "maxint:*:2147483647:\n" + + "minint:*:2147483648:\n" + + "mapred3:x:498\"" + + " | cut -d: -f1,3"; + // Maps for id to name map + BiMap uMap = HashBiMap.create(); + BiMap gMap = HashBiMap.create(); + + IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":"); + assertTrue(uMap.size() == 7); + assertEquals("nfsnobody", uMap.get(-2)); + assertEquals("nfsnobody1", uMap.get(-1)); + assertEquals("maxint", uMap.get(2147483647)); + assertEquals("minint", uMap.get(-2147483648)); + assertEquals("archivebackup", uMap.get(1031)); + assertEquals("hdfs",uMap.get(11501)); + assertEquals("daemon", uMap.get(2)); + + IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":"); + assertTrue(gMap.size() == 7); + assertEquals("hdfs",gMap.get(11501)); + assertEquals("rpcuser", gMap.get(29)); + assertEquals("nfsnobody", gMap.get(-2)); + assertEquals("nfsnobody1", gMap.get(-1)); + assertEquals("maxint", gMap.get(2147483647)); + assertEquals("minint", gMap.get(-2147483648)); + assertEquals("mapred3", gMap.get(498)); + } + @Test public void testUserUpdateSetting() throws IOException { IdUserGroup iug = new IdUserGroup(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 553e555654c..e6f1c4edd37 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -356,6 +356,19 @@ Release 2.5.0 - UNRELEASED HDFS-6186. Pause deletion of blocks when the namenode starts up. (jing9) + HDFS-6293. Issues with OIV processing PB-based fsimages. (kihwal) + + HDFS-2949. Add check to active state transition to prevent operator-induced + split brain. (Rushabh S Shah via kihwal) + + HDFS-6287. Add vecsum test of libhdfs read access times (cmccabe) + + HDFS-5683. Better audit log messages for caching operations. + (Abhiraj Butala via wang) + + HDFS-6345. DFS.listCacheDirectives() should allow filtering based on + cache directive ID. (wang) + OPTIMIZATIONS HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn) @@ -472,6 +485,13 @@ Release 2.5.0 - UNRELEASED HDFS-6370. Web UI fails to display in intranet under IE. (Haohui Mai via cnauroth) + HDFS-6381. Fix a typo in INodeReference.java. (Binglin Chang via jing9) + + HDFS-6400. Cannot execute hdfs oiv_legacy. (Akira AJISAKA via kihwal) + + HDFS-6250. Fix test failed in TestBalancerWithNodeGroup.testBalancerWithRackLocality + (Binglin Chang and Chen He via junping_du) + Release 2.4.1 - UNRELEASED INCOMPATIBLE CHANGES @@ -538,6 +558,17 @@ Release 2.4.1 - UNRELEASED HDFS-6313. WebHdfs may use the wrong NN when configured for multiple HA NNs (kihwal) + HDFS-6326. WebHdfs ACL compatibility is broken. (cnauroth) + + HDFS-6361. TestIdUserGroup.testUserUpdateSetting failed due to out of range + nfsnobody Id. (Yongjun Zhang via brandonli) + + HDFS-6362. InvalidateBlocks is inconsistent in usage of DatanodeUuid and + StorageID. (Arpit Agarwal) + + HDFS-6402. Suppress findbugs warning for failure to override equals and + hashCode in FsAclPermission. (cnauroth) + Release 2.4.0 - 2014-04-07 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt index 82d1a324f05..fc5ebea4c0f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt @@ -62,6 +62,9 @@ endfunction() INCLUDE(CheckCSourceCompiles) CHECK_C_SOURCE_COMPILES("int main(void) { static __thread int i = 0; return 0; }" HAVE_BETTER_TLS) +# Check to see if we have Intel SSE intrinsics. +CHECK_C_SOURCE_COMPILES("#include \nint main(void) { __m128d sum0 = _mm_set_pd(0.0,0.0); return 0; }" HAVE_INTEL_SSE_INTRINSICS) + # Check if we need to link dl library to get dlopen. 
# dlopen on Linux is in separate library but on FreeBSD its in libc INCLUDE(CheckLibraryExists) @@ -170,6 +173,15 @@ target_link_libraries(test_libhdfs_zerocopy pthread ) +add_executable(test_libhdfs_vecsum + main/native/libhdfs/test/vecsum.c +) +target_link_libraries(test_libhdfs_vecsum + hdfs + pthread + rt +) + IF(REQUIRE_LIBWEBHDFS) add_subdirectory(contrib/libwebhdfs) ENDIF(REQUIRE_LIBWEBHDFS) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake b/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake index ac0b5308cc3..0d11fc4623d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake +++ b/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake @@ -22,4 +22,6 @@ #cmakedefine HAVE_BETTER_TLS +#cmakedefine HAVE_INTEL_SSE_INTRINSICS + #endif diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs index 2c992dc23b7..fa2d863a75c 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs @@ -49,6 +49,7 @@ function print_usage(){ echo " balancer run a cluster balancing utility" echo " jmxget get JMX exported values from NameNode or DataNode." echo " oiv apply the offline fsimage viewer to an fsimage" + echo " oiv_legacy apply the offline fsimage viewer to an legacy fsimage" echo " oev apply the offline edits viewer to an edits file" echo " fetchdt fetch a delegation token from the NameNode" echo " getconf get config values from configuration" @@ -161,6 +162,8 @@ elif [ "$COMMAND" = "jmxget" ] ; then CLASS=org.apache.hadoop.hdfs.tools.JMXGet elif [ "$COMMAND" = "oiv" ] ; then CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB +elif [ "$COMMAND" = "oiv_legacy" ] ; then + CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer elif [ "$COMMAND" = "oev" ] ; then CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer elif [ "$COMMAND" = "fetchdt" ] ; then diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 6b4a02ca339..86268275f86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -497,6 +497,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_SECONDARY_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY = "dfs.secondary.namenode.kerberos.internal.spnego.principal"; public static final String DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY = "dfs.namenode.name.cache.threshold"; public static final int DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT = 10; + public static final String DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY = "dfs.namenode.legacy-oiv-image.dir"; public static final String DFS_NAMESERVICES = "dfs.nameservices"; public static final String DFS_NAMESERVICE_ID = "dfs.nameservice.id"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java index 1ef5c538e45..676106de10f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java @@ -23,6 +23,10 @@ import 
org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.BatchedRemoteIterator; +import org.apache.hadoop.fs.InvalidRequestException; +import org.apache.hadoop.ipc.RemoteException; + +import com.google.common.base.Preconditions; /** * CacheDirectiveIterator is a remote iterator that iterates cache directives. @@ -33,7 +37,7 @@ public class CacheDirectiveIterator extends BatchedRemoteIterator { - private final CacheDirectiveInfo filter; + private CacheDirectiveInfo filter; private final ClientProtocol namenode; public CacheDirectiveIterator(ClientProtocol namenode, @@ -43,10 +47,72 @@ public CacheDirectiveIterator(ClientProtocol namenode, this.filter = filter; } + private static CacheDirectiveInfo removeIdFromFilter(CacheDirectiveInfo filter) { + CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder(filter); + builder.setId(null); + return builder.build(); + } + + /** + * Used for compatibility when communicating with a server version that + * does not support filtering directives by ID. + */ + private static class SingleEntry implements + BatchedEntries { + + private final CacheDirectiveEntry entry; + + public SingleEntry(final CacheDirectiveEntry entry) { + this.entry = entry; + } + + @Override + public CacheDirectiveEntry get(int i) { + if (i > 0) { + return null; + } + return entry; + } + + @Override + public int size() { + return 1; + } + + @Override + public boolean hasMore() { + return false; + } + } + @Override public BatchedEntries makeRequest(Long prevKey) throws IOException { - return namenode.listCacheDirectives(prevKey, filter); + BatchedEntries entries = null; + try { + entries = namenode.listCacheDirectives(prevKey, filter); + } catch (IOException e) { + if (e.getMessage().contains("Filtering by ID is unsupported")) { + // Retry case for old servers, do the filtering client-side + long id = filter.getId(); + filter = removeIdFromFilter(filter); + // Using id - 1 as prevId should get us a window containing the id + // This is somewhat brittle, since it depends on directives being + // returned in order of ascending ID. 
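+ // Illustrative example, not part of this patch: if the caller filtered on
+ // id 42 and the server rejects ID filtering, the code below retries with
+ // prevKey 41 and no id in the filter, then expects the matching directive,
+ // if present, to be the first entry of the returned batch.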
+ entries = namenode.listCacheDirectives(id - 1, filter); + for (int i=0; i iter = currentTokens.keySet() + .iterator(); + while (iter.hasNext()) { + DelegationTokenIdentifier id = iter.next(); + id.write(out); + DelegationTokenInformation info = currentTokens.get(id); + out.writeLong(info.getRenewDate()); + counter.increment(); + } + prog.endStep(Phase.SAVING_CHECKPOINT, step); + } + + /* + * Save the current state of allKeys + */ + private synchronized void saveAllKeys(DataOutputStream out, String sdPath) + throws IOException { + StartupProgress prog = NameNode.getStartupProgress(); + Step step = new Step(StepType.DELEGATION_KEYS, sdPath); + prog.beginStep(Phase.SAVING_CHECKPOINT, step); + prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size()); + Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step); + out.writeInt(allKeys.size()); + Iterator iter = allKeys.keySet().iterator(); + while (iter.hasNext()) { + Integer key = iter.next(); + allKeys.get(key).write(out); + counter.increment(); + } + prog.endStep(Phase.SAVING_CHECKPOINT, step); + } + /** * Private helper methods to load Delegation tokens from fsimage */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java index 1ab7f9148b7..25784a26cfa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java @@ -170,7 +170,7 @@ DataEncryptionKey getDataEncryptionKey() } /* The idea for making sure that there is no more than one balancer - * running in an HDFS is to create a file in the HDFS, writes the IP address + * running in an HDFS is to create a file in the HDFS, writes the hostname * of the machine on which the balancer is running to the file, but did not * close the file until the balancer exits. * This prevents the second balancer from running because it can not diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 2610768dd10..e998ae8a32f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -265,7 +265,8 @@ public BlockManager(final Namesystem namesystem, final FSClusterStats stats, final long pendingPeriod = conf.getLong( DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_MS_KEY, DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_MS_DEFAULT); - invalidateBlocks = new InvalidateBlocks(datanodeManager, pendingPeriod); + invalidateBlocks = new InvalidateBlocks( + datanodeManager.blockInvalidateLimit, pendingPeriod); // Compute the map capacity by allocating 2% of total memory blocksMap = new BlocksMap( @@ -701,7 +702,7 @@ public LocatedBlock convertLastBlockToUnderConstruction( // remove this block from the list of pending blocks to be deleted. 
for (DatanodeStorageInfo storage : targets) { - invalidateBlocks.remove(storage.getStorageID(), oldBlock); + invalidateBlocks.remove(storage.getDatanodeDescriptor(), oldBlock); } // Adjust safe-mode totals, since under-construction blocks don't @@ -726,7 +727,7 @@ private List getValidLocations(Block block) { for(DatanodeStorageInfo storage : blocksMap.getStorages(block)) { final String storageID = storage.getStorageID(); // filter invalidate replicas - if(!invalidateBlocks.contains(storageID, block)) { + if(!invalidateBlocks.contains(storage.getDatanodeDescriptor(), block)) { locations.add(storage); } } @@ -1016,7 +1017,7 @@ void removeBlocksAssociatedTo(final DatanodeDescriptor node) { pendingDNMessages.removeAllMessagesForDatanode(node); node.resetBlocks(); - invalidateBlocks.remove(node.getDatanodeUuid()); + invalidateBlocks.remove(node); // If the DN hasn't block-reported since the most recent // failover, then we may have been holding up on processing @@ -1184,7 +1185,7 @@ public int getUnderReplicatedNotMissingBlocks() { * @return total number of block for deletion */ int computeInvalidateWork(int nodesToProcess) { - final List nodes = invalidateBlocks.getStorageIDs(); + final List nodes = invalidateBlocks.getDatanodes(); Collections.shuffle(nodes); nodesToProcess = Math.min(nodes.size(), nodesToProcess); @@ -1973,7 +1974,7 @@ private BlockInfo processReportedBlock(final DatanodeDescriptor dn, } // Ignore replicas already scheduled to be removed from the DN - if(invalidateBlocks.contains(dn.getDatanodeUuid(), block)) { + if(invalidateBlocks.contains(dn, block)) { /* * TODO: following assertion is incorrect, see HDFS-2668 assert * storedBlock.findDatanode(dn) < 0 : "Block " + block + @@ -3199,9 +3200,8 @@ private int getReplication(Block block) { * * @return number of blocks scheduled for removal during this iteration. */ - private int invalidateWorkForOneNode(String nodeId) { + private int invalidateWorkForOneNode(DatanodeInfo dn) { final List toInvalidate; - final DatanodeDescriptor dn; namesystem.writeLock(); try { @@ -3210,15 +3210,13 @@ private int invalidateWorkForOneNode(String nodeId) { LOG.debug("In safemode, not computing replication work"); return 0; } - // get blocks to invalidate for the nodeId - assert nodeId != null; - dn = datanodeManager.getDatanode(nodeId); - if (dn == null) { - invalidateBlocks.remove(nodeId); - return 0; - } - toInvalidate = invalidateBlocks.invalidateWork(nodeId, dn); - if (toInvalidate == null) { + try { + toInvalidate = invalidateBlocks.invalidateWork(datanodeManager.getDatanode(dn)); + + if (toInvalidate == null) { + return 0; + } + } catch(UnregisteredNodeException une) { return 0; } } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java index 1f248b9630a..8aca559f899 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java @@ -44,13 +44,13 @@ */ @InterfaceAudience.Private class InvalidateBlocks { - /** Mapping: StorageID -> Collection of Blocks */ - private final Map> node2blocks = - new TreeMap>(); + /** Mapping: DatanodeInfo -> Collection of Blocks */ + private final Map> node2blocks = + new TreeMap>(); /** The total number of blocks in the map. 
*/ private long numBlocks = 0L; - private final DatanodeManager datanodeManager; + private final int blockInvalidateLimit; /** * The period of pending time for block invalidation since the NameNode @@ -60,8 +60,8 @@ class InvalidateBlocks { /** the startup time */ private final long startupTime = Time.monotonicNow(); - InvalidateBlocks(final DatanodeManager datanodeManager, long pendingPeriodInMs) { - this.datanodeManager = datanodeManager; + InvalidateBlocks(final int blockInvalidateLimit, long pendingPeriodInMs) { + this.blockInvalidateLimit = blockInvalidateLimit; this.pendingPeriodInMs = pendingPeriodInMs; printBlockDeletionTime(BlockManager.LOG); } @@ -86,12 +86,9 @@ synchronized long numBlocks() { * invalidation. Blocks are compared including their generation stamps: * if a block is pending invalidation but with a different generation stamp, * returns false. - * @param storageID the storage to check - * @param the block to look for - * */ - synchronized boolean contains(final String storageID, final Block block) { - final LightWeightHashSet s = node2blocks.get(storageID); + synchronized boolean contains(final DatanodeInfo dn, final Block block) { + final LightWeightHashSet s = node2blocks.get(dn); if (s == null) { return false; // no invalidate blocks for this storage ID } @@ -106,10 +103,10 @@ synchronized boolean contains(final String storageID, final Block block) { */ synchronized void add(final Block block, final DatanodeInfo datanode, final boolean log) { - LightWeightHashSet set = node2blocks.get(datanode.getDatanodeUuid()); + LightWeightHashSet set = node2blocks.get(datanode); if (set == null) { set = new LightWeightHashSet(); - node2blocks.put(datanode.getDatanodeUuid(), set); + node2blocks.put(datanode, set); } if (set.add(block)) { numBlocks++; @@ -121,20 +118,20 @@ synchronized void add(final Block block, final DatanodeInfo datanode, } /** Remove a storage from the invalidatesSet */ - synchronized void remove(final String storageID) { - final LightWeightHashSet blocks = node2blocks.remove(storageID); + synchronized void remove(final DatanodeInfo dn) { + final LightWeightHashSet blocks = node2blocks.remove(dn); if (blocks != null) { numBlocks -= blocks.size(); } } /** Remove the block from the specified storage. */ - synchronized void remove(final String storageID, final Block block) { - final LightWeightHashSet v = node2blocks.get(storageID); + synchronized void remove(final DatanodeInfo dn, final Block block) { + final LightWeightHashSet v = node2blocks.get(dn); if (v != null && v.remove(block)) { numBlocks--; if (v.isEmpty()) { - node2blocks.remove(storageID); + node2blocks.remove(dn); } } } @@ -148,18 +145,18 @@ synchronized void dump(final PrintWriter out) { return; } - for(Map.Entry> entry : node2blocks.entrySet()) { + for(Map.Entry> entry : node2blocks.entrySet()) { final LightWeightHashSet blocks = entry.getValue(); if (blocks.size() > 0) { - out.println(datanodeManager.getDatanode(entry.getKey())); + out.println(entry.getKey()); out.println(blocks); } } } /** @return a list of the storage IDs. 
*/ - synchronized List getStorageIDs() { - return new ArrayList(node2blocks.keySet()); + synchronized List getDatanodes() { + return new ArrayList(node2blocks.keySet()); } /** @@ -170,8 +167,7 @@ long getInvalidationDelay() { return pendingPeriodInMs - (Time.monotonicNow() - startupTime); } - synchronized List invalidateWork( - final String storageId, final DatanodeDescriptor dn) { + synchronized List invalidateWork(final DatanodeDescriptor dn) { final long delay = getInvalidationDelay(); if (delay > 0) { if (BlockManager.LOG.isDebugEnabled()) { @@ -181,18 +177,18 @@ synchronized List invalidateWork( } return null; } - final LightWeightHashSet set = node2blocks.get(storageId); + final LightWeightHashSet set = node2blocks.get(dn); if (set == null) { return null; } // # blocks that can be sent in one message is limited - final int limit = datanodeManager.blockInvalidateLimit; + final int limit = blockInvalidateLimit; final List toInvalidate = set.pollN(limit); // If we send everything in this message, remove this node entry if (set.isEmpty()) { - remove(storageId); + remove(dn); } dn.addBlocksToBeInvalidated(toInvalidate); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java index a79bb393f9f..841fcb8e43b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java @@ -27,8 +27,10 @@ import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclEntryScope; import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.AclUtil; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.ScopedAclEntries; import org.apache.hadoop.hdfs.protocol.AclException; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; @@ -90,7 +92,7 @@ public static void copyINodeDefaultAcl(INode child) { FsPermission childPerm = child.getFsPermission(); // Copy each default ACL entry from parent to new child's access ACL. - boolean parentDefaultIsMinimal = isMinimalAcl(parentDefaultEntries); + boolean parentDefaultIsMinimal = AclUtil.isMinimalAcl(parentDefaultEntries); for (AclEntry entry: parentDefaultEntries) { AclEntryType type = entry.getType(); String name = entry.getName(); @@ -127,7 +129,7 @@ public static void copyINodeDefaultAcl(INode child) { Collections.emptyList(); final FsPermission newPerm; - if (!isMinimalAcl(accessEntries) || !defaultEntries.isEmpty()) { + if (!AclUtil.isMinimalAcl(accessEntries) || !defaultEntries.isEmpty()) { // Save the new ACL to the child. child.addAclFeature(createAclFeature(accessEntries, defaultEntries)); newPerm = createFsPermissionForExtendedAcl(accessEntries, childPerm); @@ -172,7 +174,7 @@ public static List readINodeLogicalAcl(INode inode) { FsPermission perm = inode.getFsPermission(); AclFeature f = inode.getAclFeature(); if (f == null) { - return getMinimalAcl(perm); + return AclUtil.getMinimalAcl(perm); } final List existingAcl; @@ -208,7 +210,7 @@ public static List readINodeLogicalAcl(INode inode) { } else { // It's possible that there is a default ACL but no access ACL. In this // case, add the minimal access ACL implied by the permission bits. 
- existingAcl.addAll(getMinimalAcl(perm)); + existingAcl.addAll(AclUtil.getMinimalAcl(perm)); } // Add all default entries after the access entries. @@ -267,7 +269,7 @@ public static void updateINodeAcl(INode inode, List newAcl, assert newAcl.size() >= 3; FsPermission perm = inode.getFsPermission(); final FsPermission newPerm; - if (!isMinimalAcl(newAcl)) { + if (!AclUtil.isMinimalAcl(newAcl)) { // This is an extended ACL. Split entries into access vs. default. ScopedAclEntries scoped = new ScopedAclEntries(newAcl); List accessEntries = scoped.getAccessEntries(); @@ -321,7 +323,7 @@ private static AclFeature createAclFeature(List accessEntries, // For the access ACL, the feature only needs to hold the named user and // group entries. For a correctly sorted ACL, these will be in a // predictable range. - if (!isMinimalAcl(accessEntries)) { + if (!AclUtil.isMinimalAcl(accessEntries)) { featureEntries.addAll( accessEntries.subList(1, accessEntries.size() - 2)); } @@ -366,41 +368,4 @@ private static FsPermission createFsPermissionForMinimalAcl( accessEntries.get(2).getPermission(), existingPerm.getStickyBit()); } - - /** - * Translates the given permission bits to the equivalent minimal ACL. - * - * @param perm FsPermission to translate - * @return List containing exactly 3 entries representing the owner, - * group and other permissions - */ - private static List getMinimalAcl(FsPermission perm) { - return Lists.newArrayList( - new AclEntry.Builder() - .setScope(AclEntryScope.ACCESS) - .setType(AclEntryType.USER) - .setPermission(perm.getUserAction()) - .build(), - new AclEntry.Builder() - .setScope(AclEntryScope.ACCESS) - .setType(AclEntryType.GROUP) - .setPermission(perm.getGroupAction()) - .build(), - new AclEntry.Builder() - .setScope(AclEntryScope.ACCESS) - .setType(AclEntryType.OTHER) - .setPermission(perm.getOtherAction()) - .build()); - } - - /** - * Checks if the given entries represent a minimal ACL (contains exactly 3 - * entries). 
- * - * @param entries List entries to check - * @return boolean true if the entries represent a minimal ACL - */ - private static boolean isMinimalAcl(List entries) { - return entries.size() == 3; - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java index 3a3f9206ebd..1474e039e6e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.permission.AclEntryType; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.ScopedAclEntries; import org.apache.hadoop.hdfs.protocol.AclException; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index 48e9c1e38e3..1d74cf3af4b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -27,6 +27,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT; import java.io.DataInput; +import java.io.DataOutputStream; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; @@ -61,10 +62,10 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats; import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor; @@ -690,15 +691,25 @@ public void removeDirective(long id, FSPermissionChecker pc) assert namesystem.hasReadLock(); final int NUM_PRE_ALLOCATED_ENTRIES = 16; String filterPath = null; - if (filter.getId() != null) { - throw new IOException("Filtering by ID is unsupported."); - } if (filter.getPath() != null) { filterPath = validatePath(filter); } if (filter.getReplication() != null) { - throw new IOException("Filtering by replication is unsupported."); + throw new InvalidRequestException( + "Filtering by replication is unsupported."); } + + // Querying for a single ID + final Long id = filter.getId(); + if (id != null) { + if (!directivesById.containsKey(id)) { + throw new InvalidRequestException("Did not find requested id " + id); + } + // Since we use a tailMap on directivesById, setting prev to id-1 gets + // us the directive with the id (if present) + prevId = id - 1; + } + ArrayList replies = new 
ArrayList(NUM_PRE_ALLOCATED_ENTRIES); int numReplies = 0; @@ -710,6 +721,14 @@ public void removeDirective(long id, FSPermissionChecker pc) } CacheDirective curDirective = cur.getValue(); CacheDirectiveInfo info = cur.getValue().toInfo(); + + // If the requested ID is present, it should be the first item. + // Hitting this case means the ID is not present, or we're on the second + // item and should break out. + if (id != null && + !(info.getId().equals(id))) { + break; + } if (filter.getPool() != null && !info.getPool().equals(filter.getPool())) { continue; @@ -953,6 +972,18 @@ private void processCacheReportImpl(final DatanodeDescriptor datanode, } } + /** + * Saves the current state of the CacheManager to the DataOutput. Used + * to persist CacheManager state in the FSImage. + * @param out DataOutput to persist state + * @param sdPath path of the storage directory + * @throws IOException + */ + public void saveStateCompat(DataOutputStream out, String sdPath) + throws IOException { + serializerCompat.save(out, sdPath); + } + public PersistState saveState() throws IOException { ArrayList pools = Lists .newArrayListWithCapacity(cachePools.size()); @@ -1072,6 +1103,12 @@ private void addCacheDirective(final String poolName, } private final class SerializerCompat { + private void save(DataOutputStream out, String sdPath) throws IOException { + out.writeLong(nextDirectiveId); + savePools(out, sdPath); + saveDirectives(out, sdPath); + } + private void load(DataInput in) throws IOException { nextDirectiveId = in.readLong(); // pools need to be loaded first since directives point to their parent pool @@ -1079,6 +1116,42 @@ private void load(DataInput in) throws IOException { loadDirectives(in); } + /** + * Save cache pools to fsimage + */ + private void savePools(DataOutputStream out, + String sdPath) throws IOException { + StartupProgress prog = NameNode.getStartupProgress(); + Step step = new Step(StepType.CACHE_POOLS, sdPath); + prog.beginStep(Phase.SAVING_CHECKPOINT, step); + prog.setTotal(Phase.SAVING_CHECKPOINT, step, cachePools.size()); + Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step); + out.writeInt(cachePools.size()); + for (CachePool pool: cachePools.values()) { + FSImageSerialization.writeCachePoolInfo(out, pool.getInfo(true)); + counter.increment(); + } + prog.endStep(Phase.SAVING_CHECKPOINT, step); + } + + /* + * Save cache entries to fsimage + */ + private void saveDirectives(DataOutputStream out, String sdPath) + throws IOException { + StartupProgress prog = NameNode.getStartupProgress(); + Step step = new Step(StepType.CACHE_ENTRIES, sdPath); + prog.beginStep(Phase.SAVING_CHECKPOINT, step); + prog.setTotal(Phase.SAVING_CHECKPOINT, step, directivesById.size()); + Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step); + out.writeInt(directivesById.size()); + for (CacheDirective directive : directivesById.values()) { + FSImageSerialization.writeCacheDirectiveInfo(out, directive.toInfo()); + counter.increment(); + } + prog.endStep(Phase.SAVING_CHECKPOINT, step); + } + /** * Load cache pools from fsimage */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointConf.java index 0bc62d8f288..b1636bc267e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointConf.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointConf.java @@ -41,6 +41,9 @@ public class CheckpointConf { /** maxium number of retries when merge errors occur */ private final int maxRetriesOnMergeError; + + /** The output dir for legacy OIV image */ + private final String legacyOivImageDir; public CheckpointConf(Configuration conf) { checkpointCheckPeriod = conf.getLong( @@ -53,6 +56,7 @@ public CheckpointConf(Configuration conf) { DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT); maxRetriesOnMergeError = conf.getInt(DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY, DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_DEFAULT); + legacyOivImageDir = conf.get(DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY); warnForDeprecatedConfigs(conf); } @@ -83,4 +87,8 @@ public long getTxnCount() { public int getMaxRetriesOnMergeError() { return maxRetriesOnMergeError; } + + public String getLegacyOivImageDir() { + return legacyOivImageDir; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index c40ec786591..bd92cb4f439 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException; +import org.apache.hadoop.hdfs.protocol.FsAclPermission; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; @@ -2592,7 +2593,7 @@ HdfsFileStatus createFileStatus(byte[] path, INode node, blocksize, node.getModificationTime(snapshot), node.getAccessTime(snapshot), - node.getFsPermission(snapshot), + getPermissionForFileStatus(node, snapshot), node.getUserName(snapshot), node.getGroupName(snapshot), node.isSymlink() ? node.asSymlink().getSymlink() : null, @@ -2634,7 +2635,8 @@ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path, HdfsLocatedFileStatus status = new HdfsLocatedFileStatus(size, node.isDirectory(), replication, blocksize, node.getModificationTime(snapshot), - node.getAccessTime(snapshot), node.getFsPermission(snapshot), + node.getAccessTime(snapshot), + getPermissionForFileStatus(node, snapshot), node.getUserName(snapshot), node.getGroupName(snapshot), node.isSymlink() ? node.asSymlink().getSymlink() : null, path, node.getId(), loc, childrenNum); @@ -2648,6 +2650,22 @@ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path, return status; } + /** + * Returns an inode's FsPermission for use in an outbound FileStatus. If the + * inode has an ACL, then this method will convert to a FsAclPermission. + * + * @param node INode to check + * @param snapshot int snapshot ID + * @return FsPermission from inode, with ACL bit on if the inode has an ACL + */ + private static FsPermission getPermissionForFileStatus(INode node, + int snapshot) { + FsPermission perm = node.getFsPermission(snapshot); + if (node.getAclFeature(snapshot) != null) { + perm = new FsAclPermission(perm); + } + return perm; + } /** * Add the given symbolic link to the fs. Record it in the edits log. 
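As a rough illustration of the ACL bit that getPermissionForFileStatus() above surfaces, a client can inspect it on the returned permissions roughly as follows. This is only a sketch: it assumes FsPermission#getAclBit() is available in this Hadoop version and that the default FileSystem points at HDFS.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AclBitLister {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    for (FileStatus st : fs.listStatus(new Path(args[0]))) {
      // Permissions built via getPermissionForFileStatus() report true here
      // when the inode carries an ACL; plain permissions report false.
      String aclMarker = st.getPermission().getAclBit() ? "+" : " ";
      System.out.println(st.getPermission() + aclMarker + " " + st.getPath());
    }
  }
}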
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index 93e5de04ea6..36028a0283a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -934,6 +934,25 @@ void saveFSImage(SaveNamespaceContext context, StorageDirectory sd, storage.setMostRecentCheckpointInfo(txid, Time.now()); } + /** + * Save FSimage in the legacy format. This is not for NN consumption, + * but for tools like OIV. + */ + public void saveLegacyOIVImage(FSNamesystem source, String targetDir, + Canceler canceler) throws IOException { + FSImageCompression compression = + FSImageCompression.createCompression(conf); + long txid = getLastAppliedOrWrittenTxId(); + SaveNamespaceContext ctx = new SaveNamespaceContext(source, txid, + canceler); + FSImageFormat.Saver saver = new FSImageFormat.Saver(ctx); + String imageFileName = NNStorage.getLegacyOIVImageFileName(txid); + File imageFile = new File(targetDir, imageFileName); + saver.save(imageFile, compression); + archivalManager.purgeOldLegacyOIVImages(targetDir, txid); + } + + /** * FSImageSaver is being run in a separate thread when saving * FSImage. There is one thread per each copy of the image. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index 9fafa7fa9ae..2ac5fe41f76 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -21,14 +21,20 @@ import java.io.DataInput; import java.io.DataInputStream; +import java.io.DataOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; +import java.io.FileOutputStream; import java.io.IOException; import java.security.DigestInputStream; +import java.security.DigestOutputStream; import java.security.MessageDigest; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.TreeMap; @@ -50,6 +56,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; +import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature; import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; @@ -60,6 +67,7 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType; +import org.apache.hadoop.hdfs.util.ReadOnlyList; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.io.Text; @@ -69,8 +77,105 @@ import com.google.common.annotations.VisibleForTesting; /** - * This class loads and stores the 
FSImage of the NameNode. The file - * src/main/proto/fsimage.proto describes the on-disk layout of the FSImage. + * Contains inner classes for reading or writing the on-disk format for + * FSImages. + * + * In particular, the format of the FSImage looks like: + * <pre>
+ * FSImage {
+ *   layoutVersion: int, namespaceID: int, numberItemsInFSDirectoryTree: long,
+ *   namesystemGenerationStampV1: long, namesystemGenerationStampV2: long,
+ *   generationStampAtBlockIdSwitch: long, lastAllocatedBlockId: long,
+ *   transactionID: long, snapshotCounter: int, numberOfSnapshots: int,
+ *   numOfSnapshottableDirs: int,
+ *   {FSDirectoryTree, FilesUnderConstruction, SecretManagerState} (can be compressed)
+ * }
+ *
+ * FSDirectoryTree (if {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is supported) {
+ *   INodeInfo of root, numberOfChildren of root: int
+ *   [list of INodeInfo of root's children],
+ *   [list of INodeDirectoryInfo of root's directory children]
+ * }
+ *
+ * FSDirectoryTree (if {@link Feature#FSIMAGE_NAME_OPTIMIZATION} not supported) {
+ *   [list of INodeInfo of INodes in topological order]
+ * }
+ *
+ * INodeInfo {
+ *   {
+ *     localName: short + byte[]
+ *   } when {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is supported
+ *   or
+ *   {
+ *     fullPath: byte[]
+ *   } when {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is not supported
+ *   replicationFactor: short, modificationTime: long,
+ *   accessTime: long, preferredBlockSize: long,
+ *   numberOfBlocks: int (-1 for INodeDirectory, -2 for INodeSymLink),
+ *   {
+ *     nsQuota: long, dsQuota: long,
+ *     {
+ *       isINodeSnapshottable: byte,
+ *       isINodeWithSnapshot: byte (if isINodeSnapshottable is false)
+ *     } (when {@link Feature#SNAPSHOT} is supported),
+ *     fsPermission: short, PermissionStatus
+ *   } for INodeDirectory
+ *   or
+ *   {
+ *     symlinkString, fsPermission: short, PermissionStatus
+ *   } for INodeSymlink
+ *   or
+ *   {
+ *     [list of BlockInfo]
+ *     [list of FileDiff]
+ *     {
+ *       isINodeFileUnderConstructionSnapshot: byte,
+ *       {clientName: short + byte[], clientMachine: short + byte[]} (when
+ *       isINodeFileUnderConstructionSnapshot is true),
+ *     } (when {@link Feature#SNAPSHOT} is supported and writing snapshotINode),
+ *     fsPermission: short, PermissionStatus
+ *   } for INodeFile
+ * }
+ *
+ * INodeDirectoryInfo {
+ *   fullPath of the directory: short + byte[],
+ *   numberOfChildren: int, [list of INodeInfo of children INode],
+ *   {
+ *     numberOfSnapshots: int,
+ *     [list of Snapshot] (when NumberOfSnapshots is positive),
+ *     numberOfDirectoryDiffs: int,
+ *     [list of DirectoryDiff] (NumberOfDirectoryDiffs is positive),
+ *     number of children that are directories,
+ *     [list of INodeDirectoryInfo of the directory children] (includes
+ *     snapshot copies of deleted sub-directories)
+ *   } (when {@link Feature#SNAPSHOT} is supported),
+ * }
+ *
+ * Snapshot {
+ *   snapshotID: int, root of Snapshot: INodeDirectoryInfo (its local name is
+ *   the name of the snapshot)
+ * }
+ *
+ * DirectoryDiff {
+ *   full path of the root of the associated Snapshot: short + byte[],
+ *   childrenSize: int,
+ *   isSnapshotRoot: byte,
+ *   snapshotINodeIsNotNull: byte (when isSnapshotRoot is false),
+ *   snapshotINode: INodeDirectory (when SnapshotINodeIsNotNull is true), Diff
+ * }
+ *
+ * Diff {
+ *   createdListSize: int, [Local name of INode in created list],
+ *   deletedListSize: int, [INode in deleted list: INodeInfo]
+ * }
+ *
+ * FileDiff {
+ *   full path of the root of the associated Snapshot: short + byte[],
+ *   fileSize: long,
+ *   snapshotINodeIsNotNull: byte,
+ *   snapshotINode: INodeFile (when SnapshotINodeIsNotNull is true), Diff
+ * }
+ * </pre>
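For readers unfamiliar with this layout, a rough standalone sketch of dumping the leading fixed-size header fields of an uncompressed legacy fsimage follows. It is only an illustration keyed to the field order in the description above: it ignores the LayoutFlags section that newer layout versions write after the version number, and it stops before the variable-length sections.

import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

public class FsImageHeaderDump {
  public static void main(String[] args) throws IOException {
    try (DataInputStream in = new DataInputStream(
        new BufferedInputStream(new FileInputStream(args[0])))) {
      // Fields appear in the order listed in the FSImage block above.
      System.out.println("layoutVersion           = " + in.readInt());
      System.out.println("namespaceID             = " + in.readInt());
      System.out.println("numItemsInFSDirectory   = " + in.readLong());
      System.out.println("generationStampV1       = " + in.readLong());
      System.out.println("generationStampV2       = " + in.readLong());
      System.out.println("genStampAtBlockIdSwitch = " + in.readLong());
      System.out.println("lastAllocatedBlockId    = " + in.readLong());
      System.out.println("transactionID           = " + in.readLong());
    }
  }
}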
*/ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -580,6 +685,11 @@ public void updateBlocksMap(INodeFile file) { } } + /** @return The FSDirectory of the namesystem where the fsimage is loaded */ + public FSDirectory getFSDirectoryInLoading() { + return namesystem.dir; + } + public INode loadINodeWithLocalName(boolean isSnapshotINode, DataInput in, boolean updateINodeMap) throws IOException { return loadINodeWithLocalName(isSnapshotINode, in, updateINodeMap, null); @@ -1009,7 +1119,7 @@ static String renameReservedPathsOnUpgrade(String path, + " option to automatically rename these paths during upgrade."; /** - * Same as {@link #renameReservedPathsOnUpgrade}, but for a single + * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single * byte array path component. */ private static byte[] renameReservedComponentOnUpgrade(byte[] component, @@ -1029,7 +1139,7 @@ private static byte[] renameReservedComponentOnUpgrade(byte[] component, } /** - * Same as {@link #renameReservedPathsOnUpgrade}, but for a single + * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single * byte array path component. */ private static byte[] renameReservedRootComponentOnUpgrade(byte[] component, @@ -1050,4 +1160,271 @@ private static byte[] renameReservedRootComponentOnUpgrade(byte[] component, } return component; } + + /** + * A one-shot class responsible for writing an image file. + * The write() function should be called once, after which the getter + * functions may be used to retrieve information about the file that was written. + * + * This is replaced by the PB-based FSImage. The class is to maintain + * compatibility for the external fsimage tool. + */ + @Deprecated + static class Saver { + private static final int LAYOUT_VERSION = -51; + private final SaveNamespaceContext context; + /** Set to true once an image has been written */ + private boolean saved = false; + + /** The MD5 checksum of the file that was written */ + private MD5Hash savedDigest; + private final ReferenceMap referenceMap = new ReferenceMap(); + + private final Map snapshotUCMap = + new HashMap(); + + /** @throws IllegalStateException if the instance has not yet saved an image */ + private void checkSaved() { + if (!saved) { + throw new IllegalStateException("FSImageSaver has not saved an image"); + } + } + + /** @throws IllegalStateException if the instance has already saved an image */ + private void checkNotSaved() { + if (saved) { + throw new IllegalStateException("FSImageSaver has already saved an image"); + } + } + + + Saver(SaveNamespaceContext context) { + this.context = context; + } + + /** + * Return the MD5 checksum of the image file that was saved. 
+ */ + MD5Hash getSavedDigest() { + checkSaved(); + return savedDigest; + } + + void save(File newFile, FSImageCompression compression) throws IOException { + checkNotSaved(); + + final FSNamesystem sourceNamesystem = context.getSourceNamesystem(); + final INodeDirectory rootDir = sourceNamesystem.dir.rootDir; + final long numINodes = rootDir.getDirectoryWithQuotaFeature() + .getSpaceConsumed().get(Quota.NAMESPACE); + String sdPath = newFile.getParentFile().getParentFile().getAbsolutePath(); + Step step = new Step(StepType.INODES, sdPath); + StartupProgress prog = NameNode.getStartupProgress(); + prog.beginStep(Phase.SAVING_CHECKPOINT, step); + prog.setTotal(Phase.SAVING_CHECKPOINT, step, numINodes); + Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step); + long startTime = now(); + // + // Write out data + // + MessageDigest digester = MD5Hash.getDigester(); + FileOutputStream fout = new FileOutputStream(newFile); + DigestOutputStream fos = new DigestOutputStream(fout, digester); + DataOutputStream out = new DataOutputStream(fos); + try { + out.writeInt(LAYOUT_VERSION); + LayoutFlags.write(out); + // We use the non-locked version of getNamespaceInfo here since + // the coordinating thread of saveNamespace already has read-locked + // the namespace for us. If we attempt to take another readlock + // from the actual saver thread, there's a potential of a + // fairness-related deadlock. See the comments on HDFS-2223. + out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo() + .getNamespaceID()); + out.writeLong(numINodes); + out.writeLong(sourceNamesystem.getGenerationStampV1()); + out.writeLong(sourceNamesystem.getGenerationStampV2()); + out.writeLong(sourceNamesystem.getGenerationStampAtblockIdSwitch()); + out.writeLong(sourceNamesystem.getLastAllocatedBlockId()); + out.writeLong(context.getTxId()); + out.writeLong(sourceNamesystem.getLastInodeId()); + + + sourceNamesystem.getSnapshotManager().write(out); + + // write compression info and set up compressed stream + out = compression.writeHeaderAndWrapStream(fos); + LOG.info("Saving image file " + newFile + + " using " + compression); + + // save the root + saveINode2Image(rootDir, out, false, referenceMap, counter); + // save the rest of the nodes + saveImage(rootDir, out, true, false, counter); + prog.endStep(Phase.SAVING_CHECKPOINT, step); + // Now that the step is finished, set counter equal to total to adjust + // for possible under-counting due to reference inodes. + prog.setCount(Phase.SAVING_CHECKPOINT, step, numINodes); + // save files under construction + // TODO: for HDFS-5428, since we cannot break the compatibility of + // fsimage, we store part of the under-construction files that are only + // in snapshots in this "under-construction-file" section. As a + // temporary solution, we use "/.reserved/.inodes/" as their + // paths, so that when loading fsimage we do not put them into the lease + // map. In the future, we can remove this hack when we can bump the + // layout version. 
+ sourceNamesystem.saveFilesUnderConstruction(out, snapshotUCMap); + + context.checkCancelled(); + sourceNamesystem.saveSecretManagerStateCompat(out, sdPath); + context.checkCancelled(); + sourceNamesystem.getCacheManager().saveStateCompat(out, sdPath); + context.checkCancelled(); + out.flush(); + context.checkCancelled(); + fout.getChannel().force(true); + } finally { + out.close(); + } + + saved = true; + // set md5 of the saved image + savedDigest = new MD5Hash(digester.digest()); + + LOG.info("Image file " + newFile + " of size " + newFile.length() + + " bytes saved in " + (now() - startTime)/1000 + " seconds."); + } + + /** + * Save children INodes. + * @param children The list of children INodes + * @param out The DataOutputStream to write + * @param inSnapshot Whether the parent directory or its ancestor is in + * the deleted list of some snapshot (caused by rename or + * deletion) + * @param counter Counter to increment for namenode startup progress + * @return Number of children that are directory + */ + private int saveChildren(ReadOnlyList children, + DataOutputStream out, boolean inSnapshot, Counter counter) + throws IOException { + // Write normal children INode. + out.writeInt(children.size()); + int dirNum = 0; + int i = 0; + for(INode child : children) { + // print all children first + // TODO: for HDFS-5428, we cannot change the format/content of fsimage + // here, thus even if the parent directory is in snapshot, we still + // do not handle INodeUC as those stored in deleted list + saveINode2Image(child, out, false, referenceMap, counter); + if (child.isDirectory()) { + dirNum++; + } else if (inSnapshot && child.isFile() + && child.asFile().isUnderConstruction()) { + this.snapshotUCMap.put(child.getId(), child.asFile()); + } + if (i++ % 50 == 0) { + context.checkCancelled(); + } + } + return dirNum; + } + + /** + * Save file tree image starting from the given root. + * This is a recursive procedure, which first saves all children and + * snapshot diffs of a current directory and then moves inside the + * sub-directories. + * + * @param current The current node + * @param out The DataoutputStream to write the image + * @param toSaveSubtree Whether or not to save the subtree to fsimage. For + * reference node, its subtree may already have been + * saved before. + * @param inSnapshot Whether the current directory is in snapshot + * @param counter Counter to increment for namenode startup progress + */ + private void saveImage(INodeDirectory current, DataOutputStream out, + boolean toSaveSubtree, boolean inSnapshot, Counter counter) + throws IOException { + // write the inode id of the directory + out.writeLong(current.getId()); + + if (!toSaveSubtree) { + return; + } + + final ReadOnlyList children = current + .getChildrenList(Snapshot.CURRENT_STATE_ID); + int dirNum = 0; + List snapshotDirs = null; + DirectoryWithSnapshotFeature sf = current.getDirectoryWithSnapshotFeature(); + if (sf != null) { + snapshotDirs = new ArrayList(); + sf.getSnapshotDirectory(snapshotDirs); + dirNum += snapshotDirs.size(); + } + + // 2. Write INodeDirectorySnapshottable#snapshotsByNames to record all + // Snapshots + if (current instanceof INodeDirectorySnapshottable) { + INodeDirectorySnapshottable snapshottableNode = + (INodeDirectorySnapshottable) current; + SnapshotFSImageFormat.saveSnapshots(snapshottableNode, out); + } else { + out.writeInt(-1); // # of snapshots + } + + // 3. Write children INode + dirNum += saveChildren(children, out, inSnapshot, counter); + + // 4. 
Write DirectoryDiff lists, if there is any. + SnapshotFSImageFormat.saveDirectoryDiffList(current, out, referenceMap); + + // Write sub-tree of sub-directories, including possible snapshots of + // deleted sub-directories + out.writeInt(dirNum); // the number of sub-directories + for(INode child : children) { + if(!child.isDirectory()) { + continue; + } + // make sure we only save the subtree under a reference node once + boolean toSave = child.isReference() ? + referenceMap.toProcessSubtree(child.getId()) : true; + saveImage(child.asDirectory(), out, toSave, inSnapshot, counter); + } + if (snapshotDirs != null) { + for (INodeDirectory subDir : snapshotDirs) { + // make sure we only save the subtree under a reference node once + boolean toSave = subDir.getParentReference() != null ? + referenceMap.toProcessSubtree(subDir.getId()) : true; + saveImage(subDir, out, toSave, true, counter); + } + } + } + + /** + * Saves inode and increments progress counter. + * + * @param inode INode to save + * @param out DataOutputStream to receive inode + * @param writeUnderConstruction boolean true if this is under construction + * @param referenceMap ReferenceMap containing reference inodes + * @param counter Counter to increment for namenode startup progress + * @throws IOException thrown if there is an I/O error + */ + private void saveINode2Image(INode inode, DataOutputStream out, + boolean writeUnderConstruction, ReferenceMap referenceMap, + Counter counter) throws IOException { + FSImageSerialization.saveINode2Image(inode, out, writeUnderConstruction, + referenceMap); + // Intentionally do not increment counter for reference inodes, because it + // is too difficult at this point to assess whether or not this is a + // reference that counts toward quota. + if (!(inode instanceof INodeReference)) { + counter.increment(); + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java index b863737c0b3..4429c528f65 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java @@ -17,6 +17,11 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import java.io.DataInput; +import java.io.DataOutput; +import java.io.DataOutputStream; +import java.io.IOException; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; @@ -31,20 +36,21 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable; +import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat; +import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; import org.apache.hadoop.hdfs.util.XMLUtils; import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException; import org.apache.hadoop.hdfs.util.XMLUtils.Stanza; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.ShortWritable; +import org.apache.hadoop.io.Text; import 
org.apache.hadoop.io.WritableUtils; import org.xml.sax.ContentHandler; import org.xml.sax.SAXException; -import java.io.DataInput; -import java.io.DataOutput; -import java.io.DataOutputStream; -import java.io.IOException; +import com.google.common.base.Preconditions; /** * Static utility functions for serializing various pieces of data in the correct @@ -82,6 +88,26 @@ static private final class TLData { final ShortWritable U_SHORT = new ShortWritable(); final IntWritable U_INT = new IntWritable(); final LongWritable U_LONG = new LongWritable(); + final FsPermission FILE_PERM = new FsPermission((short) 0); + } + + private static void writePermissionStatus(INodeAttributes inode, + DataOutput out) throws IOException { + final FsPermission p = TL_DATA.get().FILE_PERM; + p.fromShort(inode.getFsPermissionShort()); + PermissionStatus.write(out, inode.getUserName(), inode.getGroupName(), p); + } + + private static void writeBlocks(final Block[] blocks, + final DataOutput out) throws IOException { + if (blocks == null) { + out.writeInt(0); + } else { + out.writeInt(blocks.length); + for (Block blk : blocks) { + blk.write(out); + } + } } // Helper function that reads in an INodeUnderConstruction @@ -127,6 +153,183 @@ static INodeFile readINodeUnderConstruction( return file; } + // Helper function that writes an INodeUnderConstruction + // into the input stream + // + static void writeINodeUnderConstruction(DataOutputStream out, INodeFile cons, + String path) throws IOException { + writeString(path, out); + out.writeLong(cons.getId()); + out.writeShort(cons.getFileReplication()); + out.writeLong(cons.getModificationTime()); + out.writeLong(cons.getPreferredBlockSize()); + + writeBlocks(cons.getBlocks(), out); + cons.getPermissionStatus().write(out); + + FileUnderConstructionFeature uc = cons.getFileUnderConstructionFeature(); + writeString(uc.getClientName(), out); + writeString(uc.getClientMachine(), out); + + out.writeInt(0); // do not store locations of last block + } + + /** + * Serialize a {@link INodeFile} node + * @param node The node to write + * @param out The {@link DataOutputStream} where the fields are written + * @param writeBlock Whether to write block information + */ + public static void writeINodeFile(INodeFile file, DataOutput out, + boolean writeUnderConstruction) throws IOException { + writeLocalName(file, out); + out.writeLong(file.getId()); + out.writeShort(file.getFileReplication()); + out.writeLong(file.getModificationTime()); + out.writeLong(file.getAccessTime()); + out.writeLong(file.getPreferredBlockSize()); + + writeBlocks(file.getBlocks(), out); + SnapshotFSImageFormat.saveFileDiffList(file, out); + + if (writeUnderConstruction) { + if (file.isUnderConstruction()) { + out.writeBoolean(true); + final FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature(); + writeString(uc.getClientName(), out); + writeString(uc.getClientMachine(), out); + } else { + out.writeBoolean(false); + } + } + + writePermissionStatus(file, out); + } + + /** Serialize an {@link INodeFileAttributes}. 
*/ + public static void writeINodeFileAttributes(INodeFileAttributes file, + DataOutput out) throws IOException { + writeLocalName(file, out); + writePermissionStatus(file, out); + out.writeLong(file.getModificationTime()); + out.writeLong(file.getAccessTime()); + + out.writeShort(file.getFileReplication()); + out.writeLong(file.getPreferredBlockSize()); + } + + private static void writeQuota(Quota.Counts quota, DataOutput out) + throws IOException { + out.writeLong(quota.get(Quota.NAMESPACE)); + out.writeLong(quota.get(Quota.DISKSPACE)); + } + + /** + * Serialize a {@link INodeDirectory} + * @param node The node to write + * @param out The {@link DataOutput} where the fields are written + */ + public static void writeINodeDirectory(INodeDirectory node, DataOutput out) + throws IOException { + writeLocalName(node, out); + out.writeLong(node.getId()); + out.writeShort(0); // replication + out.writeLong(node.getModificationTime()); + out.writeLong(0); // access time + out.writeLong(0); // preferred block size + out.writeInt(-1); // # of blocks + + writeQuota(node.getQuotaCounts(), out); + + if (node instanceof INodeDirectorySnapshottable) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + out.writeBoolean(node.isWithSnapshot()); + } + + writePermissionStatus(node, out); + } + + /** + * Serialize a {@link INodeDirectory} + * @param a The node to write + * @param out The {@link DataOutput} where the fields are written + */ + public static void writeINodeDirectoryAttributes( + INodeDirectoryAttributes a, DataOutput out) throws IOException { + writeLocalName(a, out); + writePermissionStatus(a, out); + out.writeLong(a.getModificationTime()); + writeQuota(a.getQuotaCounts(), out); + } + + /** + * Serialize a {@link INodeSymlink} node + * @param node The node to write + * @param out The {@link DataOutput} where the fields are written + */ + private static void writeINodeSymlink(INodeSymlink node, DataOutput out) + throws IOException { + writeLocalName(node, out); + out.writeLong(node.getId()); + out.writeShort(0); // replication + out.writeLong(0); // modification time + out.writeLong(0); // access time + out.writeLong(0); // preferred block size + out.writeInt(-2); // # of blocks + + Text.writeString(out, node.getSymlinkString()); + writePermissionStatus(node, out); + } + + /** Serialize a {@link INodeReference} node */ + private static void writeINodeReference(INodeReference ref, DataOutput out, + boolean writeUnderConstruction, ReferenceMap referenceMap + ) throws IOException { + writeLocalName(ref, out); + out.writeLong(ref.getId()); + out.writeShort(0); // replication + out.writeLong(0); // modification time + out.writeLong(0); // access time + out.writeLong(0); // preferred block size + out.writeInt(-3); // # of blocks + + final boolean isWithName = ref instanceof INodeReference.WithName; + out.writeBoolean(isWithName); + + if (!isWithName) { + Preconditions.checkState(ref instanceof INodeReference.DstReference); + // dst snapshot id + out.writeInt(((INodeReference.DstReference) ref).getDstSnapshotId()); + } else { + out.writeInt(((INodeReference.WithName) ref).getLastSnapshotId()); + } + + final INodeReference.WithCount withCount + = (INodeReference.WithCount)ref.getReferredINode(); + referenceMap.writeINodeReferenceWithCount(withCount, out, + writeUnderConstruction); + } + + /** + * Save one inode's attributes to the image. 
+ */ + public static void saveINode2Image(INode node, DataOutput out, + boolean writeUnderConstruction, ReferenceMap referenceMap) + throws IOException { + if (node.isReference()) { + writeINodeReference(node.asReference(), out, writeUnderConstruction, + referenceMap); + } else if (node.isDirectory()) { + writeINodeDirectory(node.asDirectory(), out); + } else if (node.isSymlink()) { + writeINodeSymlink(node.asSymlink(), out); + } else if (node.isFile()) { + writeINodeFile(node.asFile(), out, writeUnderConstruction); + } + } + // This should be reverted to package private once the ImageLoader // code is moved into this package. This method should not be called // by other code. @@ -226,6 +429,12 @@ public static byte[] readLocalName(DataInput in) throws IOException { in.readFully(createdNodeName); return createdNodeName; } + + private static void writeLocalName(INodeAttributes inode, DataOutput out) + throws IOException { + final byte[] name = inode.getLocalNameBytes(); + writeBytes(name, out); + } public static void writeBytes(byte[] data, DataOutput out) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index a95166bed07..8cb4e1a9c4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -85,17 +85,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY; import static org.apache.hadoop.util.Time.now; -import java.io.BufferedWriter; -import java.io.ByteArrayInputStream; -import java.io.DataInput; -import java.io.DataInputStream; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStreamWriter; -import java.io.PrintWriter; -import java.io.StringWriter; +import java.io.*; import java.lang.management.ManagementFactory; import java.net.InetAddress; import java.net.URI; @@ -6091,6 +6081,42 @@ void unprotectedChangeLease(String src, String dst) { leaseManager.changeLease(src, dst); } + /** + * Serializes leases. + */ + void saveFilesUnderConstruction(DataOutputStream out, + Map snapshotUCMap) throws IOException { + // This is run by an inferior thread of saveNamespace, which holds a read + // lock on our behalf. If we took the read lock here, we could block + // for fairness if a writer is waiting on the lock. + synchronized (leaseManager) { + Map nodes = leaseManager.getINodesUnderConstruction(); + for (Map.Entry entry : nodes.entrySet()) { + // TODO: for HDFS-5428, because of rename operations, some + // under-construction files that are + // in the current fs directory can also be captured in the + // snapshotUCMap. We should remove them from the snapshotUCMap. 
+ snapshotUCMap.remove(entry.getValue().getId()); + } + + out.writeInt(nodes.size() + snapshotUCMap.size()); // write the size + for (Map.Entry entry : nodes.entrySet()) { + FSImageSerialization.writeINodeUnderConstruction( + out, entry.getValue(), entry.getKey()); + } + for (Map.Entry entry : snapshotUCMap.entrySet()) { + // for those snapshot INodeFileUC, we use "/.reserved/.inodes/" + // as their paths + StringBuilder b = new StringBuilder(); + b.append(FSDirectory.DOT_RESERVED_PATH_PREFIX) + .append(Path.SEPARATOR).append(FSDirectory.DOT_INODES_STRING) + .append(Path.SEPARATOR).append(entry.getValue().getId()); + FSImageSerialization.writeINodeUnderConstruction( + out, entry.getValue(), b.toString()); + } + } + } + /** * @return all the under-construction files in the lease map */ @@ -6377,6 +6403,15 @@ void cancelDelegationToken(Token token) getEditLog().logSync(); } + /** + * @param out save state of the secret manager + * @param sdPath String storage directory path + */ + void saveSecretManagerStateCompat(DataOutputStream out, String sdPath) + throws IOException { + dtSecretManager.saveSecretManagerStateCompat(out, sdPath); + } + SecretManagerState saveSecretManagerState() { return dtSecretManager.saveSecretManagerState(); } @@ -7403,6 +7438,7 @@ long addCacheDirective(CacheDirectiveInfo directive, EnumSet flags) cacheManager.waitForRescanIfNeeded(); } writeLock(); + String effectiveDirectiveStr = null; Long result = null; try { checkOperation(OperationCategory.WRITE); @@ -7414,11 +7450,12 @@ long addCacheDirective(CacheDirectiveInfo directive, EnumSet flags) throw new IOException("addDirective: you cannot specify an ID " + "for this operation."); } - CacheDirectiveInfo effectiveDirective = + CacheDirectiveInfo effectiveDirective = cacheManager.addDirective(directive, pc, flags); getEditLog().logAddCacheDirectiveInfo(effectiveDirective, cacheEntry != null); result = effectiveDirective.getId(); + effectiveDirectiveStr = effectiveDirective.toString(); success = true; } finally { writeUnlock(); @@ -7426,7 +7463,7 @@ long addCacheDirective(CacheDirectiveInfo directive, EnumSet flags) getEditLog().logSync(); } if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(success, "addCacheDirective", null, null, null); + logAuditEvent(success, "addCacheDirective", effectiveDirectiveStr, null, null); } RetryCache.setState(cacheEntry, success, result); } @@ -7463,7 +7500,8 @@ void modifyCacheDirective(CacheDirectiveInfo directive, getEditLog().logSync(); } if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(success, "modifyCacheDirective", null, null, null); + String idStr = "{id: " + directive.getId().toString() + "}"; + logAuditEvent(success, "modifyCacheDirective", idStr, directive.toString(), null); } RetryCache.setState(cacheEntry, success); } @@ -7491,7 +7529,8 @@ void removeCacheDirective(Long id) throws IOException { } finally { writeUnlock(); if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(success, "removeCacheDirective", null, null, + String idStr = "{id: " + id.toString() + "}"; + logAuditEvent(success, "removeCacheDirective", idStr, null, null); } RetryCache.setState(cacheEntry, success); @@ -7516,7 +7555,7 @@ BatchedListEntries listCacheDirectives( } finally { readUnlock(); if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(success, "listCacheDirectives", null, null, + logAuditEvent(success, "listCacheDirectives", filter.toString(), null, null); } } @@ -7533,6 +7572,7 @@ public void addCachePool(CachePoolInfo req) throws 
IOException { } writeLock(); boolean success = false; + String poolInfoStr = null; try { checkOperation(OperationCategory.WRITE); if (isInSafeMode()) { @@ -7543,12 +7583,13 @@ public void addCachePool(CachePoolInfo req) throws IOException { pc.checkSuperuserPrivilege(); } CachePoolInfo info = cacheManager.addCachePool(req); + poolInfoStr = info.toString(); getEditLog().logAddCachePool(info, cacheEntry != null); success = true; } finally { writeUnlock(); if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(success, "addCachePool", req.getPoolName(), null, null); + logAuditEvent(success, "addCachePool", poolInfoStr, null, null); } RetryCache.setState(cacheEntry, success); } @@ -7581,7 +7622,8 @@ public void modifyCachePool(CachePoolInfo req) throws IOException { } finally { writeUnlock(); if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(success, "modifyCachePool", req.getPoolName(), null, null); + String poolNameStr = "{poolName: " + req.getPoolName() + "}"; + logAuditEvent(success, "modifyCachePool", poolNameStr, req.toString(), null); } RetryCache.setState(cacheEntry, success); } @@ -7614,7 +7656,8 @@ public void removeCachePool(String cachePoolName) throws IOException { } finally { writeUnlock(); if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(success, "removeCachePool", cachePoolName, null, null); + String poolNameStr = "{poolName: " + cachePoolName + "}"; + logAuditEvent(success, "removeCachePool", poolNameStr, null, null); } RetryCache.setState(cacheEntry, success); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java index 77be2147a4f..eba7cd43033 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java @@ -39,7 +39,7 @@ * snapshots and it is renamed/moved to other locations. * * For example, - * (1) Support we have /abc/foo, say the inode of foo is inode(id=1000,name=foo) + * (1) Suppose we have /abc/foo, say the inode of foo is inode(id=1000,name=foo) * (2) create snapshot s0 for /abc * (3) mv /abc/foo /xyz/bar, i.e. inode(id=1000,name=...) is renamed from "foo" * to "bar" and its parent becomes /xyz. 
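The "/.reserved/.inodes/" naming used by saveFilesUnderConstruction() for under-construction files that survive only in snapshots amounts to the path construction sketched below. The constant values are written inline here as assumptions; the real code takes them from FSDirectory.DOT_RESERVED_PATH_PREFIX and FSDirectory.DOT_INODES_STRING.

public class ReservedInodePath {
  // Assumed to match FSDirectory.DOT_RESERVED_PATH_PREFIX and
  // FSDirectory.DOT_INODES_STRING at the time of this change.
  private static final String DOT_RESERVED_PATH_PREFIX = "/.reserved";
  private static final String DOT_INODES_STRING = ".inodes";

  static String forInode(long inodeId) {
    return DOT_RESERVED_PATH_PREFIX + "/" + DOT_INODES_STRING + "/" + inodeId;
  }

  public static void main(String[] args) {
    // Prints "/.reserved/.inodes/16400" for a hypothetical inode id.
    System.out.println(forInode(16400L));
  }
}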
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java index 09d1ce3df78..a576f1c5bb1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java @@ -77,7 +77,8 @@ public enum NameNodeFile { IMAGE_ROLLBACK("fsimage_rollback"), EDITS_NEW ("edits.new"), // from "old" pre-HDFS-1073 format EDITS_INPROGRESS ("edits_inprogress"), - EDITS_TMP ("edits_tmp"); + EDITS_TMP ("edits_tmp"), + IMAGE_LEGACY_OIV ("fsimage_legacy_oiv"); // For pre-PB format private String fileName = null; private NameNodeFile(String name) { this.fileName = name; } @@ -693,6 +694,10 @@ public static String getRollbackImageFileName(long txid) { return getNameNodeFileName(NameNodeFile.IMAGE_ROLLBACK, txid); } + public static String getLegacyOIVImageFileName(long txid) { + return getNameNodeFileName(NameNodeFile.IMAGE_LEGACY_OIV, txid); + } + private static String getNameNodeFileName(NameNodeFile nnf, long txid) { return String.format("%s_%019d", nnf.getName(), txid); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java index 7c3b48cf5e6..327f14c7e79 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java @@ -18,11 +18,13 @@ package org.apache.hadoop.hdfs.server.namenode; import java.io.File; +import java.io.FilenameFilter; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.EnumSet; +import java.util.Iterator; import java.util.List; import java.util.TreeSet; @@ -233,4 +235,58 @@ private static void deleteOrWarn(File file) { } } } + + /** + * Delete old OIV fsimages. Since the target dir is not a full blown + * storage directory, we simply list and keep the latest ones. For the + * same reason, no storage inspector is used. + */ + void purgeOldLegacyOIVImages(String dir, long txid) { + File oivImageDir = new File(dir); + final String oivImagePrefix = NameNodeFile.IMAGE_LEGACY_OIV.getName(); + String filesInStorage[]; + + // Get the listing + filesInStorage = oivImageDir.list(new FilenameFilter() { + @Override + public boolean accept(File dir, String name) { + return name.matches(oivImagePrefix + "_(\\d+)"); + } + }); + + // Check whether there is any work to do. + if (filesInStorage.length <= numCheckpointsToRetain) { + return; + } + + // Create a sorted list of txids from the file names. + TreeSet sortedTxIds = new TreeSet(); + for (String fName : filesInStorage) { + // Extract the transaction id from the file name. + long fTxId; + try { + fTxId = Long.parseLong(fName.substring(oivImagePrefix.length() + 1)); + } catch (NumberFormatException nfe) { + // This should not happen since we have already filtered it. + // Log and continue. + LOG.warn("Invalid file name. 
Skipping " + fName); + continue; + } + sortedTxIds.add(Long.valueOf(fTxId)); + } + + int numFilesToDelete = sortedTxIds.size() - numCheckpointsToRetain; + Iterator iter = sortedTxIds.iterator(); + while (numFilesToDelete > 0 && iter.hasNext()) { + long txIdVal = iter.next().longValue(); + String fileName = NNStorage.getLegacyOIVImageFileName(txIdVal); + LOG.info("Deleting " + fileName); + File fileToDelete = new File(oivImageDir, fileName); + if (!fileToDelete.delete()) { + // deletion failed. + LOG.warn("Failed to delete image file: " + fileToDelete); + } + numFilesToDelete--; + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 17d16333164..9cdad26eb24 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -1677,7 +1677,11 @@ public boolean allowStaleReads() { public boolean isStandbyState() { return (state.equals(STANDBY_STATE)); } - + + public boolean isActiveState() { + return (state.equals(ACTIVE_STATE)); + } + /** * Check that a request to change this node's HA state is valid. * In particular, verifies that, if auto failover is enabled, non-forced diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java index 752c18b6174..2f3e2902eb0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; +import org.apache.hadoop.hdfs.util.Canceler; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.io.MD5Hash; @@ -125,6 +126,7 @@ public class SecondaryNameNode implements Runnable, private Thread checkpointThread; private ObjectName nameNodeStatusBeanName; + private String legacyOivImageDir; @Override public String toString() { @@ -289,6 +291,9 @@ private void initialize(final Configuration conf, NetUtils.getHostPortString(httpsAddress)); } + legacyOivImageDir = conf.get( + DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY); + LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs " + "(" + checkpointConf.getPeriod() / 60 + " min)"); LOG.info("Log Size Trigger :" + checkpointConf.getTxnCount() + " txns"); @@ -497,6 +502,7 @@ private URL getInfoServer() throws IOException { * @return if the image is fetched from primary or not */ @VisibleForTesting + @SuppressWarnings("deprecated") public boolean doCheckpoint() throws IOException { checkpointImage.ensureCurrentDirExists(); NNStorage dstStorage = checkpointImage.getStorage(); @@ -559,11 +565,18 @@ public boolean doCheckpoint() throws IOException { LOG.warn("Checkpoint done. 
New Image Size: " + dstStorage.getFsImageName(txid).length()); - + + if (legacyOivImageDir != null && !legacyOivImageDir.isEmpty()) { + try { + checkpointImage.saveLegacyOIVImage(namesystem, legacyOivImageDir, + new Canceler()); + } catch (IOException e) { + LOG.warn("Failed to write legacy OIV image: ", e); + } + } return loadImage; } - - + /** * @param opts The parameters passed to this program. * @exception Exception if the filesystem does not exist. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java index 8e51b3df1c7..c7a0d6245b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java @@ -183,6 +183,12 @@ private void doCheckpoint() throws InterruptedException, IOException { txid = img.getStorage().getMostRecentCheckpointTxId(); assert txid == thisCheckpointTxId : "expected to save checkpoint at txid=" + thisCheckpointTxId + " but instead saved at txid=" + txid; + + // Save the legacy OIV image, if the output dir is defined. + String outputDir = checkpointConf.getLegacyOivImageDir(); + if (outputDir != null && !outputDir.isEmpty()) { + img.saveLegacyOIVImage(namesystem, outputDir, canceler); + } } finally { namesystem.longReadUnlock(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java index 9952317e802..04e4fc9ecc6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java @@ -17,13 +17,17 @@ */ package org.apache.hadoop.hdfs.server.namenode.snapshot; -import com.google.common.base.Preconditions; +import java.io.DataOutput; +import java.io.IOException; +import java.util.List; + import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INodeAttributes; import org.apache.hadoop.hdfs.server.namenode.Quota; +import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; -import java.util.List; +import com.google.common.base.Preconditions; /** * The difference of an inode between in two snapshots. @@ -128,4 +132,11 @@ public String toString() { return getClass().getSimpleName() + ": " + this.getSnapshotId() + " (post=" + (posteriorDiff == null? 
null: posteriorDiff.getSnapshotId()) + ")"; } + + void writeSnapshot(DataOutput out) throws IOException { + out.writeInt(snapshotId); + } + + abstract void write(DataOutput out, ReferenceMap referenceMap + ) throws IOException; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java index 40c63ef5531..c82309358e2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode.snapshot; +import java.io.DataOutput; +import java.io.IOException; import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collections; @@ -32,6 +34,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType; import org.apache.hadoop.hdfs.server.namenode.Content; import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext; +import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; @@ -39,6 +42,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.INodeReference; import org.apache.hadoop.hdfs.server.namenode.Quota; +import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; import org.apache.hadoop.hdfs.util.Diff; import org.apache.hadoop.hdfs.util.Diff.Container; import org.apache.hadoop.hdfs.util.Diff.ListType; @@ -120,6 +124,35 @@ private Quota.Counts destroyDeletedList( return counts; } + /** Serialize {@link #created} */ + private void writeCreated(DataOutput out) throws IOException { + final List created = getList(ListType.CREATED); + out.writeInt(created.size()); + for (INode node : created) { + // For INode in created list, we only need to record its local name + byte[] name = node.getLocalNameBytes(); + out.writeShort(name.length); + out.write(name); + } + } + + /** Serialize {@link #deleted} */ + private void writeDeleted(DataOutput out, + ReferenceMap referenceMap) throws IOException { + final List deleted = getList(ListType.DELETED); + out.writeInt(deleted.size()); + for (INode node : deleted) { + FSImageSerialization.saveINode2Image(node, out, true, referenceMap); + } + } + + /** Serialize to out */ + private void write(DataOutput out, ReferenceMap referenceMap + ) throws IOException { + writeCreated(out); + writeDeleted(out, referenceMap); + } + /** Get the list of INodeDirectory contained in the deleted list */ private void getDirsInDeleted(List dirList) { for (INode node : getList(ListType.DELETED)) { @@ -314,6 +347,25 @@ int getChildrenSize() { return childrenSize; } + @Override + void write(DataOutput out, ReferenceMap referenceMap) throws IOException { + writeSnapshot(out); + out.writeInt(childrenSize); + + // Write snapshotINode + out.writeBoolean(isSnapshotRoot); + if (!isSnapshotRoot) { + if (snapshotINode != null) { + out.writeBoolean(true); + FSImageSerialization.writeINodeDirectoryAttributes(snapshotINode, out); + } else { + out.writeBoolean(false); + } + } + // Write diff. 
Node need to write poseriorDiff, since diffs is a list. + diff.write(out, referenceMap); + } + @Override Quota.Counts destroyDiffAndCollectBlocks(INodeDirectory currentINode, BlocksMapUpdateInfo collectedBlocks, final List removedINodes) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java index 8bab9f15453..919ab564c66 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java @@ -17,13 +17,17 @@ */ package org.apache.hadoop.hdfs.server.namenode.snapshot; +import java.io.DataOutput; +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes; import org.apache.hadoop.hdfs.server.namenode.Quota; - -import java.util.List; +import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; /** * The difference of an {@link INodeFile} between two snapshots. @@ -66,6 +70,20 @@ public String toString() { + (snapshotINode == null? "?": snapshotINode.getFileReplication()); } + @Override + void write(DataOutput out, ReferenceMap referenceMap) throws IOException { + writeSnapshot(out); + out.writeLong(fileSize); + + // write snapshotINode + if (snapshotINode != null) { + out.writeBoolean(true); + FSImageSerialization.writeINodeFileAttributes(snapshotINode, out); + } else { + out.writeBoolean(false); + } + } + @Override Quota.Counts destroyDiffAndCollectBlocks(INodeFile currentINode, BlocksMapUpdateInfo collectedBlocks, final List removedINodes) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java index 6a80f5dcb27..59f73f3caef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot; import java.io.DataInput; +import java.io.DataOutput; import java.io.IOException; import java.text.SimpleDateFormat; import java.util.Arrays; @@ -30,6 +31,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.namenode.AclFeature; import org.apache.hadoop.hdfs.server.namenode.FSImageFormat; +import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; import org.apache.hadoop.hdfs.util.ReadOnlyList; @@ -214,4 +216,11 @@ public int hashCode() { public String toString() { return getClass().getSimpleName() + "." 
+ root.getLocalName() + "(id=" + id + ")"; } + + /** Serialize the fields to out */ + void write(DataOutput out) throws IOException { + out.writeInt(id); + // write root + FSImageSerialization.writeINodeDirectory(root, out); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java index c0527dd89d6..0194898e8c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java @@ -29,21 +29,75 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageFormat; import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; import org.apache.hadoop.hdfs.server.namenode.INode; +import org.apache.hadoop.hdfs.server.namenode.INodeAttributes; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes; +import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes; import org.apache.hadoop.hdfs.server.namenode.INodeReference; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList; import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff; import org.apache.hadoop.hdfs.util.Diff.ListType; -import org.apache.hadoop.hdfs.server.namenode.FSImageFormat.Loader; +import org.apache.hadoop.hdfs.util.ReadOnlyList; /** * A helper class defining static methods for reading/writing snapshot related * information from/to FSImage. */ public class SnapshotFSImageFormat { + /** + * Save snapshots and snapshot quota for a snapshottable directory. + * @param current The directory that the snapshots belongs to. + * @param out The {@link DataOutput} to write. + * @throws IOException + */ + public static void saveSnapshots(INodeDirectorySnapshottable current, + DataOutput out) throws IOException { + // list of snapshots in snapshotsByNames + ReadOnlyList snapshots = current.getSnapshotsByNames(); + out.writeInt(snapshots.size()); + for (Snapshot s : snapshots) { + // write the snapshot id + out.writeInt(s.getId()); + } + // snapshot quota + out.writeInt(current.getSnapshotQuota()); + } + + /** + * Save SnapshotDiff list for an INodeDirectoryWithSnapshot. + * @param sNode The directory that the SnapshotDiff list belongs to. + * @param out The {@link DataOutput} to write. 
+ */ + private static > + void saveINodeDiffs(final AbstractINodeDiffList diffs, + final DataOutput out, ReferenceMap referenceMap) throws IOException { + // Record the diffs in reversed order, so that we can find the correct + // reference for INodes in the created list when loading the FSImage + if (diffs == null) { + out.writeInt(-1); // no diffs + } else { + final List list = diffs.asList(); + final int size = list.size(); + out.writeInt(size); + for (int i = size - 1; i >= 0; i--) { + list.get(i).write(out, referenceMap); + } + } + } + + public static void saveDirectoryDiffList(final INodeDirectory dir, + final DataOutput out, final ReferenceMap referenceMap + ) throws IOException { + saveINodeDiffs(dir.getDiffs(), out, referenceMap); + } + + public static void saveFileDiffList(final INodeFile file, + final DataOutput out) throws IOException { + saveINodeDiffs(file.getDiffs(), out, null); + } + public static FileDiffList loadFileDiffList(DataInput in, FSImageFormat.Loader loader) throws IOException { final int size = in.readInt(); @@ -264,6 +318,23 @@ public static class ReferenceMap { * Used to record whether the subtree of the reference node has been saved */ private final Map dirMap = new HashMap(); + + public void writeINodeReferenceWithCount( + INodeReference.WithCount withCount, DataOutput out, + boolean writeUnderConstruction) throws IOException { + final INode referred = withCount.getReferredINode(); + final long id = withCount.getId(); + final boolean firstReferred = !referenceMap.containsKey(id); + out.writeBoolean(firstReferred); + + if (firstReferred) { + FSImageSerialization.saveINode2Image(referred, out, + writeUnderConstruction, this); + referenceMap.put(id, withCount); + } else { + out.writeLong(id); + } + } public boolean toProcessSubtree(long id) { if (dirMap.containsKey(id)) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java index 51479473011..4f6a6211358 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot; import java.io.DataInput; +import java.io.DataOutput; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -287,6 +288,22 @@ INodeDirectorySnapshottable[] getSnapshottableDirs() { return snapshottables.values().toArray( new INodeDirectorySnapshottable[snapshottables.size()]); } + + /** + * Write {@link #snapshotCounter}, {@link #numSnapshots}, + * and all snapshots to the DataOutput. + */ + public void write(DataOutput out) throws IOException { + out.writeInt(snapshotCounter); + out.writeInt(numSnapshots.get()); + + // write all snapshots. 
+ for(INodeDirectorySnapshottable snapshottableDir : snapshottables.values()) { + for(Snapshot s : snapshottableDir.getSnapshotsByNames()) { + s.write(out); + } + } + } /** * Read values of {@link #snapshotCounter}, {@link #numSnapshots}, and diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java index b7ac968dede..3325570c7d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java @@ -503,19 +503,21 @@ public String getName() { @Override public String getShortUsage() { - return "[" + getName() + " [-stats] [-path ] [-pool ]]\n"; + return "[" + getName() + + " [-stats] [-path ] [-pool ] [-id ]\n"; } @Override public String getLongUsage() { TableListing listing = getOptionDescriptionListing(); + listing.addRow("-stats", "List path-based cache directive statistics."); listing.addRow("", "List only " + "cache directives with this path. " + "Note that if there is a cache directive for " + "in a cache pool that we don't have read access for, it " + "will not be listed."); listing.addRow("", "List only path cache directives in that pool."); - listing.addRow("-stats", "List path-based cache directive statistics."); + listing.addRow("", "List the cache directive with this id."); return getShortUsage() + "\n" + "List cache directives.\n\n" + listing.toString(); @@ -534,6 +536,10 @@ public int run(Configuration conf, List args) throws IOException { builder.setPool(poolFilter); } boolean printStats = StringUtils.popOption("-stats", args); + String idFilter = StringUtils.popOptionWithArgument("-id", args); + if (idFilter != null) { + builder.setId(Long.parseLong(idFilter)); + } if (!args.isEmpty()) { System.err.println("Can't understand argument: " + args.get(0)); return 1; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java index e02dcbbcb93..5c4b49d9b40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java @@ -19,6 +19,7 @@ import java.io.PrintStream; import java.util.Arrays; +import java.util.Collection; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -27,6 +28,7 @@ import org.apache.hadoop.ha.HAAdmin; import org.apache.hadoop.ha.HAServiceTarget; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.util.ToolRunner; @@ -117,7 +119,15 @@ protected int runCmd(String[] argv) throws Exception { return super.runCmd(argv); } - + + /** + * returns the list of all namenode ids for the given configuration + */ + @Override + protected Collection getTargetIds(String namenodeToActivate) { + return DFSUtil.getNameNodeIds(getConf(), (nameserviceId != null)? 
nameserviceId : DFSUtil.getNamenodeNameServiceId(getConf())); + } + public static void main(String[] argv) throws Exception { int res = ToolRunner.run(new DFSHAAdmin(), argv); System.exit(res); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/DelimitedImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/DelimitedImageVisitor.java new file mode 100644 index 00000000000..eb6cae3d58a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/DelimitedImageVisitor.java @@ -0,0 +1,172 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.tools.offlineImageViewer; + +import java.io.IOException; +import java.util.AbstractMap; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedList; + +/** + * A DelimitedImageVisitor generates a text representation of the fsimage, + * with each element separated by a delimiter string. All of the elements + * common to both inodes and inodes-under-construction are included. When + * processing an fsimage with a layout version that did not include an + * element, such as AccessTime, the output file will include a column + * for the value, but no value will be included. + * + * Individual block information for each file is not currently included. + * + * The default delimiter is tab, as this is an unlikely value to be included + * an inode path or other text metadata. The delimiter value can be via the + * constructor. 
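// Illustrative sketch (hypothetical helper, not part of the patch) of the row
// layout described in the javadoc above: the tracked columns are emitted in a
// fixed order, separated by the delimiter, and a column whose value was never
// recorded is simply left empty.
class DelimitedRowSketch {
  static String buildRow(String[] values, String delimiter) {
    StringBuilder row = new StringBuilder();
    for (int i = 0; i < values.length; i++) {
      if (i > 0) {
        row.append(delimiter);
      }
      if (values[i] != null) {
        row.append(values[i]);   // null means the element was absent: empty column
      }
    }
    return row.toString();
  }

  public static void main(String[] args) {
    // Columns: INODE_PATH, REPLICATION, ACCESS_TIME (ACCESS_TIME not recorded).
    String[] values = { "/user/foo/bar", "3", null };
    System.out.println(buildRow(values, "\t"));   // prints "/user/foo/bar\t3\t"
  }
}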
+ */ +class DelimitedImageVisitor extends TextWriterImageVisitor { + private static final String defaultDelimiter = "\t"; + + final private LinkedList elemQ = new LinkedList(); + private long fileSize = 0l; + // Elements of fsimage we're interested in tracking + private final Collection elementsToTrack; + // Values for each of the elements in elementsToTrack + private final AbstractMap elements = + new HashMap(); + private final String delimiter; + + { + elementsToTrack = new ArrayList(); + + // This collection determines what elements are tracked and the order + // in which they are output + Collections.addAll(elementsToTrack, ImageElement.INODE_PATH, + ImageElement.REPLICATION, + ImageElement.MODIFICATION_TIME, + ImageElement.ACCESS_TIME, + ImageElement.BLOCK_SIZE, + ImageElement.NUM_BLOCKS, + ImageElement.NUM_BYTES, + ImageElement.NS_QUOTA, + ImageElement.DS_QUOTA, + ImageElement.PERMISSION_STRING, + ImageElement.USER_NAME, + ImageElement.GROUP_NAME); + } + + public DelimitedImageVisitor(String filename) throws IOException { + this(filename, false); + } + + public DelimitedImageVisitor(String outputFile, boolean printToScreen) + throws IOException { + this(outputFile, printToScreen, defaultDelimiter); + } + + public DelimitedImageVisitor(String outputFile, boolean printToScreen, + String delimiter) throws IOException { + super(outputFile, printToScreen); + this.delimiter = delimiter; + reset(); + } + + /** + * Reset the values of the elements we're tracking in order to handle + * the next file + */ + private void reset() { + elements.clear(); + for(ImageElement e : elementsToTrack) + elements.put(e, null); + + fileSize = 0l; + } + + @Override + void leaveEnclosingElement() throws IOException { + ImageElement elem = elemQ.pop(); + + // If we're done with an inode, write out our results and start over + if(elem == ImageElement.INODE || + elem == ImageElement.INODE_UNDER_CONSTRUCTION) { + writeLine(); + write("\n"); + reset(); + } + } + + /** + * Iterate through all the elements we're tracking and, if a value was + * recorded for it, write it out. 
+ */ + private void writeLine() throws IOException { + Iterator it = elementsToTrack.iterator(); + + while(it.hasNext()) { + ImageElement e = it.next(); + + String v = null; + if(e == ImageElement.NUM_BYTES) + v = String.valueOf(fileSize); + else + v = elements.get(e); + + if(v != null) + write(v); + + if(it.hasNext()) + write(delimiter); + } + } + + @Override + void visit(ImageElement element, String value) throws IOException { + // Explicitly label the root path + if(element == ImageElement.INODE_PATH && value.equals("")) + value = "/"; + + // Special case of file size, which is sum of the num bytes in each block + if(element == ImageElement.NUM_BYTES) + fileSize += Long.valueOf(value); + + if(elements.containsKey(element) && element != ImageElement.NUM_BYTES) + elements.put(element, value); + + } + + @Override + void visitEnclosingElement(ImageElement element) throws IOException { + elemQ.push(element); + } + + @Override + void visitEnclosingElement(ImageElement element, ImageElement key, + String value) throws IOException { + // Special case as numBlocks is an attribute of the blocks element + if(key == ImageElement.NUM_BLOCKS + && elements.containsKey(ImageElement.NUM_BLOCKS)) + elements.put(key, value); + + elemQ.push(element); + } + + @Override + void start() throws IOException { /* Nothing to do */ } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/DepthCounter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/DepthCounter.java new file mode 100644 index 00000000000..3e6465ef5ab --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/DepthCounter.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.tools.offlineImageViewer; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Utility class for tracking descent into the structure of the + * Visitor class (ImageVisitor, EditsVisitor etc.) 
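// A minimal sketch (hypothetical stand-alone code, not part of the patch) of the
// pairing a depth counter like this is meant for: increment when an enclosing
// element is entered, decrement when it is left, and derive indentation from the
// current level.
class DepthTrackingSketch {
  private int depth = 0;

  void enterElement(String name) {
    StringBuilder line = new StringBuilder();
    for (int i = 0; i < depth; i++) {
      line.append("  ");               // two spaces per nesting level
    }
    System.out.println(line.append(name));
    depth++;
  }

  void leaveElement() {
    if (depth >= 1) {
      depth--;
    }
  }

  public static void main(String[] args) {
    DepthTrackingSketch d = new DepthTrackingSketch();
    d.enterElement("FS_IMAGE");   // printed with no indent
    d.enterElement("INODE");      // printed indented one level
    d.leaveElement();
    d.leaveElement();
  }
}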
+ */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class DepthCounter { + private int depth = 0; + + public void incLevel() { depth++; } + public void decLevel() { if(depth >= 1) depth--; } + public int getLevel() { return depth; } +} + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java new file mode 100644 index 00000000000..f293db44d3d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java @@ -0,0 +1,193 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.tools.offlineImageViewer; + +import java.io.IOException; +import java.util.LinkedList; + +/** + * File size distribution visitor. + * + *

<h3>Description.</h3>
+ * This is the tool for analyzing file sizes in the namespace image. + * In order to run the tool one should define a range of integers + * [0, maxSize] by specifying maxSize and a step. + * The range of integers is divided into segments of size step: + * [0, s1, ..., sn-1, maxSize], + * and the visitor calculates how many files in the system fall into + * each segment [si-1, si). + * Note that files larger than maxSize always fall into + * the very last segment. + * + *
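// A minimal standalone sketch (hypothetical names, not part of the patch) of the
// segmentation described above: [0, maxSize] is divided into buckets of width
// "step", and any file larger than maxSize is counted in the last bucket.
class FileSizeHistogramSketch {
  private final long maxSize;
  private final int step;
  private final int[] counts;

  FileSizeHistogramSketch(long maxSize, int step) {
    this.maxSize = maxSize;
    this.step = step;
    this.counts = new int[1 + (int) (maxSize / step)];
  }

  void add(long fileSize) {
    int bucket;
    if (fileSize > maxSize) {
      bucket = counts.length - 1;          // oversized files land in the last segment
    } else {
      // a file of this size falls into segment ceil(fileSize / step), clamped for safety
      bucket = (int) Math.min(counts.length - 1,
          (long) Math.ceil((double) fileSize / step));
    }
    counts[bucket]++;
  }

  public static void main(String[] args) {
    // e.g. maxSize = 128, step = 32 gives five buckets: {0}, (0,32], (32,64], (64,96], (96,128]
    FileSizeHistogramSketch h = new FileSizeHistogramSketch(128, 32);
    h.add(0);    // bucket 0
    h.add(33);   // bucket 2
    h.add(500);  // larger than maxSize -> last bucket (index 4)
    System.out.println(java.util.Arrays.toString(h.counts));  // [1, 0, 1, 0, 1]
  }
}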

<h3>Input.</h3>
+ * <ul>
+ * <li>filename specifies the location of the image file;</li>
+ * <li>maxSize determines the range [0, maxSize] of file sizes
+ * considered by the visitor;</li>
+ * <li>step the range is divided into segments of size step.</li>
+ * </ul>
+ *
+ * <h3>Output.</h3>
+ * The output file is formatted as a tab separated two column table: + * Size and NumFiles. Where Size represents the start of the segment, + * and numFiles is the number of files form the image which size falls in + * this segment. + */ +class FileDistributionVisitor extends TextWriterImageVisitor { + final private LinkedList elemS = new LinkedList(); + + private final static long MAX_SIZE_DEFAULT = 0x2000000000L; // 1/8 TB = 2^37 + private final static int INTERVAL_DEFAULT = 0x200000; // 2 MB = 2^21 + + private int[] distribution; + private long maxSize; + private int step; + + private int totalFiles; + private int totalDirectories; + private int totalBlocks; + private long totalSpace; + private long maxFileSize; + + private FileContext current; + + private boolean inInode = false; + + /** + * File or directory information. + */ + private static class FileContext { + String path; + long fileSize; + int numBlocks; + int replication; + } + + public FileDistributionVisitor(String filename, + long maxSize, + int step) throws IOException { + super(filename, false); + this.maxSize = (maxSize == 0 ? MAX_SIZE_DEFAULT : maxSize); + this.step = (step == 0 ? INTERVAL_DEFAULT : step); + long numIntervals = this.maxSize / this.step; + if(numIntervals >= Integer.MAX_VALUE) + throw new IOException("Too many distribution intervals " + numIntervals); + this.distribution = new int[1 + (int)(numIntervals)]; + this.totalFiles = 0; + this.totalDirectories = 0; + this.totalBlocks = 0; + this.totalSpace = 0; + this.maxFileSize = 0; + } + + @Override + void start() throws IOException {} + + @Override + void finish() throws IOException { + output(); + super.finish(); + } + + @Override + void finishAbnormally() throws IOException { + System.out.println("*** Image processing finished abnormally. Ending ***"); + output(); + super.finishAbnormally(); + } + + private void output() throws IOException { + // write the distribution into the output file + write("Size\tNumFiles\n"); + for(int i = 0; i < distribution.length; i++) + write(((long)i * step) + "\t" + distribution[i] + "\n"); + System.out.println("totalFiles = " + totalFiles); + System.out.println("totalDirectories = " + totalDirectories); + System.out.println("totalBlocks = " + totalBlocks); + System.out.println("totalSpace = " + totalSpace); + System.out.println("maxFileSize = " + maxFileSize); + } + + @Override + void leaveEnclosingElement() throws IOException { + ImageElement elem = elemS.pop(); + + if(elem != ImageElement.INODE && + elem != ImageElement.INODE_UNDER_CONSTRUCTION) + return; + inInode = false; + if(current.numBlocks < 0) { + totalDirectories ++; + return; + } + totalFiles++; + totalBlocks += current.numBlocks; + totalSpace += current.fileSize * current.replication; + if(maxFileSize < current.fileSize) + maxFileSize = current.fileSize; + int high; + if(current.fileSize > maxSize) + high = distribution.length-1; + else + high = (int)Math.ceil((double)current.fileSize / step); + distribution[high]++; + if(totalFiles % 1000000 == 1) + System.out.println("Files processed: " + totalFiles + + " Current: " + current.path); + } + + @Override + void visit(ImageElement element, String value) throws IOException { + if(inInode) { + switch(element) { + case INODE_PATH: + current.path = (value.equals("") ? 
"/" : value); + break; + case REPLICATION: + current.replication = Integer.valueOf(value); + break; + case NUM_BYTES: + current.fileSize += Long.valueOf(value); + break; + default: + break; + } + } + } + + @Override + void visitEnclosingElement(ImageElement element) throws IOException { + elemS.push(element); + if(element == ImageElement.INODE || + element == ImageElement.INODE_UNDER_CONSTRUCTION) { + current = new FileContext(); + inInode = true; + } + } + + @Override + void visitEnclosingElement(ImageElement element, + ImageElement key, String value) throws IOException { + elemS.push(element); + if(element == ImageElement.INODE || + element == ImageElement.INODE_UNDER_CONSTRUCTION) + inInode = true; + else if(element == ImageElement.BLOCKS) + current.numBlocks = Integer.parseInt(value); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoader.java new file mode 100644 index 00000000000..08b0bf58e0a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoader.java @@ -0,0 +1,83 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.tools.offlineImageViewer; + +import java.io.DataInputStream; +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * An ImageLoader can accept a DataInputStream to an Hadoop FSImage file + * and walk over its structure using the supplied ImageVisitor. + * + * Each implementation of ImageLoader is designed to rapidly process an + * image file. As long as minor changes are made from one layout version + * to another, it is acceptable to tweak one implementation to read the next. + * However, if the layout version changes enough that it would make a + * processor slow or difficult to read, another processor should be created. + * This allows each processor to quickly read an image without getting + * bogged down in dealing with significant differences between layout versions. + */ +interface ImageLoader { + + /** + * @param in DataInputStream pointing to an Hadoop FSImage file + * @param v Visit to apply to the FSImage file + * @param enumerateBlocks Should visitor visit each of the file blocks? + */ + public void loadImage(DataInputStream in, ImageVisitor v, + boolean enumerateBlocks) throws IOException; + + /** + * Can this processor handle the specified version of FSImage file? 
+ * + * @param version FSImage version file + * @return True if this instance can process the file + */ + public boolean canLoadVersion(int version); + + /** + * Factory for obtaining version of image loader that can read + * a particular image format. + */ + @InterfaceAudience.Private + public class LoaderFactory { + // Java doesn't support static methods on interfaces, which necessitates + // this factory class + + /** + * Find an image loader capable of interpreting the specified + * layout version number. If none, return null; + * + * @param version fsimage layout version number to be processed + * @return ImageLoader that can interpret specified version, or null + */ + static public ImageLoader getLoader(int version) { + // Easy to add more image processors as they are written + ImageLoader[] loaders = { new ImageLoaderCurrent() }; + + for (ImageLoader l : loaders) { + if (l.canLoadVersion(version)) + return l; + } + + return null; + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java new file mode 100644 index 00000000000..d8b7e3666d6 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java @@ -0,0 +1,821 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.tools.offlineImageViewer; + +import java.io.DataInputStream; +import java.io.IOException; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; +import org.apache.hadoop.hdfs.protocol.LayoutFlags; +import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; +import org.apache.hadoop.hdfs.server.namenode.INodeId; +import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion; +import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.io.compress.CompressionCodecFactory; +import org.apache.hadoop.security.token.delegation.DelegationKey; + +/** + * ImageLoaderCurrent processes Hadoop FSImage files and walks over + * them using a provided ImageVisitor, calling the visitor at each element + * enumerated below. + * + * The only difference between v18 and v19 was the utilization of the + * stickybit. Therefore, the same viewer can reader either format. + * + * Versions -19 fsimage layout (with changes from -16 up): + * Image version (int) + * Namepsace ID (int) + * NumFiles (long) + * Generation stamp (long) + * INodes (count = NumFiles) + * INode + * Path (String) + * Replication (short) + * Modification Time (long as date) + * Access Time (long) // added in -16 + * Block size (long) + * Num blocks (int) + * Blocks (count = Num blocks) + * Block + * Block ID (long) + * Num bytes (long) + * Generation stamp (long) + * Namespace Quota (long) + * Diskspace Quota (long) // added in -18 + * Permissions + * Username (String) + * Groupname (String) + * OctalPerms (short -> String) // Modified in -19 + * Symlink (String) // added in -23 + * NumINodesUnderConstruction (int) + * INodesUnderConstruction (count = NumINodesUnderConstruction) + * INodeUnderConstruction + * Path (bytes as string) + * Replication (short) + * Modification time (long as date) + * Preferred block size (long) + * Num blocks (int) + * Blocks + * Block + * Block ID (long) + * Num bytes (long) + * Generation stamp (long) + * Permissions + * Username (String) + * Groupname (String) + * OctalPerms (short -> String) + * Client Name (String) + * Client Machine (String) + * NumLocations (int) + * DatanodeDescriptors (count = numLocations) // not loaded into memory + * short // but still in file + * long + * string + * long + * int + * string + * string + * enum + * CurrentDelegationKeyId (int) + * NumDelegationKeys (int) + * DelegationKeys (count = NumDelegationKeys) + * DelegationKeyLength (vint) + * DelegationKey (bytes) + * DelegationTokenSequenceNumber (int) + * NumDelegationTokens (int) + * DelegationTokens (count = NumDelegationTokens) + * DelegationTokenIdentifier + * owner (String) + * renewer (String) + * realUser (String) + * issueDate (vlong) + * maxDate (vlong) + * sequenceNumber (vint) + * masterKeyId (vint) + * expiryTime (long) + * + */ +class ImageLoaderCurrent implements ImageLoader { + protected final DateFormat dateFormat = + new SimpleDateFormat("yyyy-MM-dd HH:mm"); + private static int[] 
versions = { -16, -17, -18, -19, -20, -21, -22, -23, + -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39, + -40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50, -51 }; + private int imageVersion = 0; + + private final Map subtreeMap = new HashMap(); + private final Map dirNodeMap = new HashMap(); + + /* (non-Javadoc) + * @see ImageLoader#canProcessVersion(int) + */ + @Override + public boolean canLoadVersion(int version) { + for(int v : versions) + if(v == version) return true; + + return false; + } + + /* (non-Javadoc) + * @see ImageLoader#processImage(java.io.DataInputStream, ImageVisitor, boolean) + */ + @Override + public void loadImage(DataInputStream in, ImageVisitor v, + boolean skipBlocks) throws IOException { + boolean done = false; + try { + v.start(); + v.visitEnclosingElement(ImageElement.FS_IMAGE); + + imageVersion = in.readInt(); + if( !canLoadVersion(imageVersion)) + throw new IOException("Cannot process fslayout version " + imageVersion); + if (NameNodeLayoutVersion.supports(Feature.ADD_LAYOUT_FLAGS, imageVersion)) { + LayoutFlags.read(in); + } + + v.visit(ImageElement.IMAGE_VERSION, imageVersion); + v.visit(ImageElement.NAMESPACE_ID, in.readInt()); + + long numInodes = in.readLong(); + + v.visit(ImageElement.GENERATION_STAMP, in.readLong()); + + if (NameNodeLayoutVersion.supports(Feature.SEQUENTIAL_BLOCK_ID, imageVersion)) { + v.visit(ImageElement.GENERATION_STAMP_V2, in.readLong()); + v.visit(ImageElement.GENERATION_STAMP_V1_LIMIT, in.readLong()); + v.visit(ImageElement.LAST_ALLOCATED_BLOCK_ID, in.readLong()); + } + + if (NameNodeLayoutVersion.supports(Feature.STORED_TXIDS, imageVersion)) { + v.visit(ImageElement.TRANSACTION_ID, in.readLong()); + } + + if (NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) { + v.visit(ImageElement.LAST_INODE_ID, in.readLong()); + } + + boolean supportSnapshot = NameNodeLayoutVersion.supports(Feature.SNAPSHOT, + imageVersion); + if (supportSnapshot) { + v.visit(ImageElement.SNAPSHOT_COUNTER, in.readInt()); + int numSnapshots = in.readInt(); + v.visit(ImageElement.NUM_SNAPSHOTS_TOTAL, numSnapshots); + for (int i = 0; i < numSnapshots; i++) { + processSnapshot(in, v); + } + } + + if (NameNodeLayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imageVersion)) { + boolean isCompressed = in.readBoolean(); + v.visit(ImageElement.IS_COMPRESSED, String.valueOf(isCompressed)); + if (isCompressed) { + String codecClassName = Text.readString(in); + v.visit(ImageElement.COMPRESS_CODEC, codecClassName); + CompressionCodecFactory codecFac = new CompressionCodecFactory( + new Configuration()); + CompressionCodec codec = codecFac.getCodecByClassName(codecClassName); + if (codec == null) { + throw new IOException("Image compression codec not supported: " + + codecClassName); + } + in = new DataInputStream(codec.createInputStream(in)); + } + } + processINodes(in, v, numInodes, skipBlocks, supportSnapshot); + subtreeMap.clear(); + dirNodeMap.clear(); + + processINodesUC(in, v, skipBlocks); + + if (NameNodeLayoutVersion.supports(Feature.DELEGATION_TOKEN, imageVersion)) { + processDelegationTokens(in, v); + } + + if (NameNodeLayoutVersion.supports(Feature.CACHING, imageVersion)) { + processCacheManagerState(in, v); + } + v.leaveEnclosingElement(); // FSImage + done = true; + } finally { + if (done) { + v.finish(); + } else { + v.finishAbnormally(); + } + } + } + + /** + * Process CacheManager state from the fsimage. 
+ */ + private void processCacheManagerState(DataInputStream in, ImageVisitor v) + throws IOException { + v.visit(ImageElement.CACHE_NEXT_ENTRY_ID, in.readLong()); + final int numPools = in.readInt(); + for (int i=0; i 0) { + numInodes -= processDirectory(in, v, skipBlocks); + } + } + + private int processDirectory(DataInputStream in, ImageVisitor v, + boolean skipBlocks) throws IOException { + String parentName = FSImageSerialization.readString(in); + return processChildren(in, v, skipBlocks, parentName); + } + + /** + * Process image with local path name and snapshot support + * + * @param in image stream + * @param v visitor + * @param skipBlocks skip blocks or not + */ + private void processLocalNameINodesWithSnapshot(DataInputStream in, + ImageVisitor v, boolean skipBlocks) throws IOException { + // process root + processINode(in, v, skipBlocks, "", false); + processDirectoryWithSnapshot(in, v, skipBlocks); + } + + /** + * Process directories when snapshot is supported. + */ + private void processDirectoryWithSnapshot(DataInputStream in, ImageVisitor v, + boolean skipBlocks) throws IOException { + // 1. load dir node id + long inodeId = in.readLong(); + + String dirName = dirNodeMap.remove(inodeId); + Boolean visitedRef = subtreeMap.get(inodeId); + if (visitedRef != null) { + if (visitedRef.booleanValue()) { // the subtree has been visited + return; + } else { // first time to visit + subtreeMap.put(inodeId, true); + } + } // else the dir is not linked by a RefNode, thus cannot be revisited + + // 2. load possible snapshots + processSnapshots(in, v, dirName); + // 3. load children nodes + processChildren(in, v, skipBlocks, dirName); + // 4. load possible directory diff list + processDirectoryDiffList(in, v, dirName); + // recursively process sub-directories + final int numSubTree = in.readInt(); + for (int i = 0; i < numSubTree; i++) { + processDirectoryWithSnapshot(in, v, skipBlocks); + } + } + + /** + * Process snapshots of a snapshottable directory + */ + private void processSnapshots(DataInputStream in, ImageVisitor v, + String rootName) throws IOException { + final int numSnapshots = in.readInt(); + if (numSnapshots >= 0) { + v.visitEnclosingElement(ImageElement.SNAPSHOTS, + ImageElement.NUM_SNAPSHOTS, numSnapshots); + for (int i = 0; i < numSnapshots; i++) { + // process snapshot + v.visitEnclosingElement(ImageElement.SNAPSHOT); + v.visit(ImageElement.SNAPSHOT_ID, in.readInt()); + v.leaveEnclosingElement(); + } + v.visit(ImageElement.SNAPSHOT_QUOTA, in.readInt()); + v.leaveEnclosingElement(); + } + } + + private void processSnapshot(DataInputStream in, ImageVisitor v) + throws IOException { + v.visitEnclosingElement(ImageElement.SNAPSHOT); + v.visit(ImageElement.SNAPSHOT_ID, in.readInt()); + // process root of snapshot + v.visitEnclosingElement(ImageElement.SNAPSHOT_ROOT); + processINode(in, v, true, "", false); + v.leaveEnclosingElement(); + v.leaveEnclosingElement(); + } + + private void processDirectoryDiffList(DataInputStream in, ImageVisitor v, + String currentINodeName) throws IOException { + final int numDirDiff = in.readInt(); + if (numDirDiff >= 0) { + v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFFS, + ImageElement.NUM_SNAPSHOT_DIR_DIFF, numDirDiff); + for (int i = 0; i < numDirDiff; i++) { + // process directory diffs in reverse chronological oder + processDirectoryDiff(in, v, currentINodeName); + } + v.leaveEnclosingElement(); + } + } + + private void processDirectoryDiff(DataInputStream in, ImageVisitor v, + String currentINodeName) throws IOException { + 
v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFF); + int snapshotId = in.readInt(); + v.visit(ImageElement.SNAPSHOT_DIFF_SNAPSHOTID, snapshotId); + v.visit(ImageElement.SNAPSHOT_DIR_DIFF_CHILDREN_SIZE, in.readInt()); + + // process snapshotINode + boolean useRoot = in.readBoolean(); + if (!useRoot) { + if (in.readBoolean()) { + v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_DIRECTORY_ATTRIBUTES); + if (NameNodeLayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) { + processINodeDirectoryAttributes(in, v, currentINodeName); + } else { + processINode(in, v, true, currentINodeName, true); + } + v.leaveEnclosingElement(); + } + } + + // process createdList + int createdSize = in.readInt(); + v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFF_CREATEDLIST, + ImageElement.SNAPSHOT_DIR_DIFF_CREATEDLIST_SIZE, createdSize); + for (int i = 0; i < createdSize; i++) { + String createdNode = FSImageSerialization.readString(in); + v.visit(ImageElement.SNAPSHOT_DIR_DIFF_CREATED_INODE, createdNode); + } + v.leaveEnclosingElement(); + + // process deletedList + int deletedSize = in.readInt(); + v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFF_DELETEDLIST, + ImageElement.SNAPSHOT_DIR_DIFF_DELETEDLIST_SIZE, deletedSize); + for (int i = 0; i < deletedSize; i++) { + v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFF_DELETED_INODE); + processINode(in, v, false, currentINodeName, true); + v.leaveEnclosingElement(); + } + v.leaveEnclosingElement(); + v.leaveEnclosingElement(); + } + + private void processINodeDirectoryAttributes(DataInputStream in, ImageVisitor v, + String parentName) throws IOException { + final String pathName = readINodePath(in, parentName); + v.visit(ImageElement.INODE_PATH, pathName); + processPermission(in, v); + v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong())); + + v.visit(ImageElement.NS_QUOTA, in.readLong()); + v.visit(ImageElement.DS_QUOTA, in.readLong()); + } + + /** Process children under a directory */ + private int processChildren(DataInputStream in, ImageVisitor v, + boolean skipBlocks, String parentName) throws IOException { + int numChildren = in.readInt(); + for (int i = 0; i < numChildren; i++) { + processINode(in, v, skipBlocks, parentName, false); + } + return numChildren; + } + + /** + * Process image with full path name + * + * @param in image stream + * @param v visitor + * @param numInodes number of indoes to read + * @param skipBlocks skip blocks or not + * @throws IOException if there is any error occurs + */ + private void processFullNameINodes(DataInputStream in, ImageVisitor v, + long numInodes, boolean skipBlocks) throws IOException { + for(long i = 0; i < numInodes; i++) { + processINode(in, v, skipBlocks, null, false); + } + } + + private String readINodePath(DataInputStream in, String parentName) + throws IOException { + String pathName = FSImageSerialization.readString(in); + if (parentName != null) { // local name + pathName = "/" + pathName; + if (!"/".equals(parentName)) { // children of non-root directory + pathName = parentName + pathName; + } + } + return pathName; + } + + /** + * Process an INode + * + * @param in image stream + * @param v visitor + * @param skipBlocks skip blocks or not + * @param parentName the name of its parent node + * @param isSnapshotCopy whether or not the inode is a snapshot copy + * @throws IOException + */ + private void processINode(DataInputStream in, ImageVisitor v, + boolean skipBlocks, String parentName, boolean isSnapshotCopy) + throws IOException { + boolean 
supportSnapshot = + NameNodeLayoutVersion.supports(Feature.SNAPSHOT, imageVersion); + boolean supportInodeId = + NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion); + + v.visitEnclosingElement(ImageElement.INODE); + final String pathName = readINodePath(in, parentName); + v.visit(ImageElement.INODE_PATH, pathName); + + long inodeId = INodeId.GRANDFATHER_INODE_ID; + if (supportInodeId) { + inodeId = in.readLong(); + v.visit(ImageElement.INODE_ID, inodeId); + } + v.visit(ImageElement.REPLICATION, in.readShort()); + v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong())); + if(NameNodeLayoutVersion.supports(Feature.FILE_ACCESS_TIME, imageVersion)) + v.visit(ImageElement.ACCESS_TIME, formatDate(in.readLong())); + v.visit(ImageElement.BLOCK_SIZE, in.readLong()); + int numBlocks = in.readInt(); + + processBlocks(in, v, numBlocks, skipBlocks); + + if (numBlocks >= 0) { // File + if (supportSnapshot) { + // make sure subtreeMap only contains entry for directory + subtreeMap.remove(inodeId); + // process file diffs + processFileDiffList(in, v, parentName); + if (isSnapshotCopy) { + boolean underConstruction = in.readBoolean(); + if (underConstruction) { + v.visit(ImageElement.CLIENT_NAME, + FSImageSerialization.readString(in)); + v.visit(ImageElement.CLIENT_MACHINE, + FSImageSerialization.readString(in)); + } + } + } + processPermission(in, v); + } else if (numBlocks == -1) { // Directory + if (supportSnapshot && supportInodeId) { + dirNodeMap.put(inodeId, pathName); + } + v.visit(ImageElement.NS_QUOTA, numBlocks == -1 ? in.readLong() : -1); + if (NameNodeLayoutVersion.supports(Feature.DISKSPACE_QUOTA, imageVersion)) + v.visit(ImageElement.DS_QUOTA, numBlocks == -1 ? in.readLong() : -1); + if (supportSnapshot) { + boolean snapshottable = in.readBoolean(); + if (!snapshottable) { + boolean withSnapshot = in.readBoolean(); + v.visit(ImageElement.IS_WITHSNAPSHOT_DIR, Boolean.toString(withSnapshot)); + } else { + v.visit(ImageElement.IS_SNAPSHOTTABLE_DIR, Boolean.toString(snapshottable)); + } + } + processPermission(in, v); + } else if (numBlocks == -2) { + v.visit(ImageElement.SYMLINK, Text.readString(in)); + processPermission(in, v); + } else if (numBlocks == -3) { // reference node + final boolean isWithName = in.readBoolean(); + int snapshotId = in.readInt(); + if (isWithName) { + v.visit(ImageElement.SNAPSHOT_LAST_SNAPSHOT_ID, snapshotId); + } else { + v.visit(ImageElement.SNAPSHOT_DST_SNAPSHOT_ID, snapshotId); + } + + final boolean firstReferred = in.readBoolean(); + if (firstReferred) { + // if a subtree is linked by multiple "parents", the corresponding dir + // must be referred by a reference node. we put the reference node into + // the subtreeMap here and let its value be false. when we later visit + // the subtree for the first time, we change the value to true. 
+ subtreeMap.put(inodeId, false); + v.visitEnclosingElement(ImageElement.SNAPSHOT_REF_INODE); + processINode(in, v, skipBlocks, parentName, isSnapshotCopy); + v.leaveEnclosingElement(); // referred inode + } else { + v.visit(ImageElement.SNAPSHOT_REF_INODE_ID, in.readLong()); + } + } + + v.leaveEnclosingElement(); // INode + } + + private void processINodeFileAttributes(DataInputStream in, ImageVisitor v, + String parentName) throws IOException { + final String pathName = readINodePath(in, parentName); + v.visit(ImageElement.INODE_PATH, pathName); + processPermission(in, v); + v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong())); + if(NameNodeLayoutVersion.supports(Feature.FILE_ACCESS_TIME, imageVersion)) { + v.visit(ImageElement.ACCESS_TIME, formatDate(in.readLong())); + } + + v.visit(ImageElement.REPLICATION, in.readShort()); + v.visit(ImageElement.BLOCK_SIZE, in.readLong()); + } + + private void processFileDiffList(DataInputStream in, ImageVisitor v, + String currentINodeName) throws IOException { + final int size = in.readInt(); + if (size >= 0) { + v.visitEnclosingElement(ImageElement.SNAPSHOT_FILE_DIFFS, + ImageElement.NUM_SNAPSHOT_FILE_DIFF, size); + for (int i = 0; i < size; i++) { + processFileDiff(in, v, currentINodeName); + } + v.leaveEnclosingElement(); + } + } + + private void processFileDiff(DataInputStream in, ImageVisitor v, + String currentINodeName) throws IOException { + int snapshotId = in.readInt(); + v.visitEnclosingElement(ImageElement.SNAPSHOT_FILE_DIFF, + ImageElement.SNAPSHOT_DIFF_SNAPSHOTID, snapshotId); + v.visit(ImageElement.SNAPSHOT_FILE_SIZE, in.readLong()); + if (in.readBoolean()) { + v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_FILE_ATTRIBUTES); + if (NameNodeLayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) { + processINodeFileAttributes(in, v, currentINodeName); + } else { + processINode(in, v, true, currentINodeName, true); + } + v.leaveEnclosingElement(); + } + v.leaveEnclosingElement(); + } + + /** + * Helper method to format dates during processing. + * @param date Date as read from image file + * @return String version of date format + */ + private String formatDate(long date) { + return dateFormat.format(new Date(date)); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java new file mode 100644 index 00000000000..8914bcf2170 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java @@ -0,0 +1,212 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.tools.offlineImageViewer; + +import java.io.IOException; + +/** + * An implementation of ImageVisitor can traverse the structure of an + * Hadoop fsimage and respond to each of the structures within the file. + */ +abstract class ImageVisitor { + + /** + * Structural elements of an FSImage that may be encountered within the + * file. ImageVisitors are able to handle processing any of these elements. + */ + public enum ImageElement { + FS_IMAGE, + IMAGE_VERSION, + NAMESPACE_ID, + IS_COMPRESSED, + COMPRESS_CODEC, + LAYOUT_VERSION, + NUM_INODES, + GENERATION_STAMP, + GENERATION_STAMP_V2, + GENERATION_STAMP_V1_LIMIT, + LAST_ALLOCATED_BLOCK_ID, + INODES, + INODE, + INODE_PATH, + REPLICATION, + MODIFICATION_TIME, + ACCESS_TIME, + BLOCK_SIZE, + NUM_BLOCKS, + BLOCKS, + BLOCK, + BLOCK_ID, + NUM_BYTES, + NS_QUOTA, + DS_QUOTA, + PERMISSIONS, + SYMLINK, + NUM_INODES_UNDER_CONSTRUCTION, + INODES_UNDER_CONSTRUCTION, + INODE_UNDER_CONSTRUCTION, + PREFERRED_BLOCK_SIZE, + CLIENT_NAME, + CLIENT_MACHINE, + USER_NAME, + GROUP_NAME, + PERMISSION_STRING, + CURRENT_DELEGATION_KEY_ID, + NUM_DELEGATION_KEYS, + DELEGATION_KEYS, + DELEGATION_KEY, + DELEGATION_TOKEN_SEQUENCE_NUMBER, + NUM_DELEGATION_TOKENS, + DELEGATION_TOKENS, + DELEGATION_TOKEN_IDENTIFIER, + DELEGATION_TOKEN_IDENTIFIER_KIND, + DELEGATION_TOKEN_IDENTIFIER_SEQNO, + DELEGATION_TOKEN_IDENTIFIER_OWNER, + DELEGATION_TOKEN_IDENTIFIER_RENEWER, + DELEGATION_TOKEN_IDENTIFIER_REALUSER, + DELEGATION_TOKEN_IDENTIFIER_ISSUE_DATE, + DELEGATION_TOKEN_IDENTIFIER_MAX_DATE, + DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME, + DELEGATION_TOKEN_IDENTIFIER_MASTER_KEY_ID, + TRANSACTION_ID, + LAST_INODE_ID, + INODE_ID, + + SNAPSHOT_COUNTER, + NUM_SNAPSHOTS_TOTAL, + NUM_SNAPSHOTS, + SNAPSHOTS, + SNAPSHOT, + SNAPSHOT_ID, + SNAPSHOT_ROOT, + SNAPSHOT_QUOTA, + NUM_SNAPSHOT_DIR_DIFF, + SNAPSHOT_DIR_DIFFS, + SNAPSHOT_DIR_DIFF, + SNAPSHOT_DIFF_SNAPSHOTID, + SNAPSHOT_DIR_DIFF_CHILDREN_SIZE, + SNAPSHOT_INODE_FILE_ATTRIBUTES, + SNAPSHOT_INODE_DIRECTORY_ATTRIBUTES, + SNAPSHOT_DIR_DIFF_CREATEDLIST, + SNAPSHOT_DIR_DIFF_CREATEDLIST_SIZE, + SNAPSHOT_DIR_DIFF_CREATED_INODE, + SNAPSHOT_DIR_DIFF_DELETEDLIST, + SNAPSHOT_DIR_DIFF_DELETEDLIST_SIZE, + SNAPSHOT_DIR_DIFF_DELETED_INODE, + IS_SNAPSHOTTABLE_DIR, + IS_WITHSNAPSHOT_DIR, + SNAPSHOT_FILE_DIFFS, + SNAPSHOT_FILE_DIFF, + NUM_SNAPSHOT_FILE_DIFF, + SNAPSHOT_FILE_SIZE, + SNAPSHOT_DST_SNAPSHOT_ID, + SNAPSHOT_LAST_SNAPSHOT_ID, + SNAPSHOT_REF_INODE_ID, + SNAPSHOT_REF_INODE, + + CACHE_NEXT_ENTRY_ID, + CACHE_NUM_POOLS, + CACHE_POOL_NAME, + CACHE_POOL_OWNER_NAME, + CACHE_POOL_GROUP_NAME, + CACHE_POOL_PERMISSION_STRING, + CACHE_POOL_WEIGHT, + CACHE_NUM_ENTRIES, + CACHE_ENTRY_PATH, + CACHE_ENTRY_REPLICATION, + CACHE_ENTRY_POOL_NAME + } + + /** + * Begin visiting the fsimage structure. Opportunity to perform + * any initialization necessary for the implementing visitor. + */ + abstract void start() throws IOException; + + /** + * Finish visiting the fsimage structure. Opportunity to perform any + * clean up necessary for the implementing visitor. + */ + abstract void finish() throws IOException; + + /** + * Finish visiting the fsimage structure after an error has occurred + * during the processing. Opportunity to perform any clean up necessary + * for the implementing visitor. + */ + abstract void finishAbnormally() throws IOException; + + /** + * Visit non enclosing element of fsimage with specified value. 
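// Illustrative sketch (hypothetical class, not part of the patch) of a trivial
// concrete visitor, assuming it is placed in the same package so the
// package-private ImageVisitor/ImageElement types are visible: it simply counts
// how many inode elements the loader reports.
package org.apache.hadoop.hdfs.tools.offlineImageViewer;  // assumed location

import java.io.IOException;

class InodeCountingVisitor extends ImageVisitor {
  private long inodes = 0;

  @Override
  void start() throws IOException {}

  @Override
  void finish() throws IOException {
    System.out.println("inodes = " + inodes);
  }

  @Override
  void finishAbnormally() throws IOException {
    System.out.println("Image ended unexpectedly; inodes so far = " + inodes);
  }

  @Override
  void visit(ImageElement element, String value) throws IOException {}

  @Override
  void visitEnclosingElement(ImageElement element) throws IOException {
    if (element == ImageElement.INODE) {
      inodes++;
    }
  }

  @Override
  void visitEnclosingElement(ImageElement element, ImageElement key,
      String value) throws IOException {
    visitEnclosingElement(element);
  }

  @Override
  void leaveEnclosingElement() throws IOException {}
}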
+ * + * @param element FSImage element + * @param value Element's value + */ + abstract void visit(ImageElement element, String value) throws IOException; + + // Convenience methods to automatically convert numeric value types to strings + void visit(ImageElement element, int value) throws IOException { + visit(element, Integer.toString(value)); + } + + void visit(ImageElement element, long value) throws IOException { + visit(element, Long.toString(value)); + } + + /** + * Begin visiting an element that encloses another element, such as + * the beginning of the list of blocks that comprise a file. + * + * @param element Element being visited + */ + abstract void visitEnclosingElement(ImageElement element) + throws IOException; + + /** + * Begin visiting an element that encloses another element, such as + * the beginning of the list of blocks that comprise a file. + * + * Also provide an additional key and value for the element, such as the + * number items within the element. + * + * @param element Element being visited + * @param key Key describing the element being visited + * @param value Value associated with element being visited + */ + abstract void visitEnclosingElement(ImageElement element, + ImageElement key, String value) throws IOException; + + // Convenience methods to automatically convert value types to strings + void visitEnclosingElement(ImageElement element, + ImageElement key, int value) + throws IOException { + visitEnclosingElement(element, key, Integer.toString(value)); + } + + void visitEnclosingElement(ImageElement element, + ImageElement key, long value) + throws IOException { + visitEnclosingElement(element, key, Long.toString(value)); + } + + /** + * Leave current enclosing element. Called, for instance, at the end of + * processing the blocks that compromise a file. + */ + abstract void leaveEnclosingElement() throws IOException; +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java new file mode 100644 index 00000000000..7a027039d52 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java @@ -0,0 +1,111 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.tools.offlineImageViewer; + +import java.io.IOException; +import java.util.Date; + +/** + * IndentedImageVisitor walks over an FSImage and displays its structure + * using indenting to organize sections within the image file. 
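// For orientation, a dump produced by the visitor below is shaped roughly like
// this (the element names are ImageElement constants, the values are hypothetical):
//
// FS_IMAGE
//   IMAGE_VERSION = -51
//   NAMESPACE_ID = 271828
//   INODES [NUM_INODES = 2]
//     INODE
//       INODE_PATH = /user/foo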
+ */ +class IndentedImageVisitor extends TextWriterImageVisitor { + + public IndentedImageVisitor(String filename) throws IOException { + super(filename); + } + + public IndentedImageVisitor(String filename, boolean printToScreen) throws IOException { + super(filename, printToScreen); + } + + final private DepthCounter dc = new DepthCounter();// to track leading spacing + + @Override + void start() throws IOException {} + + @Override + void finish() throws IOException { super.finish(); } + + @Override + void finishAbnormally() throws IOException { + System.out.println("*** Image processing finished abnormally. Ending ***"); + super.finishAbnormally(); + } + + @Override + void leaveEnclosingElement() throws IOException { + dc.decLevel(); + } + + @Override + void visit(ImageElement element, String value) throws IOException { + printIndents(); + write(element + " = " + value + "\n"); + } + + @Override + void visit(ImageElement element, long value) throws IOException { + if ((element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME) || + (element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_ISSUE_DATE) || + (element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_MAX_DATE)) { + visit(element, new Date(value).toString()); + } else { + visit(element, Long.toString(value)); + } + } + + @Override + void visitEnclosingElement(ImageElement element) throws IOException { + printIndents(); + write(element + "\n"); + dc.incLevel(); + } + + // Print element, along with associated key/value pair, in brackets + @Override + void visitEnclosingElement(ImageElement element, + ImageElement key, String value) + throws IOException { + printIndents(); + write(element + " [" + key + " = " + value + "]\n"); + dc.incLevel(); + } + + /** + * Print an appropriate number of spaces for the current level. + * FsImages can potentially be millions of lines long, so caching can + * significantly speed up output. + */ + final private static String [] indents = { "", + " ", + " ", + " ", + " ", + " ", + " "}; + private void printIndents() throws IOException { + try { + write(indents[dc.getLevel()]); + } catch (IndexOutOfBoundsException e) { + // There's no reason in an fsimage would need a deeper indent + for(int i = 0; i < dc.getLevel(); i++) + write(" "); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsImageVisitor.java new file mode 100644 index 00000000000..6e303a9f161 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsImageVisitor.java @@ -0,0 +1,178 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.tools.offlineImageViewer; + +import java.io.IOException; +import java.util.Formatter; +import java.util.LinkedList; + +/** + * LsImageVisitor displays the blocks of the namespace in a format very similar + * to the output of ls/lsr. Entries are marked as directories or not, + * permissions listed, replication, username and groupname, along with size, + * modification date and full path. + * + * Note: A significant difference between the output of the lsr command + * and this image visitor is that this class cannot sort the file entries; + * they are listed in the order they are stored within the fsimage file. + * Therefore, the output of this class cannot be directly compared to the + * output of the lsr command. + */ +class LsImageVisitor extends TextWriterImageVisitor { + final private LinkedList elemQ = new LinkedList(); + + private int numBlocks; + private String perms; + private int replication; + private String username; + private String group; + private long filesize; + private String modTime; + private String path; + private String linkTarget; + + private boolean inInode = false; + final private StringBuilder sb = new StringBuilder(); + final private Formatter formatter = new Formatter(sb); + + public LsImageVisitor(String filename) throws IOException { + super(filename); + } + + public LsImageVisitor(String filename, boolean printToScreen) throws IOException { + super(filename, printToScreen); + } + + /** + * Start a new line of output, reset values. + */ + private void newLine() { + numBlocks = 0; + perms = username = group = path = linkTarget = ""; + filesize = 0l; + replication = 0; + + inInode = true; + } + + /** + * All the values have been gathered. Print them to the console in an + * ls-style format. + */ + private final static int widthRepl = 2; + private final static int widthUser = 8; + private final static int widthGroup = 10; + private final static int widthSize = 10; + private final static int widthMod = 10; + private final static String lsStr = " %" + widthRepl + "s %" + widthUser + + "s %" + widthGroup + "s %" + widthSize + + "d %" + widthMod + "s %s"; + private void printLine() throws IOException { + sb.append(numBlocks < 0 ? "d" : "-"); + sb.append(perms); + + if (0 != linkTarget.length()) { + path = path + " -> " + linkTarget; + } + formatter.format(lsStr, replication > 0 ? replication : "-", + username, group, filesize, modTime, path); + sb.append("\n"); + + write(sb.toString()); + sb.setLength(0); // clear string builder + + inInode = false; + } + + @Override + void start() throws IOException {} + + @Override + void finish() throws IOException { + super.finish(); + } + + @Override + void finishAbnormally() throws IOException { + System.out.println("Input ended unexpectedly."); + super.finishAbnormally(); + } + + @Override + void leaveEnclosingElement() throws IOException { + ImageElement elem = elemQ.pop(); + + if(elem == ImageElement.INODE) + printLine(); + } + + // Maintain state of location within the image tree and record + // values needed to display the inode in ls-style format. 
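+  // As a sketch of the intent (widths and values are illustrative), each
+  // completed inode is printed by printLine() roughly as:
+  //   <d|-><perms>  <repl|-> <user> <group> <size> <modTime> <path>
+  // where the replication column falls back to "-" when no replication was
+  // recorded, and symlinks get " -> <target>" appended to the path.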
+ @Override + void visit(ImageElement element, String value) throws IOException { + if(inInode) { + switch(element) { + case INODE_PATH: + if(value.equals("")) path = "/"; + else path = value; + break; + case PERMISSION_STRING: + perms = value; + break; + case REPLICATION: + replication = Integer.valueOf(value); + break; + case USER_NAME: + username = value; + break; + case GROUP_NAME: + group = value; + break; + case NUM_BYTES: + filesize += Long.valueOf(value); + break; + case MODIFICATION_TIME: + modTime = value; + break; + case SYMLINK: + linkTarget = value; + break; + default: + // This is OK. We're not looking for all the values. + break; + } + } + } + + @Override + void visitEnclosingElement(ImageElement element) throws IOException { + elemQ.push(element); + if(element == ImageElement.INODE) + newLine(); + } + + @Override + void visitEnclosingElement(ImageElement element, + ImageElement key, String value) throws IOException { + elemQ.push(element); + if(element == ImageElement.INODE) + newLine(); + else if (element == ImageElement.BLOCKS) + numBlocks = Integer.valueOf(value); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java new file mode 100644 index 00000000000..28bcf103ee6 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java @@ -0,0 +1,118 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.tools.offlineImageViewer; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map.Entry; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * File name distribution visitor. + *

+ * It analyzes file names in fsimage and prints the following information:
+ * <li>Number of unique file names</li>
+ * <li>Number of file names and the corresponding number range of files
+ *     that use these same names</li>
+ * <li>Heap saved if the file name objects are reused</li>
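+ *
+ * The report written by finish() looks roughly like the following, with
+ * purely illustrative numbers:
+ *   Total unique file names 123456
+ *   87 names are used by 12345 files between 100-999 times. Heap savings ~65432 bytes.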
  • + */ +@InterfaceAudience.Private +public class NameDistributionVisitor extends TextWriterImageVisitor { + HashMap counts = new HashMap(); + + public NameDistributionVisitor(String filename, boolean printToScreen) + throws IOException { + super(filename, printToScreen); + } + + @Override + void finish() throws IOException { + final int BYTEARRAY_OVERHEAD = 24; + + write("Total unique file names " + counts.size()); + // Columns: Frequency of file occurrence, savings in heap, total files using + // the name and number of file names + final long stats[][] = { { 100000, 0, 0, 0 }, + { 10000, 0, 0, 0 }, + { 1000, 0, 0, 0 }, + { 100, 0, 0, 0 }, + { 10, 0, 0, 0 }, + { 5, 0, 0, 0 }, + { 4, 0, 0, 0 }, + { 3, 0, 0, 0 }, + { 2, 0, 0, 0 }}; + + int highbound = Integer.MIN_VALUE; + for (Entry entry : counts.entrySet()) { + highbound = Math.max(highbound, entry.getValue()); + for (int i = 0; i < stats.length; i++) { + if (entry.getValue() >= stats[i][0]) { + stats[i][1] += (BYTEARRAY_OVERHEAD + entry.getKey().length()) + * (entry.getValue() - 1); + stats[i][2] += entry.getValue(); + stats[i][3]++; + break; + } + } + } + + long lowbound = 0; + long totalsavings = 0; + for (long[] stat : stats) { + lowbound = stat[0]; + totalsavings += stat[1]; + String range = lowbound == highbound ? " " + lowbound : + " between " + lowbound + "-" + highbound; + write("\n" + stat[3] + " names are used by " + stat[2] + " files" + + range + " times. Heap savings ~" + stat[1] + " bytes."); + highbound = (int) stat[0] - 1; + } + write("\n\nTotal saved heap ~" + totalsavings + "bytes.\n"); + super.finish(); + } + + @Override + void visit(ImageElement element, String value) throws IOException { + if (element == ImageElement.INODE_PATH) { + String filename = value.substring(value.lastIndexOf("/") + 1); + if (counts.containsKey(filename)) { + counts.put(filename, counts.get(filename) + 1); + } else { + counts.put(filename, 1); + } + } + } + + @Override + void leaveEnclosingElement() throws IOException { + } + + @Override + void start() throws IOException { + } + + @Override + void visitEnclosingElement(ImageElement element) throws IOException { + } + + @Override + void visitEnclosingElement(ImageElement element, ImageElement key, + String value) throws IOException { + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java new file mode 100644 index 00000000000..08985d6162e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java @@ -0,0 +1,274 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.tools.offlineImageViewer; + +import java.io.BufferedInputStream; +import java.io.DataInputStream; +import java.io.EOFException; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.CommandLineParser; +import org.apache.commons.cli.OptionBuilder; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.commons.cli.PosixParser; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.PositionTrackingInputStream; + +/** + * OfflineImageViewer to dump the contents of an Hadoop image file to XML + * or the console. Main entry point into utility, either via the + * command line or programatically. + */ +@InterfaceAudience.Private +public class OfflineImageViewer { + public static final Log LOG = LogFactory.getLog(OfflineImageViewer.class); + + private final static String usage = + "Usage: bin/hdfs oiv_legacy [OPTIONS] -i INPUTFILE -o OUTPUTFILE\n" + + "Offline Image Viewer\n" + + "View a Hadoop fsimage INPUTFILE using the specified PROCESSOR,\n" + + "saving the results in OUTPUTFILE.\n" + + "\n" + + "The oiv utility will attempt to parse correctly formed image files\n" + + "and will abort fail with mal-formed image files.\n" + + "\n" + + "The tool works offline and does not require a running cluster in\n" + + "order to process an image file.\n" + + "\n" + + "The following image processors are available:\n" + + " * Ls: The default image processor generates an lsr-style listing\n" + + " of the files in the namespace, with the same fields in the same\n" + + " order. Note that in order to correctly determine file sizes,\n" + + " this formatter cannot skip blocks and will override the\n" + + " -skipBlocks option.\n" + + " * Indented: This processor enumerates over all of the elements in\n" + + " the fsimage file, using levels of indentation to delineate\n" + + " sections within the file.\n" + + " * Delimited: Generate a text file with all of the elements common\n" + + " to both inodes and inodes-under-construction, separated by a\n" + + " delimiter. The default delimiter is \u0001, though this may be\n" + + " changed via the -delimiter argument. This processor also overrides\n" + + " the -skipBlocks option for the same reason as the Ls processor\n" + + " * XML: This processor creates an XML document with all elements of\n" + + " the fsimage enumerated, suitable for further analysis by XML\n" + + " tools.\n" + + " * FileDistribution: This processor analyzes the file size\n" + + " distribution in the image.\n" + + " -maxSize specifies the range [0, maxSize] of file sizes to be\n" + + " analyzed (128GB by default).\n" + + " -step defines the granularity of the distribution. (2MB by default)\n" + + " * NameDistribution: This processor analyzes the file names\n" + + " in the image and prints total number of file names and how frequently\n" + + " file names are reused.\n" + + "\n" + + "Required command line arguments:\n" + + "-i,--inputFile FSImage file to process.\n" + + "-o,--outputFile Name of output file. If the specified\n" + + " file exists, it will be overwritten.\n" + + "\n" + + "Optional command line arguments:\n" + + "-p,--processor Select which type of processor to apply\n" + + " against image file." 
+ + " (Ls|XML|Delimited|Indented|FileDistribution).\n" + + "-h,--help Display usage information and exit\n" + + "-printToScreen For processors that write to a file, also\n" + + " output to screen. On large image files this\n" + + " will dramatically increase processing time.\n" + + "-skipBlocks Skip inodes' blocks information. May\n" + + " significantly decrease output.\n" + + " (default = false).\n" + + "-delimiter Delimiting string to use with Delimited processor\n"; + + private final boolean skipBlocks; + private final String inputFile; + private final ImageVisitor processor; + + public OfflineImageViewer(String inputFile, ImageVisitor processor, + boolean skipBlocks) { + this.inputFile = inputFile; + this.processor = processor; + this.skipBlocks = skipBlocks; + } + + /** + * Process image file. + */ + public void go() throws IOException { + DataInputStream in = null; + PositionTrackingInputStream tracker = null; + ImageLoader fsip = null; + boolean done = false; + try { + tracker = new PositionTrackingInputStream(new BufferedInputStream( + new FileInputStream(new File(inputFile)))); + in = new DataInputStream(tracker); + + int imageVersionFile = findImageVersion(in); + + fsip = ImageLoader.LoaderFactory.getLoader(imageVersionFile); + + if(fsip == null) + throw new IOException("No image processor to read version " + + imageVersionFile + " is available."); + fsip.loadImage(in, processor, skipBlocks); + done = true; + } finally { + if (!done) { + LOG.error("image loading failed at offset " + tracker.getPos()); + } + IOUtils.cleanup(LOG, in, tracker); + } + } + + /** + * Check an fsimage datainputstream's version number. + * + * The datainput stream is returned at the same point as it was passed in; + * this method has no effect on the datainputstream's read pointer. + * + * @param in Datainputstream of fsimage + * @return Filesystem layout version of fsimage represented by stream + * @throws IOException If problem reading from in + */ + private int findImageVersion(DataInputStream in) throws IOException { + in.mark(42); // arbitrary amount, resetting immediately + + int version = in.readInt(); + in.reset(); + + return version; + } + + /** + * Build command-line options and descriptions + */ + public static Options buildOptions() { + Options options = new Options(); + + // Build in/output file arguments, which are required, but there is no + // addOption method that can specify this + OptionBuilder.isRequired(); + OptionBuilder.hasArgs(); + OptionBuilder.withLongOpt("outputFile"); + options.addOption(OptionBuilder.create("o")); + + OptionBuilder.isRequired(); + OptionBuilder.hasArgs(); + OptionBuilder.withLongOpt("inputFile"); + options.addOption(OptionBuilder.create("i")); + + options.addOption("p", "processor", true, ""); + options.addOption("h", "help", false, ""); + options.addOption("skipBlocks", false, ""); + options.addOption("printToScreen", false, ""); + options.addOption("delimiter", true, ""); + + return options; + } + + /** + * Entry point to command-line-driven operation. User may specify + * options and start fsimage viewer from the command line. Program + * will process image file and exit cleanly or, if an error is + * encountered, inform user and exit. 
+ * + * @param args Command line options + * @throws IOException + */ + public static void main(String[] args) throws IOException { + Options options = buildOptions(); + if(args.length == 0) { + printUsage(); + return; + } + + CommandLineParser parser = new PosixParser(); + CommandLine cmd; + + try { + cmd = parser.parse(options, args); + } catch (ParseException e) { + System.out.println("Error parsing command-line options: "); + printUsage(); + return; + } + + if(cmd.hasOption("h")) { // print help and exit + printUsage(); + return; + } + + boolean skipBlocks = cmd.hasOption("skipBlocks"); + boolean printToScreen = cmd.hasOption("printToScreen"); + String inputFile = cmd.getOptionValue("i"); + String processor = cmd.getOptionValue("p", "Ls"); + String outputFile = cmd.getOptionValue("o"); + String delimiter = cmd.getOptionValue("delimiter"); + + if( !(delimiter == null || processor.equals("Delimited")) ) { + System.out.println("Can only specify -delimiter with Delimited processor"); + printUsage(); + return; + } + + ImageVisitor v; + if(processor.equals("Indented")) { + v = new IndentedImageVisitor(outputFile, printToScreen); + } else if (processor.equals("XML")) { + v = new XmlImageVisitor(outputFile, printToScreen); + } else if (processor.equals("Delimited")) { + v = delimiter == null ? + new DelimitedImageVisitor(outputFile, printToScreen) : + new DelimitedImageVisitor(outputFile, printToScreen, delimiter); + skipBlocks = false; + } else if (processor.equals("FileDistribution")) { + long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0")); + int step = Integer.parseInt(cmd.getOptionValue("step", "0")); + v = new FileDistributionVisitor(outputFile, maxSize, step); + } else if (processor.equals("NameDistribution")) { + v = new NameDistributionVisitor(outputFile, printToScreen); + } else { + v = new LsImageVisitor(outputFile, printToScreen); + skipBlocks = false; + } + + try { + OfflineImageViewer d = new OfflineImageViewer(inputFile, v, skipBlocks); + d.go(); + } catch (EOFException e) { + System.err.println("Input file ended unexpectedly. Exiting"); + } catch(IOException e) { + System.err.println("Encountered exception. Exiting: " + e.getMessage()); + } + } + + /** + * Print application usage instructions. + */ + private static void printUsage() { + System.out.println(usage); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java new file mode 100644 index 00000000000..972701b60c0 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java @@ -0,0 +1,109 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.tools.offlineImageViewer; + +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; + +import com.google.common.base.Charsets; + +/** + * TextWriterImageProcessor mixes in the ability for ImageVisitor + * implementations to easily write their output to a text file. + * + * Implementing classes should be sure to call the super methods for the + * constructors, finish and finishAbnormally methods, in order that the + * underlying file may be opened and closed correctly. + * + * Note, this class does not add newlines to text written to file or (if + * enabled) screen. This is the implementing class' responsibility. + */ +abstract class TextWriterImageVisitor extends ImageVisitor { + private boolean printToScreen = false; + private boolean okToWrite = false; + final private OutputStreamWriter fw; + + /** + * Create a processor that writes to the file named. + * + * @param filename Name of file to write output to + */ + public TextWriterImageVisitor(String filename) throws IOException { + this(filename, false); + } + + /** + * Create a processor that writes to the file named and may or may not + * also output to the screen, as specified. + * + * @param filename Name of file to write output to + * @param printToScreen Mirror output to screen? + */ + public TextWriterImageVisitor(String filename, boolean printToScreen) + throws IOException { + super(); + this.printToScreen = printToScreen; + fw = new OutputStreamWriter(new FileOutputStream(filename), Charsets.UTF_8); + okToWrite = true; + } + + /* (non-Javadoc) + * @see org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor#finish() + */ + @Override + void finish() throws IOException { + close(); + } + + /* (non-Javadoc) + * @see org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor#finishAbnormally() + */ + @Override + void finishAbnormally() throws IOException { + close(); + } + + /** + * Close output stream and prevent further writing + */ + private void close() throws IOException { + fw.close(); + okToWrite = false; + } + + /** + * Write parameter to output file (and possibly screen). + * + * @param toWrite Text to write to file + */ + protected void write(String toWrite) throws IOException { + if(!okToWrite) + throw new IOException("file not open for writing."); + + if(printToScreen) + System.out.print(toWrite); + + try { + fw.write(toWrite); + } catch (IOException e) { + okToWrite = false; + throw e; + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/XmlImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/XmlImageVisitor.java new file mode 100644 index 00000000000..939eb0c8654 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/XmlImageVisitor.java @@ -0,0 +1,88 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.tools.offlineImageViewer; + +import java.io.IOException; +import java.util.LinkedList; + +/** + * An XmlImageVisitor walks over an fsimage structure and writes out + * an equivalent XML document that contains the fsimage's components. + */ +public class XmlImageVisitor extends TextWriterImageVisitor { + final private LinkedList tagQ = + new LinkedList(); + + public XmlImageVisitor(String filename) throws IOException { + super(filename, false); + } + + public XmlImageVisitor(String filename, boolean printToScreen) + throws IOException { + super(filename, printToScreen); + } + + @Override + void finish() throws IOException { + super.finish(); + } + + @Override + void finishAbnormally() throws IOException { + write("\n\n"); + super.finishAbnormally(); + } + + @Override + void leaveEnclosingElement() throws IOException { + if(tagQ.size() == 0) + throw new IOException("Tried to exit non-existent enclosing element " + + "in FSImage file"); + + ImageElement element = tagQ.pop(); + write("\n"); + } + + @Override + void start() throws IOException { + write("\n"); + } + + @Override + void visit(ImageElement element, String value) throws IOException { + writeTag(element.toString(), value); + } + + @Override + void visitEnclosingElement(ImageElement element) throws IOException { + write("<" + element.toString() + ">\n"); + tagQ.push(element); + } + + @Override + void visitEnclosingElement(ImageElement element, + ImageElement key, String value) + throws IOException { + write("<" + element.toString() + " " + key + "=\"" + value +"\">\n"); + tagQ.push(element); + } + + private void writeTag(String tag, String value) throws IOException { + write("<" + tag + ">" + value + "\n"); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java index 9fcec77de1e..ca8ce0a4414 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java @@ -176,8 +176,9 @@ private static String toString(final FsPermission permission) { } /** Convert a string to a FsPermission object. */ - private static FsPermission toFsPermission(final String s) { - return new FsPermission(Short.parseShort(s, 8)); + private static FsPermission toFsPermission(final String s, Boolean aclBit) { + FsPermission perm = new FsPermission(Short.parseShort(s, 8)); + return (aclBit != null && aclBit) ? 
new FsAclPermission(perm) : perm; } static enum PathType { @@ -204,7 +205,11 @@ public static String toJsonString(final HdfsFileStatus status, m.put("length", status.getLen()); m.put("owner", status.getOwner()); m.put("group", status.getGroup()); - m.put("permission", toString(status.getPermission())); + FsPermission perm = status.getPermission(); + m.put("permission", toString(perm)); + if (perm.getAclBit()) { + m.put("aclBit", true); + } m.put("accessTime", status.getAccessTime()); m.put("modificationTime", status.getModificationTime()); m.put("blockSize", status.getBlockSize()); @@ -230,7 +235,8 @@ public static HdfsFileStatus toFileStatus(final Map json, boolean includes final long len = (Long) m.get("length"); final String owner = (String) m.get("owner"); final String group = (String) m.get("group"); - final FsPermission permission = toFsPermission((String) m.get("permission")); + final FsPermission permission = toFsPermission((String) m.get("permission"), + (Boolean)m.get("aclBit")); final long aTime = (Long) m.get("accessTime"); final long mTime = (Long) m.get("modificationTime"); final long blockSize = (Long) m.get("blockSize"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/vecsum.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/vecsum.c new file mode 100644 index 00000000000..93d205ec780 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/vecsum.c @@ -0,0 +1,804 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "config.h" +#include "hdfs.h" + +#define VECSUM_CHUNK_SIZE (8 * 1024 * 1024) +#define ZCR_READ_CHUNK_SIZE (1024 * 1024 * 8) +#define NORMAL_READ_CHUNK_SIZE (8 * 1024 * 1024) +#define DOUBLES_PER_LOOP_ITER 16 + +static double timespec_to_double(const struct timespec *ts) +{ + double sec = ts->tv_sec; + double nsec = ts->tv_nsec; + return sec + (nsec / 1000000000L); +} + +struct stopwatch { + struct timespec start; + struct timespec stop; + struct rusage rusage; +}; + +static struct stopwatch *stopwatch_create(void) +{ + struct stopwatch *watch; + + watch = calloc(1, sizeof(struct stopwatch)); + if (!watch) { + fprintf(stderr, "failed to allocate memory for stopwatch\n"); + goto error; + } + if (clock_gettime(CLOCK_MONOTONIC, &watch->start)) { + int err = errno; + fprintf(stderr, "clock_gettime(CLOCK_MONOTONIC) failed with " + "error %d (%s)\n", err, strerror(err)); + goto error; + } + if (getrusage(RUSAGE_THREAD, &watch->rusage) < 0) { + int err = errno; + fprintf(stderr, "getrusage failed: error %d (%s)\n", + err, strerror(err)); + goto error; + } + return watch; + +error: + free(watch); + return NULL; +} + +static void stopwatch_stop(struct stopwatch *watch, + long long bytes_read) +{ + double elapsed, rate; + + if (clock_gettime(CLOCK_MONOTONIC, &watch->stop)) { + int err = errno; + fprintf(stderr, "clock_gettime(CLOCK_MONOTONIC) failed with " + "error %d (%s)\n", err, strerror(err)); + goto done; + } + elapsed = timespec_to_double(&watch->stop) - + timespec_to_double(&watch->start); + rate = (bytes_read / elapsed) / (1024 * 1024 * 1024); + printf("stopwatch: took %.5g seconds to read %lld bytes, " + "for %.5g GB/s\n", elapsed, bytes_read, rate); + printf("stopwatch: %.5g seconds\n", elapsed); +done: + free(watch); +} + +enum vecsum_type { + VECSUM_LOCAL = 0, + VECSUM_LIBHDFS, + VECSUM_ZCR, +}; + +#define VECSUM_TYPE_VALID_VALUES "libhdfs, zcr, or local" + +int parse_vecsum_type(const char *str) +{ + if (strcasecmp(str, "local") == 0) + return VECSUM_LOCAL; + else if (strcasecmp(str, "libhdfs") == 0) + return VECSUM_LIBHDFS; + else if (strcasecmp(str, "zcr") == 0) + return VECSUM_ZCR; + else + return -1; +} + +struct options { + // The path to read. + const char *path; + + // Length of the file. + long long length; + + // The number of times to read the path. + int passes; + + // Type of vecsum to do + enum vecsum_type ty; + + // RPC address to use for HDFS + const char *rpc_address; +}; + +static struct options *options_create(void) +{ + struct options *opts = NULL; + const char *pass_str; + const char *ty_str; + const char *length_str; + int ty; + + opts = calloc(1, sizeof(struct options)); + if (!opts) { + fprintf(stderr, "failed to calloc options\n"); + goto error; + } + opts->path = getenv("VECSUM_PATH"); + if (!opts->path) { + fprintf(stderr, "You must set the VECSUM_PATH environment " + "variable to the path of the file to read.\n"); + goto error; + } + length_str = getenv("VECSUM_LENGTH"); + if (!length_str) { + length_str = "2147483648"; + } + opts->length = atoll(length_str); + if (!opts->length) { + fprintf(stderr, "Can't parse VECSUM_LENGTH of '%s'.\n", + length_str); + goto error; + } + if (opts->length % VECSUM_CHUNK_SIZE) { + fprintf(stderr, "VECSUM_LENGTH must be a multiple of '%lld'. 
The " + "currently specified length of '%lld' is not.\n", + (long long)VECSUM_CHUNK_SIZE, (long long)opts->length); + goto error; + } + pass_str = getenv("VECSUM_PASSES"); + if (!pass_str) { + fprintf(stderr, "You must set the VECSUM_PASSES environment " + "variable to the number of passes to make.\n"); + goto error; + } + opts->passes = atoi(pass_str); + if (opts->passes <= 0) { + fprintf(stderr, "Invalid value for the VECSUM_PASSES " + "environment variable. You must set this to a " + "number greater than 0.\n"); + goto error; + } + ty_str = getenv("VECSUM_TYPE"); + if (!ty_str) { + fprintf(stderr, "You must set the VECSUM_TYPE environment " + "variable to " VECSUM_TYPE_VALID_VALUES "\n"); + goto error; + } + ty = parse_vecsum_type(ty_str); + if (ty < 0) { + fprintf(stderr, "Invalid VECSUM_TYPE environment variable. " + "Valid values are " VECSUM_TYPE_VALID_VALUES "\n"); + goto error; + } + opts->ty = ty; + opts->rpc_address = getenv("VECSUM_RPC_ADDRESS"); + if (!opts->rpc_address) { + opts->rpc_address = "default"; + } + return opts; +error: + free(opts); + return NULL; +} + +static int test_file_chunk_setup(double **chunk) +{ + int i; + double *c, val; + + c = malloc(VECSUM_CHUNK_SIZE); + if (!c) { + fprintf(stderr, "test_file_create: failed to malloc " + "a buffer of size '%lld'\n", + (long long) VECSUM_CHUNK_SIZE); + return EIO; + } + val = 0.0; + for (i = 0; i < VECSUM_CHUNK_SIZE / sizeof(double); i++) { + c[i] = val; + val += 0.5; + } + *chunk = c; + return 0; +} + +static void options_free(struct options *opts) +{ + free(opts); +} + +struct local_data { + int fd; + double *mmap; + long long length; +}; + +static int local_data_create_file(struct local_data *cdata, + const struct options *opts) +{ + int ret = EIO; + int dup_fd = -1; + FILE *fp = NULL; + double *chunk = NULL; + long long offset = 0; + + dup_fd = dup(cdata->fd); + if (dup_fd < 0) { + ret = errno; + fprintf(stderr, "local_data_create_file: dup failed: %s (%d)\n", + strerror(ret), ret); + goto done; + } + fp = fdopen(dup_fd, "w"); + if (!fp) { + ret = errno; + fprintf(stderr, "local_data_create_file: fdopen failed: %s (%d)\n", + strerror(ret), ret); + goto done; + } + ret = test_file_chunk_setup(&chunk); + if (ret) + goto done; + while (offset < opts->length) { + if (fwrite(chunk, VECSUM_CHUNK_SIZE, 1, fp) != 1) { + fprintf(stderr, "local_data_create_file: failed to write to " + "the local file '%s' at offset %lld\n", + opts->path, offset); + ret = EIO; + goto done; + } + offset += VECSUM_CHUNK_SIZE; + } + fprintf(stderr, "local_data_create_file: successfully re-wrote %s as " + "a file of length %lld\n", opts->path, opts->length); + ret = 0; + +done: + if (dup_fd >= 0) { + close(dup_fd); + } + if (fp) { + fclose(fp); + } + free(chunk); + return ret; +} + +static struct local_data *local_data_create(const struct options *opts) +{ + struct local_data *cdata = NULL; + struct stat st_buf; + + cdata = malloc(sizeof(*cdata)); + if (!cdata) { + fprintf(stderr, "Failed to allocate local test data.\n"); + goto error; + } + cdata->fd = -1; + cdata->mmap = MAP_FAILED; + cdata->length = opts->length; + + cdata->fd = open(opts->path, O_RDWR | O_CREAT, 0777); + if (cdata->fd < 0) { + int err = errno; + fprintf(stderr, "local_data_create: failed to open %s " + "for read/write: error %d (%s)\n", opts->path, err, strerror(err)); + goto error; + } + if (fstat(cdata->fd, &st_buf)) { + int err = errno; + fprintf(stderr, "local_data_create: fstat(%s) failed: " + "error %d (%s)\n", opts->path, err, strerror(err)); + goto error; + } + if 
(st_buf.st_size != opts->length) { + int err; + fprintf(stderr, "local_data_create: current size of %s is %lld, but " + "we want %lld. Re-writing the file.\n", + opts->path, (long long)st_buf.st_size, + (long long)opts->length); + err = local_data_create_file(cdata, opts); + if (err) + goto error; + } + cdata->mmap = mmap(NULL, cdata->length, PROT_READ, + MAP_PRIVATE, cdata->fd, 0); + if (cdata->mmap == MAP_FAILED) { + int err = errno; + fprintf(stderr, "local_data_create: mmap(%s) failed: " + "error %d (%s)\n", opts->path, err, strerror(err)); + goto error; + } + return cdata; + +error: + if (cdata) { + if (cdata->fd >= 0) { + close(cdata->fd); + } + free(cdata); + } + return NULL; +} + +static void local_data_free(struct local_data *cdata) +{ + close(cdata->fd); + munmap(cdata->mmap, cdata->length); +} + +struct libhdfs_data { + hdfsFS fs; + hdfsFile file; + long long length; + double *buf; +}; + +static void libhdfs_data_free(struct libhdfs_data *ldata) +{ + if (ldata->fs) { + free(ldata->buf); + if (ldata->file) { + hdfsCloseFile(ldata->fs, ldata->file); + } + hdfsDisconnect(ldata->fs); + } + free(ldata); +} + +static int libhdfs_data_create_file(struct libhdfs_data *ldata, + const struct options *opts) +{ + int ret; + double *chunk = NULL; + long long offset = 0; + + ldata->file = hdfsOpenFile(ldata->fs, opts->path, O_WRONLY, 0, 1, 0); + if (!ldata->file) { + ret = errno; + fprintf(stderr, "libhdfs_data_create_file: hdfsOpenFile(%s, " + "O_WRONLY) failed: error %d (%s)\n", opts->path, ret, + strerror(ret)); + goto done; + } + ret = test_file_chunk_setup(&chunk); + if (ret) + goto done; + while (offset < opts->length) { + ret = hdfsWrite(ldata->fs, ldata->file, chunk, VECSUM_CHUNK_SIZE); + if (ret < 0) { + ret = errno; + fprintf(stderr, "libhdfs_data_create_file: got error %d (%s) at " + "offset %lld of %s\n", ret, strerror(ret), + offset, opts->path); + goto done; + } else if (ret < VECSUM_CHUNK_SIZE) { + fprintf(stderr, "libhdfs_data_create_file: got short write " + "of %d at offset %lld of %s\n", ret, offset, opts->path); + goto done; + } + offset += VECSUM_CHUNK_SIZE; + } + ret = 0; +done: + free(chunk); + if (ldata->file) { + if (hdfsCloseFile(ldata->fs, ldata->file)) { + fprintf(stderr, "libhdfs_data_create_file: hdfsCloseFile error."); + ret = EIO; + } + ldata->file = NULL; + } + return ret; +} + +static struct libhdfs_data *libhdfs_data_create(const struct options *opts) +{ + struct libhdfs_data *ldata = NULL; + struct hdfsBuilder *builder = NULL; + hdfsFileInfo *pinfo = NULL; + + ldata = calloc(1, sizeof(struct libhdfs_data)); + if (!ldata) { + fprintf(stderr, "Failed to allocate libhdfs test data.\n"); + goto error; + } + builder = hdfsNewBuilder(); + if (!builder) { + fprintf(stderr, "Failed to create builder.\n"); + goto error; + } + hdfsBuilderSetNameNode(builder, opts->rpc_address); + hdfsBuilderConfSetStr(builder, + "dfs.client.read.shortcircuit.skip.checksum", "true"); + ldata->fs = hdfsBuilderConnect(builder); + if (!ldata->fs) { + fprintf(stderr, "Could not connect to default namenode!\n"); + goto error; + } + pinfo = hdfsGetPathInfo(ldata->fs, opts->path); + if (!pinfo) { + int err = errno; + fprintf(stderr, "hdfsGetPathInfo(%s) failed: error %d (%s). " + "Attempting to re-create file.\n", + opts->path, err, strerror(err)); + if (libhdfs_data_create_file(ldata, opts)) + goto error; + } else if (pinfo->mSize != opts->length) { + fprintf(stderr, "hdfsGetPathInfo(%s) failed: length was %lld, " + "but we want length %lld. 
Attempting to re-create file.\n", + opts->path, (long long)pinfo->mSize, (long long)opts->length); + if (libhdfs_data_create_file(ldata, opts)) + goto error; + } + ldata->file = hdfsOpenFile(ldata->fs, opts->path, O_RDONLY, 0, 0, 0); + if (!ldata->file) { + int err = errno; + fprintf(stderr, "hdfsOpenFile(%s) failed: error %d (%s)\n", + opts->path, err, strerror(err)); + goto error; + } + ldata->length = opts->length; + return ldata; + +error: + if (pinfo) + hdfsFreeFileInfo(pinfo, 1); + if (ldata) + libhdfs_data_free(ldata); + return NULL; +} + +static int check_byte_size(int byte_size, const char *const str) +{ + if (byte_size % sizeof(double)) { + fprintf(stderr, "%s is not a multiple " + "of sizeof(double)\n", str); + return EINVAL; + } + if ((byte_size / sizeof(double)) % DOUBLES_PER_LOOP_ITER) { + fprintf(stderr, "The number of doubles contained in " + "%s is not a multiple of DOUBLES_PER_LOOP_ITER\n", + str); + return EINVAL; + } + return 0; +} + +#ifdef HAVE_INTEL_SSE_INTRINSICS + +#include + +static double vecsum(const double *buf, int num_doubles) +{ + int i; + double hi, lo; + __m128d x0, x1, x2, x3, x4, x5, x6, x7; + __m128d sum0 = _mm_set_pd(0.0,0.0); + __m128d sum1 = _mm_set_pd(0.0,0.0); + __m128d sum2 = _mm_set_pd(0.0,0.0); + __m128d sum3 = _mm_set_pd(0.0,0.0); + __m128d sum4 = _mm_set_pd(0.0,0.0); + __m128d sum5 = _mm_set_pd(0.0,0.0); + __m128d sum6 = _mm_set_pd(0.0,0.0); + __m128d sum7 = _mm_set_pd(0.0,0.0); + for (i = 0; i < num_doubles; i+=DOUBLES_PER_LOOP_ITER) { + x0 = _mm_load_pd(buf + i + 0); + x1 = _mm_load_pd(buf + i + 2); + x2 = _mm_load_pd(buf + i + 4); + x3 = _mm_load_pd(buf + i + 6); + x4 = _mm_load_pd(buf + i + 8); + x5 = _mm_load_pd(buf + i + 10); + x6 = _mm_load_pd(buf + i + 12); + x7 = _mm_load_pd(buf + i + 14); + sum0 = _mm_add_pd(sum0, x0); + sum1 = _mm_add_pd(sum1, x1); + sum2 = _mm_add_pd(sum2, x2); + sum3 = _mm_add_pd(sum3, x3); + sum4 = _mm_add_pd(sum4, x4); + sum5 = _mm_add_pd(sum5, x5); + sum6 = _mm_add_pd(sum6, x6); + sum7 = _mm_add_pd(sum7, x7); + } + x0 = _mm_add_pd(sum0, sum1); + x1 = _mm_add_pd(sum2, sum3); + x2 = _mm_add_pd(sum4, sum5); + x3 = _mm_add_pd(sum6, sum7); + x4 = _mm_add_pd(x0, x1); + x5 = _mm_add_pd(x2, x3); + x6 = _mm_add_pd(x4, x5); + _mm_storeh_pd(&hi, x6); + _mm_storel_pd(&lo, x6); + return hi + lo; +} + +#else + +static double vecsum(const double *buf, int num_doubles) +{ + int i; + double sum = 0.0; + for (i = 0; i < num_doubles; i++) { + sum += buf[i]; + } + return sum; +} + +#endif + +static int vecsum_zcr_loop(int pass, struct libhdfs_data *ldata, + struct hadoopRzOptions *zopts, + const struct options *opts) +{ + int32_t len; + double sum = 0.0; + const double *buf; + struct hadoopRzBuffer *rzbuf = NULL; + int ret; + + while (1) { + rzbuf = hadoopReadZero(ldata->file, zopts, ZCR_READ_CHUNK_SIZE); + if (!rzbuf) { + ret = errno; + fprintf(stderr, "hadoopReadZero failed with error " + "code %d (%s)\n", ret, strerror(ret)); + goto done; + } + buf = hadoopRzBufferGet(rzbuf); + if (!buf) break; + len = hadoopRzBufferLength(rzbuf); + if (len < ZCR_READ_CHUNK_SIZE) { + fprintf(stderr, "hadoopReadZero got a partial read " + "of length %d\n", len); + ret = EINVAL; + goto done; + } + sum += vecsum(buf, + ZCR_READ_CHUNK_SIZE / sizeof(double)); + hadoopRzBufferFree(ldata->file, rzbuf); + } + printf("finished zcr pass %d. 
sum = %g\n", pass, sum); + ret = 0; + +done: + if (rzbuf) + hadoopRzBufferFree(ldata->file, rzbuf); + return ret; +} + +static int vecsum_zcr(struct libhdfs_data *ldata, + const struct options *opts) +{ + int ret, pass; + struct hadoopRzOptions *zopts = NULL; + + zopts = hadoopRzOptionsAlloc(); + if (!zopts) { + fprintf(stderr, "hadoopRzOptionsAlloc failed.\n"); + ret = ENOMEM; + goto done; + } + if (hadoopRzOptionsSetSkipChecksum(zopts, 1)) { + ret = errno; + perror("hadoopRzOptionsSetSkipChecksum failed: "); + goto done; + } + if (hadoopRzOptionsSetByteBufferPool(zopts, NULL)) { + ret = errno; + perror("hadoopRzOptionsSetByteBufferPool failed: "); + goto done; + } + for (pass = 0; pass < opts->passes; ++pass) { + ret = vecsum_zcr_loop(pass, ldata, zopts, opts); + if (ret) { + fprintf(stderr, "vecsum_zcr_loop pass %d failed " + "with error %d\n", pass, ret); + goto done; + } + hdfsSeek(ldata->fs, ldata->file, 0); + } + ret = 0; +done: + if (zopts) + hadoopRzOptionsFree(zopts); + return ret; +} + +tSize hdfsReadFully(hdfsFS fs, hdfsFile f, void* buffer, tSize length) +{ + uint8_t *buf = buffer; + tSize ret, nread = 0; + + while (length > 0) { + ret = hdfsRead(fs, f, buf, length); + if (ret < 0) { + if (errno != EINTR) { + return -1; + } + } + if (ret == 0) { + break; + } + nread += ret; + length -= ret; + buf += ret; + } + return nread; +} + +static int vecsum_normal_loop(int pass, const struct libhdfs_data *ldata, + const struct options *opts) +{ + double sum = 0.0; + + while (1) { + int res = hdfsReadFully(ldata->fs, ldata->file, ldata->buf, + NORMAL_READ_CHUNK_SIZE); + if (res == 0) // EOF + break; + if (res < 0) { + int err = errno; + fprintf(stderr, "hdfsRead failed with error %d (%s)\n", + err, strerror(err)); + return err; + } + if (res < NORMAL_READ_CHUNK_SIZE) { + fprintf(stderr, "hdfsRead got a partial read of " + "length %d\n", res); + return EINVAL; + } + sum += vecsum(ldata->buf, + NORMAL_READ_CHUNK_SIZE / sizeof(double)); + } + printf("finished normal pass %d. sum = %g\n", pass, sum); + return 0; +} + +static int vecsum_libhdfs(struct libhdfs_data *ldata, + const struct options *opts) +{ + int pass; + + ldata->buf = malloc(NORMAL_READ_CHUNK_SIZE); + if (!ldata->buf) { + fprintf(stderr, "failed to malloc buffer of size %d\n", + NORMAL_READ_CHUNK_SIZE); + return ENOMEM; + } + for (pass = 0; pass < opts->passes; ++pass) { + int ret = vecsum_normal_loop(pass, ldata, opts); + if (ret) { + fprintf(stderr, "vecsum_normal_loop pass %d failed " + "with error %d\n", pass, ret); + return ret; + } + hdfsSeek(ldata->fs, ldata->file, 0); + } + return 0; +} + +static void vecsum_local(struct local_data *cdata, const struct options *opts) +{ + int pass; + + for (pass = 0; pass < opts->passes; pass++) { + double sum = vecsum(cdata->mmap, cdata->length / sizeof(double)); + printf("finished vecsum_local pass %d. sum = %g\n", pass, sum); + } +} + +static long long vecsum_length(const struct options *opts, + const struct libhdfs_data *ldata) +{ + if (opts->ty == VECSUM_LOCAL) { + struct stat st_buf = { 0 }; + if (stat(opts->path, &st_buf)) { + int err = errno; + fprintf(stderr, "vecsum_length: stat(%s) failed: " + "error %d (%s)\n", opts->path, err, strerror(err)); + return -EIO; + } + return st_buf.st_size; + } else { + return ldata->length; + } +} + +/* + * vecsum is a microbenchmark which measures the speed of various ways of + * reading from HDFS. It creates a file containing floating-point 'doubles', + * and computes the sum of all the doubles several times. 
For some CPUs, + * assembly optimizations are used for the summation (SSE, etc). + */ +int main(void) +{ + int ret = 1; + struct options *opts = NULL; + struct local_data *cdata = NULL; + struct libhdfs_data *ldata = NULL; + struct stopwatch *watch = NULL; + + if (check_byte_size(VECSUM_CHUNK_SIZE, "VECSUM_CHUNK_SIZE") || + check_byte_size(ZCR_READ_CHUNK_SIZE, + "ZCR_READ_CHUNK_SIZE") || + check_byte_size(NORMAL_READ_CHUNK_SIZE, + "NORMAL_READ_CHUNK_SIZE")) { + goto done; + } + opts = options_create(); + if (!opts) + goto done; + if (opts->ty == VECSUM_LOCAL) { + cdata = local_data_create(opts); + if (!cdata) + goto done; + } else { + ldata = libhdfs_data_create(opts); + if (!ldata) + goto done; + } + watch = stopwatch_create(); + if (!watch) + goto done; + switch (opts->ty) { + case VECSUM_LOCAL: + vecsum_local(cdata, opts); + ret = 0; + break; + case VECSUM_LIBHDFS: + ret = vecsum_libhdfs(ldata, opts); + break; + case VECSUM_ZCR: + ret = vecsum_zcr(ldata, opts); + break; + } + if (ret) { + fprintf(stderr, "vecsum failed with error %d\n", ret); + goto done; + } + ret = 0; +done: + fprintf(stderr, "cleaning up...\n"); + if (watch && (ret == 0)) { + long long length = vecsum_length(opts, ldata); + if (length >= 0) { + stopwatch_stop(watch, length * opts->passes); + } + } + if (cdata) + local_data_free(cdata); + if (ldata) + libhdfs_data_free(ldata); + if (opts) + options_free(opts); + return ret; +} + +// vim: ts=4:sw=4:tw=79:et diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c index 3a8f31dccac..c4ea060ec7d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c @@ -122,7 +122,7 @@ struct tlhPaths { static int setupPaths(const struct tlhThreadInfo *ti, struct tlhPaths *paths) { - memset(paths, sizeof(*paths), 0); + memset(paths, 0, sizeof(*paths)); if (snprintf(paths->prefix, sizeof(paths->prefix), "/tlhData%04d", ti->threadIdx) >= sizeof(paths->prefix)) { return ENAMETOOLONG; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html index 7f20e5511ca..0c5fed24395 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html @@ -91,7 +91,7 @@

    Browse Directory

    {#FileStatus} - {type|helper_to_directory}{permission|helper_to_permission} + {type|helper_to_directory}{permission|helper_to_permission}{aclBit|helper_to_acl_bit} {owner} {group} {length|fmt_bytes} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js index c029407e6de..3c8efd97a04 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js @@ -63,7 +63,8 @@ 'helper_to_permission': function (v) { var symbols = [ '---', '--x', '-w-', '-wx', 'r--', 'r-x', 'rw-', 'rwx' ]; - var sticky = v > 1000; + var vInt = parseInt(v, 8); + var sticky = (vInt & (1 << 9)) != 0; var res = ""; for (var i = 0; i < 3; ++i) { @@ -72,7 +73,7 @@ } if (sticky) { - var otherExec = ((v % 10) & 1) == 1; + var otherExec = (vInt & 1) == 1; res = res.substr(0, res.length - 1) + (otherExec ? 't' : 'T'); } @@ -81,6 +82,10 @@ 'helper_to_directory' : function (v) { return v === 'DIRECTORY' ? 'd' : '-'; + }, + + 'helper_to_acl_bit': function (v) { + return v ? '+' : ""; } }; $.extend(dust.filters, filters); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java index 7a39dee427b..667204c0c9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java @@ -22,8 +22,9 @@ import java.io.IOException; import java.net.URI; import java.util.Collection; -import java.util.HashMap; -import java.util.Map; +import java.util.HashSet; +import java.util.List; +import java.util.Set; import java.util.concurrent.TimeoutException; import org.apache.commons.logging.Log; @@ -39,6 +40,9 @@ import org.apache.hadoop.hdfs.NameNodeProxies; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup; import org.apache.hadoop.net.NetworkTopology; @@ -53,7 +57,7 @@ public class TestBalancerWithNodeGroup { private static final Log LOG = LogFactory.getLog( "org.apache.hadoop.hdfs.TestBalancerWithNodeGroup"); - final private static long CAPACITY = 6000L; + final private static long CAPACITY = 5000L; final private static String RACK0 = "/rack0"; final private static String RACK1 = "/rack1"; final private static String NODEGROUP0 = "/nodegroup0"; @@ -77,6 +81,7 @@ public class TestBalancerWithNodeGroup { static Configuration createConf() { Configuration conf = new HdfsConfiguration(); TestBalancer.initConf(conf); + conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE); conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY, NetworkTopologyWithNodeGroup.class.getName()); conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, @@ -191,6 +196,19 @@ private void runBalancerCanFinish(Configuration conf, LOG.info("Rebalancing with default factor."); } + private Set getBlocksOnRack(List blks, String rack) { + 
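+    // Returns the blocks from blks that have at least one replica whose
+    // first topology level (the rack half of the network location) matches
+    // the given rack.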
Set ret = new HashSet(); + for (LocatedBlock blk : blks) { + for (DatanodeInfo di : blk.getLocations()) { + if (rack.equals(NetworkTopology.getFirstHalf(di.getNetworkLocation()))) { + ret.add(blk.getBlock()); + break; + } + } + } + return ret; + } + /** * Create a cluster with even distribution, and a new empty node is added to * the cluster, then test rack locality for balancer policy. @@ -220,9 +238,14 @@ public void testBalancerWithRackLocality() throws Exception { // fill up the cluster to be 30% full long totalUsedSpace = totalCapacity * 3 / 10; - TestBalancer.createFile(cluster, filePath, totalUsedSpace / numOfDatanodes, + long length = totalUsedSpace / numOfDatanodes; + TestBalancer.createFile(cluster, filePath, length, (short) numOfDatanodes, 0); + LocatedBlocks lbs = client.getBlockLocations(filePath.toUri().getPath(), 0, + length); + Set before = getBlocksOnRack(lbs.getLocatedBlocks(), RACK0); + long newCapacity = CAPACITY; String newRack = RACK1; String newNodeGroup = NODEGROUP2; @@ -235,22 +258,9 @@ public void testBalancerWithRackLocality() throws Exception { // run balancer and validate results runBalancerCanFinish(conf, totalUsedSpace, totalCapacity); - DatanodeInfo[] datanodeReport = - client.getDatanodeReport(DatanodeReportType.ALL); - - Map rackToUsedCapacity = new HashMap(); - for (DatanodeInfo datanode: datanodeReport) { - String rack = NetworkTopology.getFirstHalf(datanode.getNetworkLocation()); - int usedCapacity = (int) datanode.getDfsUsed(); - - if (rackToUsedCapacity.get(rack) != null) { - rackToUsedCapacity.put(rack, usedCapacity + rackToUsedCapacity.get(rack)); - } else { - rackToUsedCapacity.put(rack, usedCapacity); - } - } - assertEquals(rackToUsedCapacity.size(), 2); - assertEquals(rackToUsedCapacity.get(RACK0), rackToUsedCapacity.get(RACK1)); + lbs = client.getBlockLocations(filePath.toUri().getPath(), 0, length); + Set after = getBlocksOnRack(lbs.getLocatedBlocks(), RACK0); + assertEquals(before, after); } finally { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java index 08af1bb9aad..52a68584dd6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java @@ -27,6 +27,7 @@ import org.apache.hadoop.fs.permission.AclEntryScope; import org.apache.hadoop.fs.permission.AclEntryType; import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; @@ -150,6 +151,9 @@ public static void assertFilePermissionGranted(FileSystem fs, */ public static void assertPermission(FileSystem fs, Path pathToCheck, short perm) throws IOException { - assertEquals(perm, fs.getFileStatus(pathToCheck).getPermission().toShort()); + short filteredPerm = (short)(perm & 01777); + FsPermission fsPermission = fs.getFileStatus(pathToCheck).getPermission(); + assertEquals(filteredPerm, fsPermission.toShort()); + assertEquals(((perm & (1 << 12)) != 0), fsPermission.getAclBit()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java index a3e94969923..f36483e642d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.AclException; +import org.apache.hadoop.hdfs.protocol.FsAclPermission; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; @@ -118,7 +119,7 @@ public void testModifyAclEntries() throws IOException { aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission((short)0750); + assertPermission((short)010750); assertAclFeature(true); } @@ -140,7 +141,7 @@ public void testModifyAclEntriesOnlyAccess() throws IOException { assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "foo", READ_EXECUTE), aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); - assertPermission((short)0750); + assertPermission((short)010750); assertAclFeature(true); } @@ -161,7 +162,7 @@ public void testModifyAclEntriesOnlyDefault() throws IOException { aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission((short)0750); + assertPermission((short)010750); assertAclFeature(true); } @@ -177,7 +178,7 @@ public void testModifyAclEntriesMinimal() throws IOException { assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "foo", READ_WRITE), aclEntry(ACCESS, GROUP, READ) }, returned); - assertPermission((short)0660); + assertPermission((short)010660); assertAclFeature(true); } @@ -195,7 +196,7 @@ public void testModifyAclEntriesMinimalDefault() throws IOException { aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission((short)0750); + assertPermission((short)010750); assertAclFeature(true); } @@ -212,7 +213,7 @@ public void testModifyAclEntriesCustomMask() throws IOException { assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ) }, returned); - assertPermission((short)0600); + assertPermission((short)010600); assertAclFeature(true); } @@ -240,7 +241,7 @@ public void testModifyAclEntriesStickyBit() throws IOException { aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission((short)01750); + assertPermission((short)011750); assertAclFeature(true); } @@ -286,7 +287,7 @@ public void testRemoveAclEntries() throws IOException { aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission((short)0750); + assertPermission((short)010750); assertAclFeature(true); } @@ -309,7 +310,7 @@ public void testRemoveAclEntriesOnlyAccess() throws IOException { assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "bar", READ_WRITE), aclEntry(ACCESS, GROUP, READ_WRITE) }, returned); - assertPermission((short)0760); + assertPermission((short)010760); assertAclFeature(true); } @@ -334,7 +335,7 @@ public void testRemoveAclEntriesOnlyDefault() throws IOException { aclEntry(DEFAULT, GROUP, 
READ_EXECUTE), aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission((short)0750); + assertPermission((short)010750); assertAclFeature(true); } @@ -382,7 +383,7 @@ public void testRemoveAclEntriesMinimalDefault() throws IOException { aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission((short)0750); + assertPermission((short)010750); assertAclFeature(true); } @@ -408,7 +409,7 @@ public void testRemoveAclEntriesStickyBit() throws IOException { aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission((short)01750); + assertPermission((short)011750); assertAclFeature(true); } @@ -436,7 +437,7 @@ public void testRemoveDefaultAcl() throws IOException { assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); - assertPermission((short)0770); + assertPermission((short)010770); assertAclFeature(true); } @@ -456,7 +457,7 @@ public void testRemoveDefaultAclOnlyAccess() throws IOException { assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); - assertPermission((short)0770); + assertPermission((short)010770); assertAclFeature(true); } @@ -501,7 +502,7 @@ public void testRemoveDefaultAclStickyBit() throws IOException { assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); - assertPermission((short)01770); + assertPermission((short)011770); assertAclFeature(true); } @@ -602,7 +603,7 @@ public void testSetAcl() throws IOException { aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission((short)0770); + assertPermission((short)010770); assertAclFeature(true); } @@ -621,7 +622,7 @@ public void testSetAclOnlyAccess() throws IOException { assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "foo", READ), aclEntry(ACCESS, GROUP, READ) }, returned); - assertPermission((short)0640); + assertPermission((short)010640); assertAclFeature(true); } @@ -639,7 +640,7 @@ public void testSetAclOnlyDefault() throws IOException { aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission((short)0750); + assertPermission((short)010750); assertAclFeature(true); } @@ -679,7 +680,7 @@ public void testSetAclMinimalDefault() throws IOException { aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission((short)0750); + assertPermission((short)010750); assertAclFeature(true); } @@ -699,7 +700,7 @@ public void testSetAclCustomMask() throws IOException { assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "foo", READ), aclEntry(ACCESS, GROUP, READ) }, returned); - assertPermission((short)0670); + assertPermission((short)010670); assertAclFeature(true); } @@ -723,7 +724,7 @@ public void testSetAclStickyBit() throws IOException { aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission((short)01770); + assertPermission((short)011770); assertAclFeature(true); } @@ -768,7 +769,7 @@ public void testSetPermission() throws IOException { aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, ALL), 
aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission((short)0700); + assertPermission((short)010700); assertAclFeature(true); } @@ -788,7 +789,7 @@ public void testSetPermissionOnlyAccess() throws IOException { assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "foo", READ), aclEntry(ACCESS, GROUP, READ) }, returned); - assertPermission((short)0600); + assertPermission((short)010600); assertAclFeature(true); } @@ -810,10 +811,27 @@ public void testSetPermissionOnlyDefault() throws IOException { aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission((short)0700); + assertPermission((short)010700); assertAclFeature(true); } + @Test + public void testSetPermissionCannotSetAclBit() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + fs.setPermission(path, FsPermission.createImmutable((short)0700)); + assertPermission((short)0700); + fs.setPermission(path, + new FsAclPermission(FsPermission.createImmutable((short)0755))); + INode inode = cluster.getNamesystem().getFSDirectory().getNode( + path.toUri().getPath(), false); + assertNotNull(inode); + FsPermission perm = inode.getFsPermission(); + assertNotNull(perm); + assertEquals(0755, perm.toShort()); + assertEquals(0755, perm.toExtendedShort()); + assertAclFeature(false); + } + @Test public void testDefaultAclNewFile() throws Exception { FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); @@ -827,7 +845,7 @@ public void testDefaultAclNewFile() throws Exception { assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); - assertPermission(filePath, (short)0640); + assertPermission(filePath, (short)010640); assertAclFeature(filePath, true); } @@ -881,7 +899,7 @@ public void testDefaultAclNewDir() throws Exception { aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission(dirPath, (short)0750); + assertPermission(dirPath, (short)010750); assertAclFeature(dirPath, true); } @@ -916,7 +934,7 @@ public void testDefaultMinimalAclNewDir() throws Exception { aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission(dirPath, (short)0750); + assertPermission(dirPath, (short)010750); assertAclFeature(dirPath, true); } @@ -940,7 +958,7 @@ public void testDefaultAclNewFileIntermediate() throws Exception { AclStatus s = fs.getAclStatus(dirPath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission(dirPath, (short)0750); + assertPermission(dirPath, (short)010750); assertAclFeature(dirPath, true); expected = new AclEntry[] { aclEntry(ACCESS, USER, "foo", ALL), @@ -948,7 +966,7 @@ public void testDefaultAclNewFileIntermediate() throws Exception { s = fs.getAclStatus(filePath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission(filePath, (short)0640); + assertPermission(filePath, (short)010640); assertAclFeature(filePath, true); } @@ -972,12 +990,12 @@ public void testDefaultAclNewDirIntermediate() throws Exception { AclStatus s = fs.getAclStatus(dirPath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission(dirPath, (short)0750); + assertPermission(dirPath, (short)010750); assertAclFeature(dirPath, 
true); s = fs.getAclStatus(subdirPath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission(subdirPath, (short)0750); + assertPermission(subdirPath, (short)010750); assertAclFeature(subdirPath, true); } @@ -1004,7 +1022,7 @@ public void testDefaultAclNewSymlinkIntermediate() throws Exception { AclStatus s = fs.getAclStatus(dirPath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission(dirPath, (short)0750); + assertPermission(dirPath, (short)010750); assertAclFeature(dirPath, true); expected = new AclEntry[] { }; s = fs.getAclStatus(linkPath); @@ -1037,7 +1055,7 @@ public void testDefaultAclNewFileWithMode() throws Exception { assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); - assertPermission(filePath, (short)0740); + assertPermission(filePath, (short)010740); assertAclFeature(filePath, true); } @@ -1059,7 +1077,7 @@ public void testDefaultAclNewDirWithMode() throws Exception { aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, READ_EXECUTE) }, returned); - assertPermission(dirPath, (short)0740); + assertPermission(dirPath, (short)010740); assertAclFeature(dirPath, true); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java index ecc738aadcd..ed124603f3d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java @@ -477,6 +477,12 @@ public void testAddRemoveDirectives() throws Exception { iter = dfs.listCacheDirectives( new CacheDirectiveInfo.Builder().setPool("pool2").build()); validateListAll(iter, betaId); + iter = dfs.listCacheDirectives( + new CacheDirectiveInfo.Builder().setId(alphaId2).build()); + validateListAll(iter, alphaId2); + iter = dfs.listCacheDirectives( + new CacheDirectiveInfo.Builder().setId(relativeId).build()); + validateListAll(iter, relativeId); dfs.removeCacheDirective(betaId); iter = dfs.listCacheDirectives( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index 2861513314a..5061fe41456 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -27,6 +27,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -43,6 +44,7 @@ import java.util.Collection; import java.util.List; +import com.google.common.io.Files; import org.apache.commons.cli.ParseException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -87,7 +89,6 @@ import org.apache.hadoop.util.StringUtils; import org.apache.log4j.Level; import org.junit.After; -import org.junit.Assert; import org.junit.Before; import 
org.junit.Test; import org.mockito.ArgumentMatcher; @@ -1084,7 +1085,7 @@ public void testCheckpoint() throws IOException { FSDirectory secondaryFsDir = secondary.getFSNamesystem().dir; INode rootInMap = secondaryFsDir.getInode(secondaryFsDir.rootDir.getId()); - Assert.assertSame(rootInMap, secondaryFsDir.rootDir); + assertSame(rootInMap, secondaryFsDir.rootDir); fileSys.delete(tmpDir, true); fileSys.mkdirs(tmpDir); @@ -2404,6 +2405,46 @@ public void testCommandLineParsing() throws ParseException { } } + @Test + public void testLegacyOivImage() throws Exception { + MiniDFSCluster cluster = null; + SecondaryNameNode secondary = null; + File tmpDir = Files.createTempDir(); + Configuration conf = new HdfsConfiguration(); + conf.set(DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY, + tmpDir.getAbsolutePath()); + conf.set(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, + "2"); + + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) + .format(true).build(); + + secondary = startSecondaryNameNode(conf); + + // Checkpoint once + secondary.doCheckpoint(); + String files1[] = tmpDir.list(); + assertEquals("Only one file is expected", 1, files1.length); + + // Perform more checkpointngs and check whether retention management + // is working. + secondary.doCheckpoint(); + secondary.doCheckpoint(); + String files2[] = tmpDir.list(); + assertEquals("Two files are expected", 2, files2.length); + + // Verify that the first file is deleted. + for (String fName : files2) { + assertFalse(fName.equals(files1[0])); + } + } finally { + cleanup(secondary); + cleanup(cluster); + tmpDir.delete(); + } + } + private static void cleanup(SecondaryNameNode snn) { if (snn != null) { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java index 03889ea079e..bd884783576 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java @@ -142,7 +142,7 @@ private void doTestDefaultAclNewChildren(boolean persistNamespace) AclEntry[] subdirReturned = fs.getAclStatus(subdirPath).getEntries() .toArray(new AclEntry[0]); Assert.assertArrayEquals(subdirExpected, subdirReturned); - assertPermission(fs, subdirPath, (short)0755); + assertPermission(fs, subdirPath, (short)010755); restart(fs, persistNamespace); @@ -152,7 +152,7 @@ private void doTestDefaultAclNewChildren(boolean persistNamespace) subdirReturned = fs.getAclStatus(subdirPath).getEntries() .toArray(new AclEntry[0]); Assert.assertArrayEquals(subdirExpected, subdirReturned); - assertPermission(fs, subdirPath, (short)0755); + assertPermission(fs, subdirPath, (short)010755); aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_WRITE)); fs.modifyAclEntries(dirPath, aclSpec); @@ -163,7 +163,7 @@ private void doTestDefaultAclNewChildren(boolean persistNamespace) subdirReturned = fs.getAclStatus(subdirPath).getEntries() .toArray(new AclEntry[0]); Assert.assertArrayEquals(subdirExpected, subdirReturned); - assertPermission(fs, subdirPath, (short)0755); + assertPermission(fs, subdirPath, (short)010755); restart(fs, persistNamespace); @@ -173,7 +173,7 @@ private void doTestDefaultAclNewChildren(boolean persistNamespace) subdirReturned = fs.getAclStatus(subdirPath).getEntries() .toArray(new 
AclEntry[0]); Assert.assertArrayEquals(subdirExpected, subdirReturned); - assertPermission(fs, subdirPath, (short)0755); + assertPermission(fs, subdirPath, (short)010755); fs.removeAcl(dirPath); @@ -183,7 +183,7 @@ private void doTestDefaultAclNewChildren(boolean persistNamespace) subdirReturned = fs.getAclStatus(subdirPath).getEntries() .toArray(new AclEntry[0]); Assert.assertArrayEquals(subdirExpected, subdirReturned); - assertPermission(fs, subdirPath, (short)0755); + assertPermission(fs, subdirPath, (short)010755); restart(fs, persistNamespace); @@ -193,7 +193,7 @@ private void doTestDefaultAclNewChildren(boolean persistNamespace) subdirReturned = fs.getAclStatus(subdirPath).getEntries() .toArray(new AclEntry[0]); Assert.assertArrayEquals(subdirExpected, subdirReturned); - assertPermission(fs, subdirPath, (short)0755); + assertPermission(fs, subdirPath, (short)010755); } @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java index 67a590548f1..3813319f029 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java @@ -66,24 +66,28 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; - +import com.google.common.io.Files; public class TestStandbyCheckpoints { private static final int NUM_DIRS_IN_LOG = 200000; protected MiniDFSCluster cluster; protected NameNode nn0, nn1; protected FileSystem fs; + protected File tmpOivImgDir; private static final Log LOG = LogFactory.getLog(TestStandbyCheckpoints.class); @SuppressWarnings("rawtypes") @Before public void setupCluster() throws Exception { + tmpOivImgDir = Files.createTempDir(); Configuration conf = new Configuration(); conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1); conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1); - + conf.set(DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY, + tmpOivImgDir.getAbsolutePath()); + // Dial down the retention of extra edits and checkpoints. This is to // help catch regressions of HDFS-4238 (SBN should not purge shared edits) conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 1); @@ -129,6 +133,9 @@ public void testSBNCheckpoints() throws Exception { // Once the standby catches up, it should notice that it needs to // do a checkpoint and save one to its local directories. HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12)); + + // It should have saved the oiv image too. + assertEquals("One file is expected", 1, tmpOivImgDir.list().length); // It should also upload it back to the active. 
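The ACL test assertions above switch from plain octal modes (for example 0750) to extended values such as 010750 and 011750: assertPermission now masks the expected value with 01777 to compare the classic mode and sticky bits, and checks bit 12 against the permission's ACL flag, the same flag the dfs-dust.js filter renders as a trailing '+'. A minimal, illustrative decomposition of that bit layout follows; the class and method names are examples, not part of the patch.

// Illustrative only: how the extended permission shorts used by the test
// assertions decompose. Bit positions follow the values used in the patch
// (01777 mask, 1 << 9 for the sticky bit, 1 << 12 for the ACL flag).
public class ExtendedPermSketch {
  static final int MODE_MASK = 01777;    // rwxrwxrwx plus sticky
  static final int STICKY_BIT = 1 << 9;  // 01000
  static final int ACL_BIT = 1 << 12;    // 010000

  static String describe(short extendedPerm) {
    int mode = extendedPerm & MODE_MASK;
    boolean sticky = (extendedPerm & STICKY_BIT) != 0;
    boolean hasAcl = (extendedPerm & ACL_BIT) != 0;
    return String.format("mode=%04o sticky=%b acl=%b", mode, sticky, hasAcl);
  }

  public static void main(String[] args) {
    System.out.println(describe((short) 010750)); // mode=0750 sticky=false acl=true
    System.out.println(describe((short) 011750)); // mode=1750 sticky=true  acl=true
  }
}

Read this way, 011750 in the sticky-bit test cases above is mode 1750 with both the sticky and ACL flags set.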
HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java index 0c8084183fb..3deb47ff3af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java @@ -119,14 +119,14 @@ public void testOriginalAclEnforcedForSnapshotRootAfterChange() assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), aclEntry(ACCESS, GROUP, NONE) }, returned); - assertPermission((short)0750, path); + assertPermission((short)010750, path); s = hdfs.getAclStatus(snapshotPath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), aclEntry(ACCESS, GROUP, NONE) }, returned); - assertPermission((short)0750, snapshotPath); + assertPermission((short)010750, snapshotPath); assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath); assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath); @@ -153,14 +153,14 @@ private static void doSnapshotRootChangeAssertions(Path path, assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "diana", READ_EXECUTE), aclEntry(ACCESS, GROUP, NONE) }, returned); - assertPermission((short)0550, path); + assertPermission((short)010550, path); s = hdfs.getAclStatus(snapshotPath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), aclEntry(ACCESS, GROUP, NONE) }, returned); - assertPermission((short)0750, snapshotPath); + assertPermission((short)010750, snapshotPath); assertDirPermissionDenied(fsAsBruce, BRUCE, path); assertDirPermissionGranted(fsAsDiana, DIANA, path); @@ -202,24 +202,24 @@ public void testOriginalAclEnforcedForSnapshotContentsAfterChange() AclStatus s = hdfs.getAclStatus(filePath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission((short)0550, filePath); + assertPermission((short)010550, filePath); s = hdfs.getAclStatus(subdirPath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission((short)0550, subdirPath); + assertPermission((short)010550, subdirPath); s = hdfs.getAclStatus(fileSnapshotPath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission((short)0550, fileSnapshotPath); + assertPermission((short)010550, fileSnapshotPath); assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath); assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath); s = hdfs.getAclStatus(subdirSnapshotPath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission((short)0550, subdirSnapshotPath); + assertPermission((short)010550, subdirSnapshotPath); assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath); assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath); @@ -251,14 +251,14 @@ private static void doSnapshotContentsChangeAssertions(Path filePath, AclStatus s = hdfs.getAclStatus(filePath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - 
assertPermission((short)0570, filePath); + assertPermission((short)010570, filePath); assertFilePermissionDenied(fsAsBruce, BRUCE, filePath); assertFilePermissionGranted(fsAsDiana, DIANA, filePath); s = hdfs.getAclStatus(subdirPath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission((short)0570, subdirPath); + assertPermission((short)010570, subdirPath); assertDirPermissionDenied(fsAsBruce, BRUCE, subdirPath); assertDirPermissionGranted(fsAsDiana, DIANA, subdirPath); @@ -268,14 +268,14 @@ private static void doSnapshotContentsChangeAssertions(Path filePath, s = hdfs.getAclStatus(fileSnapshotPath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission((short)0550, fileSnapshotPath); + assertPermission((short)010550, fileSnapshotPath); assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath); assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath); s = hdfs.getAclStatus(subdirSnapshotPath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission((short)0550, subdirSnapshotPath); + assertPermission((short)010550, subdirSnapshotPath); assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath); assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath); } @@ -302,14 +302,14 @@ public void testOriginalAclEnforcedForSnapshotRootAfterRemoval() assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), aclEntry(ACCESS, GROUP, NONE) }, returned); - assertPermission((short)0750, path); + assertPermission((short)010750, path); s = hdfs.getAclStatus(snapshotPath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), aclEntry(ACCESS, GROUP, NONE) }, returned); - assertPermission((short)0750, snapshotPath); + assertPermission((short)010750, snapshotPath); assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath); assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath); @@ -336,7 +336,7 @@ private static void doSnapshotRootRemovalAssertions(Path path, assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), aclEntry(ACCESS, GROUP, NONE) }, returned); - assertPermission((short)0750, snapshotPath); + assertPermission((short)010750, snapshotPath); assertDirPermissionDenied(fsAsBruce, BRUCE, path); assertDirPermissionDenied(fsAsDiana, DIANA, path); @@ -378,24 +378,24 @@ public void testOriginalAclEnforcedForSnapshotContentsAfterRemoval() AclStatus s = hdfs.getAclStatus(filePath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission((short)0550, filePath); + assertPermission((short)010550, filePath); s = hdfs.getAclStatus(subdirPath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission((short)0550, subdirPath); + assertPermission((short)010550, subdirPath); s = hdfs.getAclStatus(fileSnapshotPath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission((short)0550, fileSnapshotPath); + assertPermission((short)010550, fileSnapshotPath); assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath); assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath); s = hdfs.getAclStatus(subdirSnapshotPath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - 
assertPermission((short)0550, subdirSnapshotPath); + assertPermission((short)010550, subdirSnapshotPath); assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath); assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath); @@ -437,14 +437,14 @@ private static void doSnapshotContentsRemovalAssertions(Path filePath, s = hdfs.getAclStatus(fileSnapshotPath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission((short)0550, fileSnapshotPath); + assertPermission((short)010550, fileSnapshotPath); assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath); assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath); s = hdfs.getAclStatus(subdirSnapshotPath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission((short)0550, subdirSnapshotPath); + assertPermission((short)010550, subdirSnapshotPath); assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath); assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath); } @@ -470,7 +470,7 @@ public void testModifyReadsCurrentState() throws Exception { AclStatus s = hdfs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission((short)0770, path); + assertPermission((short)010770, path); assertDirPermissionGranted(fsAsBruce, BRUCE, path); assertDirPermissionGranted(fsAsDiana, DIANA, path); } @@ -514,7 +514,7 @@ public void testDefaultAclNotCopiedToAccessAclOfNewSnapshot() aclEntry(DEFAULT, GROUP, NONE), aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission((short)0700, path); + assertPermission((short)010700, path); s = hdfs.getAclStatus(snapshotPath); returned = s.getEntries().toArray(new AclEntry[0]); @@ -524,7 +524,7 @@ public void testDefaultAclNotCopiedToAccessAclOfNewSnapshot() aclEntry(DEFAULT, GROUP, NONE), aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission((short)0700, snapshotPath); + assertPermission((short)010700, snapshotPath); assertDirPermissionDenied(fsAsBruce, BRUCE, snapshotPath); } @@ -596,14 +596,14 @@ public void testChangeAclExceedsQuota() throws Exception { assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "bruce", READ_WRITE), aclEntry(ACCESS, GROUP, NONE) }, returned); - assertPermission((short)0660, filePath); + assertPermission((short)010660, filePath); s = hdfs.getAclStatus(fileSnapshotPath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "bruce", READ_WRITE), aclEntry(ACCESS, GROUP, NONE) }, returned); - assertPermission((short)0660, filePath); + assertPermission((short)010660, filePath); aclSpec = Lists.newArrayList( aclEntry(ACCESS, USER, "bruce", READ)); @@ -632,14 +632,14 @@ public void testRemoveAclExceedsQuota() throws Exception { assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "bruce", READ_WRITE), aclEntry(ACCESS, GROUP, NONE) }, returned); - assertPermission((short)0660, filePath); + assertPermission((short)010660, filePath); s = hdfs.getAclStatus(fileSnapshotPath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "bruce", READ_WRITE), aclEntry(ACCESS, GROUP, NONE) }, returned); - assertPermission((short)0660, filePath); + assertPermission((short)010660, filePath); aclSpec = Lists.newArrayList( aclEntry(ACCESS, USER, "bruce", READ)); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java index 444faef4b70..33da4d4946f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java @@ -176,6 +176,7 @@ public void testHelp() throws Exception { @Test public void testTransitionToActive() throws Exception { + Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); assertEquals(0, runTool("-transitionToActive", "nn1")); Mockito.verify(mockProtocol).transitionToActive( reqInfoCaptor.capture()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java index 841aa433d5d..288bcd0b002 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java @@ -31,6 +31,7 @@ import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAAdmin; +import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; @@ -204,6 +205,70 @@ public void testCheckHealth() throws Exception { assertEquals(0, runTool("-checkHealth", "nn2")); } + /** + * Test case to check whether both the name node is active or not + * @throws Exception + */ + @Test + public void testTransitionToActiveWhenOtherNamenodeisActive() + throws Exception { + NameNode nn1 = cluster.getNameNode(0); + NameNode nn2 = cluster.getNameNode(1); + if(nn1.getState() != null && !nn1.getState(). + equals(HAServiceState.STANDBY.name()) ) { + cluster.transitionToStandby(0); + } + if(nn2.getState() != null && !nn2.getState(). + equals(HAServiceState.STANDBY.name()) ) { + cluster.transitionToStandby(1); + } + //Making sure both the namenode are in standby state + assertTrue(nn1.isStandbyState()); + assertTrue(nn2.isStandbyState()); + // Triggering the transition for both namenode to Active + runTool("-transitionToActive", "nn1"); + runTool("-transitionToActive", "nn2"); + + assertFalse("Both namenodes cannot be active", nn1.isActiveState() + && nn2.isActiveState()); + + /* This test case doesn't allow nn2 to transition to Active even with + forceActive switch since nn1 is already active */ + if(nn1.getState() != null && !nn1.getState(). + equals(HAServiceState.STANDBY.name()) ) { + cluster.transitionToStandby(0); + } + if(nn2.getState() != null && !nn2.getState(). 
+ equals(HAServiceState.STANDBY.name()) ) { + cluster.transitionToStandby(1); + } + //Making sure both the namenode are in standby state + assertTrue(nn1.isStandbyState()); + assertTrue(nn2.isStandbyState()); + + runTool("-transitionToActive", "nn1"); + runTool("-transitionToActive", "nn2","--forceactive"); + + assertFalse("Both namenodes cannot be active even though with forceActive", + nn1.isActiveState() && nn2.isActiveState()); + + /* In this test case, we have deliberately shut down nn1 and this will + cause HAAAdmin#isOtherTargetNodeActive to throw an Exception + and transitionToActive for nn2 with forceActive switch will succeed + even with Exception */ + cluster.shutdownNameNode(0); + if(nn2.getState() != null && !nn2.getState(). + equals(HAServiceState.STANDBY.name()) ) { + cluster.transitionToStandby(1); + } + //Making sure both the namenode (nn2) is in standby state + assertTrue(nn2.isStandbyState()); + assertFalse(cluster.isNameNodeUp(0)); + + runTool("-transitionToActive", "nn2", "--forceactive"); + assertTrue("Namenode nn2 should be active", nn2.isActiveState()); + } + private int runTool(String ... args) throws Exception { errOutBytes.reset(); LOG.info("Running: DFSHAAdmin " + Joiner.on(" ").join(args)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml index 13f1b9ae14d..058eec59aa5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml @@ -519,5 +519,29 @@ + + + Testing listing a single cache directive + + -addPool pool1 + -addDirective -path /foo -pool pool1 -ttl 2d + -addDirective -path /bar -pool pool1 -ttl 24h + -addDirective -path /baz -replication 2 -pool pool1 -ttl 60m + -listDirectives -stats -id 30 + + + -removePool pool1 + + + + SubstringComparator + Found 1 entry + + + SubstringComparator + 30 pool1 1 + + + diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 7c1cea65716..c9661812ff0 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -194,6 +194,11 @@ Release 2.5.0 - UNRELEASED MAPREDUCE-5652. NM Recovery. ShuffleHandler should handle NM restarts. (Jason Lowe via kasha) + MAPREDUCE-5861. finishedSubMaps field in LocalContainerLauncher does not + need to be volatile. (Tsuyoshi OZAWA via junping_du) + + MAPREDUCE-5809. Enhance distcp to support preserving HDFS ACLs. (cnauroth) + OPTIMIZATIONS BUG FIXES @@ -273,6 +278,9 @@ Release 2.4.1 - UNRELEASED MAPREDUCE-5835. Killing Task might cause the job to go to ERROR state (Ming Ma via jlowe) + MAPREDUCE-5821. Avoid unintentional reallocation of byte arrays in segments + during merge. 
(Todd Lipcon via cdouglas) + Release 2.4.0 - 2014-04-07 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java index a21b3d5b284..6425144b6b5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java @@ -177,8 +177,10 @@ public void handle(ContainerLauncherEvent event) { */ private class EventHandler implements Runnable { - private volatile boolean doneWithMaps = false; - private volatile int finishedSubMaps = 0; + // doneWithMaps and finishedSubMaps are accessed from only + // one thread. Therefore, no need to make them volatile. + private boolean doneWithMaps = false; + private int finishedSubMaps = 0; private final Map> futures = new ConcurrentHashMap>(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java index b4362ac5096..9493871138d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java @@ -537,6 +537,8 @@ public boolean next() throws IOException { } } minSegment = top(); + long startPos = minSegment.getPosition(); + key = minSegment.getKey(); if (!minSegment.inMemory()) { //When we load the value from an inmemory segment, we reset //the "value" DIB in this class to the inmem segment's byte[]. @@ -547,11 +549,11 @@ public boolean next() throws IOException { //segment, we reset the "value" DIB to the byte[] in that (so //we reuse the disk segment DIB whenever we consider //a disk segment). + minSegment.getValue(diskIFileValue); value.reset(diskIFileValue.getData(), diskIFileValue.getLength()); + } else { + minSegment.getValue(value); } - long startPos = minSegment.getPosition(); - key = minSegment.getKey(); - minSegment.getValue(value); long endPos = minSegment.getPosition(); totalBytesProcessed += endPos - startPos; mergeProgress.set(totalBytesProcessed * progPerByte); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/MapredAppMasterRest.apt.vm b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/MapredAppMasterRest.apt.vm similarity index 99% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/MapredAppMasterRest.apt.vm rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/MapredAppMasterRest.apt.vm index de0093c269c..54b0dc4acdb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/MapredAppMasterRest.apt.vm +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/MapredAppMasterRest.apt.vm @@ -18,8 +18,6 @@ MapReduce Application Master REST API's. 
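The Merger change above (MAPREDUCE-5821) reorders next() so the start position and key are captured once, an on-disk segment's value is read into the long-lived diskIFileValue buffer that value then aliases via reset(), and only in-memory segments are read straight into value. The sketch below models that buffer-reuse idea with placeholder names; it is not the Merger code itself.

// Illustrative only: reuse one growable byte[] across records instead of
// allocating a fresh buffer per value, the pattern the Merger fix preserves.
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class ReusableBufferSketch {
  private byte[] data = new byte[64];
  private int length;

  // Read len bytes into the shared buffer, growing it only when it is too small.
  void readFrom(DataInputStream in, int len) throws IOException {
    if (data.length < len) {
      data = new byte[Math.max(len, data.length * 2)];  // rare reallocation
    }
    in.readFully(data, 0, len);
    length = len;
  }

  public static void main(String[] args) throws IOException {
    byte[] record = "example-value".getBytes(StandardCharsets.UTF_8);
    ReusableBufferSketch buf = new ReusableBufferSketch();
    buf.readFrom(new DataInputStream(new ByteArrayInputStream(record)), record.length);
    // A later record of similar size reuses the same backing array.
    System.out.println(new String(buf.data, 0, buf.length, StandardCharsets.UTF_8));
  }
}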
- \[ {{{./index.html}Go Back}} \] - %{toc|section=1|fromDepth=0|toDepth=2} * Overview diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/apt/HistoryServerRest.apt.vm similarity index 99% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/apt/HistoryServerRest.apt.vm index a47a565cc2b..5a1e36bf8e1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/apt/HistoryServerRest.apt.vm @@ -11,20 +11,18 @@ ~~ limitations under the License. See accompanying LICENSE file. --- - History Server REST API's. + MapReduce History Server REST API's. --- --- ${maven.build.timestamp} -History Server REST API's. - - \[ {{{./index.html}Go Back}} \] +MapReduce History Server REST API's. %{toc|section=1|fromDepth=0|toDepth=3} * Overview - The history server REST API's allow the user to get status on finished applications. Currently it only supports MapReduce and provides information on finished jobs. + The history server REST API's allow the user to get status on finished applications. * History Server Information API @@ -2671,4 +2669,4 @@ History Server REST API's. -+---+ \ No newline at end of file ++---+ diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml index 3ab1cf0f747..8803e4b3b6a 100644 --- a/hadoop-project/src/site/site.xml +++ b/hadoop-project/src/site/site.xml @@ -99,6 +99,11 @@ + + + + + @@ -116,8 +121,6 @@ - - diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java index 52d59936816..a0d85c556ae 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java @@ -22,7 +22,6 @@ import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.IOUtils; @@ -31,11 +30,15 @@ import java.io.IOException; import java.lang.reflect.Constructor; +import java.net.URI; +import java.util.Set; + +import com.google.common.collect.Sets; /** * The CopyListing abstraction is responsible for how the list of * sources and targets is constructed, for DistCp's copy function. - * The copy-listing should be a SequenceFile, + * The copy-listing should be a SequenceFile, * located at the path specified to buildListing(), * each entry being a pair of (Source relative path, source file status), * all the paths being fully qualified. 
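As the CopyListing javadoc above describes, the listing is a SequenceFile keyed by the source-relative path; with this patch the value class becomes CopyListingFileStatus so ACL entries can travel with each record. A minimal reader sketch under that assumption; the listing path is a placeholder for whatever was passed to buildListing().

// Illustrative only: iterate a copy listing produced with this patch applied.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.tools.CopyListingFileStatus;

public class ListingReaderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path listing = new Path(args[0]);  // the file handed to buildListing()
    SequenceFile.Reader reader = new SequenceFile.Reader(conf,
        SequenceFile.Reader.file(listing));
    try {
      Text relPath = new Text();
      CopyListingFileStatus status = new CopyListingFileStatus();
      while (reader.next(relPath, status)) {
        System.out.println(relPath + " -> " + status.getPath()
            + " acl=" + status.getAclEntries());
      }
    } finally {
      reader.close();
    }
  }
}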
@@ -85,7 +88,7 @@ public final void buildListing(Path pathToListFile, config.setLong(DistCpConstants.CONF_LABEL_TOTAL_BYTES_TO_BE_COPIED, getBytesToCopy()); config.setLong(DistCpConstants.CONF_LABEL_TOTAL_NUMBER_OF_RECORDS, getNumberOfPaths()); - checkForDuplicates(pathToListFile); + validateFinalListing(pathToListFile, options); } /** @@ -124,13 +127,15 @@ protected abstract void doBuildListing(Path pathToListFile, protected abstract long getNumberOfPaths(); /** - * Validate the final resulting path listing to see if there are any duplicate entries + * Validate the final resulting path listing. Checks if there are duplicate + * entries. If preserving ACLs, checks that file system can support ACLs. * * @param pathToListFile - path listing build by doBuildListing + * @param options - Input options to distcp * @throws IOException - Any issues while checking for duplicates and throws * @throws DuplicateFileException - if there are duplicates */ - private void checkForDuplicates(Path pathToListFile) + private void validateFinalListing(Path pathToListFile, DistCpOptions options) throws DuplicateFileException, IOException { Configuration config = getConf(); @@ -142,17 +147,26 @@ private void checkForDuplicates(Path pathToListFile) config, SequenceFile.Reader.file(sortedList)); try { Text lastKey = new Text("*"); //source relative path can never hold * - FileStatus lastFileStatus = new FileStatus(); + CopyListingFileStatus lastFileStatus = new CopyListingFileStatus(); Text currentKey = new Text(); + Set aclSupportCheckFsSet = Sets.newHashSet(); while (reader.next(currentKey)) { if (currentKey.equals(lastKey)) { - FileStatus currentFileStatus = new FileStatus(); + CopyListingFileStatus currentFileStatus = new CopyListingFileStatus(); reader.getCurrentValue(currentFileStatus); throw new DuplicateFileException("File " + lastFileStatus.getPath() + " and " + currentFileStatus.getPath() + " would cause duplicates. Aborting"); } reader.getCurrentValue(lastFileStatus); + if (options.shouldPreserve(DistCpOptions.FileAttribute.ACL)) { + FileSystem lastFs = lastFileStatus.getPath().getFileSystem(config); + URI lastFsUri = lastFs.getUri(); + if (!aclSupportCheckFsSet.contains(lastFsUri)) { + DistCpUtils.checkFileSystemAclSupport(lastFs); + aclSupportCheckFsSet.add(lastFsUri); + } + } lastKey.set(currentKey); } } finally { @@ -236,4 +250,10 @@ public InvalidInputException(String message) { super(message); } } + + public static class AclsNotSupportedException extends RuntimeException { + public AclsNotSupportedException(String message) { + super(message); + } + } } diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java new file mode 100644 index 00000000000..3a0c37fd3f2 --- /dev/null +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java @@ -0,0 +1,153 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.tools; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclUtil; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.io.WritableUtils; + +import com.google.common.base.Objects; +import com.google.common.collect.Lists; + +/** + * CopyListingFileStatus is a specialized subclass of {@link FileStatus} for + * attaching additional data members useful to distcp. This class does not + * override {@link FileStatus#compareTo}, because the additional data members + * are not relevant to sort order. + */ +@InterfaceAudience.Private +public final class CopyListingFileStatus extends FileStatus { + + private static final byte NO_ACL_ENTRIES = -1; + + // Retain static arrays of enum values to prevent repeated allocation of new + // arrays during deserialization. + private static final AclEntryType[] ACL_ENTRY_TYPES = AclEntryType.values(); + private static final AclEntryScope[] ACL_ENTRY_SCOPES = AclEntryScope.values(); + private static final FsAction[] FS_ACTIONS = FsAction.values(); + + private List aclEntries; + + /** + * Default constructor. + */ + public CopyListingFileStatus() { + } + + /** + * Creates a new CopyListingFileStatus by copying the members of the given + * FileStatus. + * + * @param fileStatus FileStatus to copy + */ + public CopyListingFileStatus(FileStatus fileStatus) throws IOException { + super(fileStatus); + } + + /** + * Returns the full logical ACL. + * + * @return List containing full logical ACL + */ + public List getAclEntries() { + return AclUtil.getAclFromPermAndEntries(getPermission(), + aclEntries != null ? aclEntries : Collections.emptyList()); + } + + /** + * Sets optional ACL entries. + * + * @param aclEntries List containing all ACL entries + */ + public void setAclEntries(List aclEntries) { + this.aclEntries = aclEntries; + } + + @Override + public void write(DataOutput out) throws IOException { + super.write(out); + if (aclEntries != null) { + // byte is sufficient, because 32 ACL entries is the max enforced by HDFS. 
+ out.writeByte(aclEntries.size()); + for (AclEntry entry: aclEntries) { + out.writeByte(entry.getScope().ordinal()); + out.writeByte(entry.getType().ordinal()); + WritableUtils.writeString(out, entry.getName()); + out.writeByte(entry.getPermission().ordinal()); + } + } else { + out.writeByte(NO_ACL_ENTRIES); + } + } + + @Override + public void readFields(DataInput in) throws IOException { + super.readFields(in); + byte aclEntriesSize = in.readByte(); + if (aclEntriesSize != NO_ACL_ENTRIES) { + aclEntries = Lists.newArrayListWithCapacity(aclEntriesSize); + for (int i = 0; i < aclEntriesSize; ++i) { + aclEntries.add(new AclEntry.Builder() + .setScope(ACL_ENTRY_SCOPES[in.readByte()]) + .setType(ACL_ENTRY_TYPES[in.readByte()]) + .setName(WritableUtils.readString(in)) + .setPermission(FS_ACTIONS[in.readByte()]) + .build()); + } + } else { + aclEntries = null; + } + } + + @Override + public boolean equals(Object o) { + if (!super.equals(o)) { + return false; + } + if (getClass() != o.getClass()) { + return false; + } + CopyListingFileStatus other = (CopyListingFileStatus)o; + return Objects.equal(aclEntries, other.aclEntries); + } + + @Override + public int hashCode() { + return Objects.hashCode(super.hashCode(), aclEntries); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(super.toString()); + sb.append('{'); + sb.append("aclEntries = " + aclEntries); + sb.append('}'); + return sb.toString(); + } +} diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java index b52023cd3b1..b3c506ea096 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java @@ -125,6 +125,9 @@ public int run(String[] argv) { } catch (DuplicateFileException e) { LOG.error("Duplicate files in input path: ", e); return DistCpConstants.DUPLICATE_INPUT; + } catch (AclsNotSupportedException e) { + LOG.error("ACLs not supported on at least one file system: ", e); + return DistCpConstants.ACLS_NOT_SUPPORTED; } catch (Exception e) { LOG.error("Exception encountered ", e); return DistCpConstants.UNKNOWN_ERROR; @@ -298,7 +301,9 @@ private void configureOutputFormat(Job job) throws IOException { FileSystem targetFS = targetPath.getFileSystem(configuration); targetPath = targetPath.makeQualified(targetFS.getUri(), targetFS.getWorkingDirectory()); - + if (inputOptions.shouldPreserve(DistCpOptions.FileAttribute.ACL)) { + DistCpUtils.checkFileSystemAclSupport(targetFS); + } if (inputOptions.shouldAtomicCommit()) { Path workDir = inputOptions.getAtomicWorkPath(); if (workDir == null) { diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java index 804d2802931..695d8bde394 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java @@ -115,6 +115,7 @@ public class DistCpConstants { public static final int SUCCESS = 0; public static final int INVALID_ARGUMENT = -1; public static final int DUPLICATE_INPUT = -2; + public static final int ACLS_NOT_SUPPORTED = -3; public static final int UNKNOWN_ERROR = -999; /** diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java index c3630cc4fb4..1639c1d8373 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java @@ -45,8 +45,10 @@ public enum DistCpOptionSwitch { * */ PRESERVE_STATUS(DistCpConstants.CONF_LABEL_PRESERVE_STATUS, - new Option("p", true, "preserve status (rbugpc)" + - "(replication, block-size, user, group, permission, checksum-type)")), + new Option("p", true, "preserve status (rbugpca)(replication, " + + "block-size, user, group, permission, checksum-type, ACL). If " + + "-p is specified with no , then preserves replication, block " + + "size, user, group, permission and checksum type.")), /** * Update target location by copying only files that are missing diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java index aaca67db76a..5906266fb78 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java @@ -65,7 +65,7 @@ public class DistCpOptions { private boolean targetPathExists = true; public static enum FileAttribute{ - REPLICATION, BLOCKSIZE, USER, GROUP, PERMISSION, CHECKSUMTYPE; + REPLICATION, BLOCKSIZE, USER, GROUP, PERMISSION, CHECKSUMTYPE, ACL; public static FileAttribute getAttribute(char symbol) { for (FileAttribute attribute : values()) { diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java index 7f07927c759..3bce893c14b 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java @@ -23,11 +23,12 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.tools.DistCpOptions.FileAttribute; import org.apache.hadoop.tools.util.DistCpUtils; import org.apache.hadoop.mapreduce.security.TokenCache; import org.apache.hadoop.security.Credentials; @@ -35,6 +36,7 @@ import com.google.common.annotations.VisibleForTesting; import java.io.*; +import java.util.List; import java.util.Stack; /** @@ -139,28 +141,34 @@ public void doBuildListing(SequenceFile.Writer fileListWriter, FileStatus rootStatus = sourceFS.getFileStatus(path); Path sourcePathRoot = computeSourceRootPath(rootStatus, options); - boolean localFile = (rootStatus.getClass() != FileStatus.class); FileStatus[] sourceFiles = sourceFS.listStatus(path); boolean explore = (sourceFiles != null && sourceFiles.length > 0); if (!explore || rootStatus.isDirectory()) { - writeToFileListingRoot(fileListWriter, rootStatus, sourcePathRoot, - localFile, options); + CopyListingFileStatus rootCopyListingStatus = + DistCpUtils.toCopyListingFileStatus(sourceFS, rootStatus, + options.shouldPreserve(FileAttribute.ACL)); + writeToFileListingRoot(fileListWriter, rootCopyListingStatus, 
+ sourcePathRoot, options); } if (explore) { for (FileStatus sourceStatus: sourceFiles) { if (LOG.isDebugEnabled()) { LOG.debug("Recording source-path: " + sourceStatus.getPath() + " for copy."); } - writeToFileListing(fileListWriter, sourceStatus, sourcePathRoot, - localFile, options); + CopyListingFileStatus sourceCopyListingStatus = + DistCpUtils.toCopyListingFileStatus(sourceFS, sourceStatus, + options.shouldPreserve(FileAttribute.ACL) && + sourceStatus.isDirectory()); + writeToFileListing(fileListWriter, sourceCopyListingStatus, + sourcePathRoot, options); if (isDirectoryAndNotEmpty(sourceFS, sourceStatus)) { if (LOG.isDebugEnabled()) { LOG.debug("Traversing non-empty source dir: " + sourceStatus.getPath()); } traverseNonEmptyDirectory(fileListWriter, sourceStatus, sourcePathRoot, - localFile, options); + options); } } } @@ -233,7 +241,7 @@ private SequenceFile.Writer getWriter(Path pathToListFile) throws IOException { return SequenceFile.createWriter(getConf(), SequenceFile.Writer.file(pathToListFile), SequenceFile.Writer.keyClass(Text.class), - SequenceFile.Writer.valueClass(FileStatus.class), + SequenceFile.Writer.valueClass(CopyListingFileStatus.class), SequenceFile.Writer.compression(SequenceFile.CompressionType.NONE)); } @@ -250,7 +258,6 @@ private static FileStatus[] getChildren(FileSystem fileSystem, private void traverseNonEmptyDirectory(SequenceFile.Writer fileListWriter, FileStatus sourceStatus, Path sourcePathRoot, - boolean localFile, DistCpOptions options) throws IOException { FileSystem sourceFS = sourcePathRoot.getFileSystem(getConf()); @@ -262,8 +269,11 @@ private void traverseNonEmptyDirectory(SequenceFile.Writer fileListWriter, if (LOG.isDebugEnabled()) LOG.debug("Recording source-path: " + sourceStatus.getPath() + " for copy."); - writeToFileListing(fileListWriter, child, sourcePathRoot, - localFile, options); + CopyListingFileStatus childCopyListingStatus = + DistCpUtils.toCopyListingFileStatus(sourceFS, child, + options.shouldPreserve(FileAttribute.ACL) && child.isDirectory()); + writeToFileListing(fileListWriter, childCopyListingStatus, + sourcePathRoot, options); if (isDirectoryAndNotEmpty(sourceFS, child)) { if (LOG.isDebugEnabled()) LOG.debug("Traversing non-empty source dir: " @@ -275,8 +285,7 @@ private void traverseNonEmptyDirectory(SequenceFile.Writer fileListWriter, } private void writeToFileListingRoot(SequenceFile.Writer fileListWriter, - FileStatus fileStatus, Path sourcePathRoot, - boolean localFile, + CopyListingFileStatus fileStatus, Path sourcePathRoot, DistCpOptions options) throws IOException { boolean syncOrOverwrite = options.shouldSyncFolder() || options.shouldOverwrite(); @@ -288,14 +297,12 @@ private void writeToFileListingRoot(SequenceFile.Writer fileListWriter, } return; } - writeToFileListing(fileListWriter, fileStatus, sourcePathRoot, localFile, - options); + writeToFileListing(fileListWriter, fileStatus, sourcePathRoot, options); } private void writeToFileListing(SequenceFile.Writer fileListWriter, - FileStatus fileStatus, + CopyListingFileStatus fileStatus, Path sourcePathRoot, - boolean localFile, DistCpOptions options) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("REL PATH: " + DistCpUtils.getRelativePath(sourcePathRoot, @@ -303,9 +310,6 @@ private void writeToFileListing(SequenceFile.Writer fileListWriter, } FileStatus status = fileStatus; - if (localFile) { - status = getFileStatus(fileStatus); - } if (!shouldCopy(fileStatus.getPath(), options)) { return; @@ -320,19 +324,4 @@ private void 
writeToFileListing(SequenceFile.Writer fileListWriter, } totalPaths++; } - - private static final ByteArrayOutputStream buffer = new ByteArrayOutputStream(64); - private DataInputBuffer in = new DataInputBuffer(); - - private FileStatus getFileStatus(FileStatus fileStatus) throws IOException { - FileStatus status = new FileStatus(); - - buffer.reset(); - DataOutputStream out = new DataOutputStream(buffer); - fileStatus.write(out); - - in.reset(buffer.toByteArray(), 0, buffer.size()); - status.readFields(in); - return status; - } } diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java index c036d3b7021..4d16445d0ea 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java @@ -178,7 +178,7 @@ private void preserveFileAttributesForDirectories(Configuration conf) throws IOE long preservedEntries = 0; try { - FileStatus srcFileStatus = new FileStatus(); + CopyListingFileStatus srcFileStatus = new CopyListingFileStatus(); Text srcRelPath = new Text(); // Iterate over every source path that was copied. @@ -246,9 +246,9 @@ private void deleteMissing(Configuration conf) throws IOException { // Delete all from target that doesn't also exist on source. long deletedEntries = 0; try { - FileStatus srcFileStatus = new FileStatus(); + CopyListingFileStatus srcFileStatus = new CopyListingFileStatus(); Text srcRelPath = new Text(); - FileStatus trgtFileStatus = new FileStatus(); + CopyListingFileStatus trgtFileStatus = new CopyListingFileStatus(); Text trgtRelPath = new Text(); FileSystem targetFS = targetFinalPath.getFileSystem(conf); diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java index 9386f6e9630..caf4057c6c2 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java @@ -24,9 +24,11 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.Mapper; +import org.apache.hadoop.tools.CopyListingFileStatus; import org.apache.hadoop.tools.DistCpConstants; import org.apache.hadoop.tools.DistCpOptionSwitch; import org.apache.hadoop.tools.DistCpOptions; @@ -37,12 +39,13 @@ import java.io.*; import java.util.EnumSet; import java.util.Arrays; +import java.util.List; /** * Mapper class that executes the DistCp copy operation. * Implements the o.a.h.mapreduce.Mapper<> interface. */ -public class CopyMapper extends Mapper { +public class CopyMapper extends Mapper { /** * Hadoop counters for the DistCp CopyMapper. 
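For readers following the type change above: CopyMapper's input value is now a CopyListingFileStatus rather than a raw FileStatus, so anything that feeds map() has to wrap the status first. The sketch below is illustrative only and not part of the patch; it assumes just the DistCpUtils.toCopyListingFileStatus() helper and the CopyListingFileStatus accessors added elsewhere in this change, and the class name and the /src/dir1 path are made up for the example.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.CopyListingFileStatus;
import org.apache.hadoop.tools.util.DistCpUtils;

public class CopyListingStatusSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Plain FileStatus as returned by the source file system.
    FileStatus raw = fs.getFileStatus(new Path("/src/dir1"));

    // Wrap it for the copy listing; when ACL preservation is requested the
    // helper checks the permission's ACL bit and, if set, attaches the
    // entries returned by getAclStatus().
    boolean preserveAcls = true;
    CopyListingFileStatus listed =
        DistCpUtils.toCopyListingFileStatus(fs, raw, preserveAcls);

    System.out.println(listed.getPath() + " -> " + listed.getAclEntries());
  }
}

Note that SimpleCopyListing only requests ACLs eagerly for directories (shouldPreserve(FileAttribute.ACL) && isDirectory()); file ACLs are re-resolved in CopyMapper at copy time instead.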
@@ -172,8 +175,8 @@ private Path findCacheFile(Path[] cacheFiles, String fileName) { * @throws IOException */ @Override - public void map(Text relPath, FileStatus sourceFileStatus, Context context) - throws IOException, InterruptedException { + public void map(Text relPath, CopyListingFileStatus sourceFileStatus, + Context context) throws IOException, InterruptedException { Path sourcePath = sourceFileStatus.getPath(); if (LOG.isDebugEnabled()) @@ -191,11 +194,13 @@ public void map(Text relPath, FileStatus sourceFileStatus, Context context) LOG.info(description); try { - FileStatus sourceCurrStatus; + CopyListingFileStatus sourceCurrStatus; FileSystem sourceFS; try { sourceFS = sourcePath.getFileSystem(conf); - sourceCurrStatus = sourceFS.getFileStatus(sourcePath); + sourceCurrStatus = DistCpUtils.toCopyListingFileStatus(sourceFS, + sourceFS.getFileStatus(sourcePath), + fileAttributes.contains(FileAttribute.ACL)); } catch (FileNotFoundException e) { throw new IOException(new RetriableFileCopyCommand.CopyReadException(e)); } diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/UniformSizeInputFormat.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/UniformSizeInputFormat.java index fd6c0d04611..4add0bb4690 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/UniformSizeInputFormat.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/UniformSizeInputFormat.java @@ -23,11 +23,11 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.tools.CopyListingFileStatus; import org.apache.hadoop.tools.DistCpConstants; import org.apache.hadoop.tools.util.DistCpUtils; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.mapreduce.*; import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; import org.apache.hadoop.mapreduce.lib.input.FileSplit; @@ -44,7 +44,8 @@ * that the total-number of bytes to be copied for each input split is * uniform. 
*/ -public class UniformSizeInputFormat extends InputFormat { +public class UniformSizeInputFormat + extends InputFormat { private static final Log LOG = LogFactory.getLog(UniformSizeInputFormat.class); @@ -76,7 +77,7 @@ private List getSplits(Configuration configuration, int numSplits, List splits = new ArrayList(numSplits); long nBytesPerSplit = (long) Math.ceil(totalSizeBytes * 1.0 / numSplits); - FileStatus srcFileStatus = new FileStatus(); + CopyListingFileStatus srcFileStatus = new CopyListingFileStatus(); Text srcRelPath = new Text(); long currentSplitSize = 0; long lastSplitStart = 0; @@ -161,9 +162,9 @@ private SequenceFile.Reader getListingFileReader(Configuration configuration) { * @throws InterruptedException */ @Override - public RecordReader createRecordReader(InputSplit split, - TaskAttemptContext context) - throws IOException, InterruptedException { - return new SequenceFileRecordReader(); + public RecordReader createRecordReader( + InputSplit split, TaskAttemptContext context) + throws IOException, InterruptedException { + return new SequenceFileRecordReader(); } } diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java index 8b188286ee8..8482e7df49a 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java @@ -26,6 +26,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.tools.CopyListingFileStatus; import org.apache.hadoop.tools.DistCpConstants; import org.apache.hadoop.tools.util.DistCpUtils; import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; @@ -90,7 +91,7 @@ private DynamicInputChunk(String chunkId, Configuration configuration) private void openForWrite() throws IOException { writer = SequenceFile.createWriter( chunkFilePath.getFileSystem(configuration), configuration, - chunkFilePath, Text.class, FileStatus.class, + chunkFilePath, Text.class, CopyListingFileStatus.class, SequenceFile.CompressionType.NONE); } @@ -117,7 +118,7 @@ public static DynamicInputChunk createChunkForWrite(String chunkId, * @param value Corresponding value from the listing file. * @throws IOException Exception onf failure to write to the file. 
*/ - public void write(Text key, FileStatus value) throws IOException { + public void write(Text key, CopyListingFileStatus value) throws IOException { writer.append(key, value); } diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputFormat.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputFormat.java index 14895d30af0..f5303d54c5c 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputFormat.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputFormat.java @@ -29,7 +29,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.tools.CopyListingFileStatus; import java.util.List; import java.util.ArrayList; @@ -133,7 +133,7 @@ private List createSplits(JobContext jobContext, List chunksFinal = new ArrayList(); - FileStatus fileStatus = new FileStatus(); + CopyListingFileStatus fileStatus = new CopyListingFileStatus(); Text relPath = new Text(); int recordCounter = 0; int chunkCount = 0; diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java index 170e2ed23de..653634d8d9f 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java @@ -25,15 +25,21 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FileChecksum; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclUtil; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.Text; +import org.apache.hadoop.tools.CopyListingFileStatus; import org.apache.hadoop.tools.DistCpOptions.FileAttribute; import org.apache.hadoop.tools.mapred.UniformSizeInputFormat; +import org.apache.hadoop.tools.CopyListing.AclsNotSupportedException; import org.apache.hadoop.tools.DistCpOptions; import org.apache.hadoop.mapreduce.InputFormat; import java.io.IOException; import java.util.EnumSet; +import java.util.List; import java.util.Locale; import java.text.DecimalFormat; import java.net.URI; @@ -181,7 +187,7 @@ public static EnumSet unpackAttributes(String attributes) { * change or any transient error) */ public static void preserve(FileSystem targetFS, Path path, - FileStatus srcFileStatus, + CopyListingFileStatus srcFileStatus, EnumSet attributes) throws IOException { FileStatus targetFileStatus = targetFS.getFileStatus(path); @@ -189,7 +195,18 @@ public static void preserve(FileSystem targetFS, Path path, String user = targetFileStatus.getOwner(); boolean chown = false; - if (attributes.contains(FileAttribute.PERMISSION) && + if (attributes.contains(FileAttribute.ACL)) { + List srcAcl = srcFileStatus.getAclEntries(); + List targetAcl = getAcl(targetFS, targetFileStatus); + if (!srcAcl.equals(targetAcl)) { + targetFS.setAcl(path, srcAcl); + } + // setAcl can't preserve sticky bit, so also call setPermission if needed. 
+ if (srcFileStatus.getPermission().getStickyBit() != + targetFileStatus.getPermission().getStickyBit()) { + targetFS.setPermission(path, srcFileStatus.getPermission()); + } + } else if (attributes.contains(FileAttribute.PERMISSION) && !srcFileStatus.getPermission().equals(targetFileStatus.getPermission())) { targetFS.setPermission(path, srcFileStatus.getPermission()); } @@ -216,6 +233,46 @@ public static void preserve(FileSystem targetFS, Path path, } } + /** + * Returns a file's full logical ACL. + * + * @param fileSystem FileSystem containing the file + * @param fileStatus FileStatus of file + * @return List containing full logical ACL + * @throws IOException if there is an I/O error + */ + public static List getAcl(FileSystem fileSystem, + FileStatus fileStatus) throws IOException { + List entries = fileSystem.getAclStatus(fileStatus.getPath()) + .getEntries(); + return AclUtil.getAclFromPermAndEntries(fileStatus.getPermission(), entries); + } + + /** + * Converts a FileStatus to a CopyListingFileStatus. If preserving ACLs, + * populates the CopyListingFileStatus with the ACLs. + * + * @param fileSystem FileSystem containing the file + * @param fileStatus FileStatus of file + * @param preserveAcls boolean true if preserving ACLs + * @throws IOException if there is an I/O error + */ + public static CopyListingFileStatus toCopyListingFileStatus( + FileSystem fileSystem, FileStatus fileStatus, boolean preserveAcls) + throws IOException { + CopyListingFileStatus copyListingFileStatus = + new CopyListingFileStatus(fileStatus); + if (preserveAcls) { + FsPermission perm = fileStatus.getPermission(); + if (perm.getAclBit()) { + List aclEntries = fileSystem.getAclStatus( + fileStatus.getPath()).getEntries(); + copyListingFileStatus.setAclEntries(aclEntries); + } + } + return copyListingFileStatus; + } + /** * Sort sequence file containing FileStatus and Text as key and value respecitvely * @@ -227,7 +284,8 @@ public static void preserve(FileSystem targetFS, Path path, */ public static Path sortListing(FileSystem fs, Configuration conf, Path sourceListing) throws IOException { - SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs, Text.class, FileStatus.class, conf); + SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs, Text.class, + CopyListingFileStatus.class, conf); Path output = new Path(sourceListing.toString() + "_sorted"); if (fs.exists(output)) { @@ -238,6 +296,25 @@ public static Path sortListing(FileSystem fs, Configuration conf, Path sourceLis return output; } + /** + * Determines if a file system supports ACLs by running a canary getAclStatus + * request on the file system root. This method is used before distcp job + * submission to fail fast if the user requested preserving ACLs, but the file + * system cannot support ACLs. + * + * @param fs FileSystem to check + * @throws AclsNotSupportedException if fs does not support ACLs + */ + public static void checkFileSystemAclSupport(FileSystem fs) + throws AclsNotSupportedException { + try { + fs.getAclStatus(new Path(Path.SEPARATOR)); + } catch (Exception e) { + throw new AclsNotSupportedException("ACLs not supported for file system: " + + fs.getUri()); + } + } + /** * String utility to convert a number-of-bytes to human readable format. 
*/ diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/StubContext.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/StubContext.java index 0223fbc2e1c..1a2227cf742 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/StubContext.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/StubContext.java @@ -23,7 +23,6 @@ import org.apache.hadoop.mapreduce.lib.map.WrappedMapper; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.io.Text; -import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.conf.Configuration; import java.util.List; @@ -33,18 +32,19 @@ public class StubContext { private StubStatusReporter reporter = new StubStatusReporter(); - private RecordReader reader; + private RecordReader reader; private StubInMemoryWriter writer = new StubInMemoryWriter(); - private Mapper.Context mapperContext; + private Mapper.Context mapperContext; - public StubContext(Configuration conf, RecordReader reader, - int taskId) throws IOException, InterruptedException { + public StubContext(Configuration conf, + RecordReader reader, int taskId) + throws IOException, InterruptedException { - WrappedMapper wrappedMapper - = new WrappedMapper(); + WrappedMapper wrappedMapper + = new WrappedMapper(); - MapContextImpl contextImpl - = new MapContextImpl(conf, + MapContextImpl contextImpl + = new MapContextImpl(conf, getTaskAttemptID(taskId), reader, writer, null, reporter, null); @@ -52,7 +52,7 @@ public StubContext(Configuration conf, RecordReader reader, this.mapperContext = wrappedMapper.getMapContext(contextImpl); } - public Mapper.Context getContext() { + public Mapper.Context getContext() { return mapperContext; } @@ -60,7 +60,7 @@ public StatusReporter getReporter() { return reporter; } - public RecordReader getReader() { + public RecordReader getReader() { return reader; } diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestCopyListing.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestCopyListing.java index 7acf2ab86f3..d8f7e0b5d85 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestCopyListing.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestCopyListing.java @@ -24,7 +24,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter; import org.apache.hadoop.tools.util.TestDistCpUtils; @@ -106,7 +105,7 @@ protected boolean shouldCopy(Path path, DistCpOptions options) { Assert.assertEquals(listing.getNumberOfPaths(), 3); SequenceFile.Reader reader = new SequenceFile.Reader(getConf(), SequenceFile.Reader.file(listingFile)); - FileStatus fileStatus = new FileStatus(); + CopyListingFileStatus fileStatus = new CopyListingFileStatus(); Text relativePath = new Text(); Assert.assertTrue(reader.next(relativePath, fileStatus)); Assert.assertEquals(relativePath.toString(), "/1"); @@ -274,7 +273,7 @@ public void testBuildListingForSingleFile() { reader = new SequenceFile.Reader(getConf(), SequenceFile.Reader.file(listFile)); - FileStatus fileStatus = new FileStatus(); + CopyListingFileStatus fileStatus = new CopyListingFileStatus(); Text relativePath = new Text(); Assert.assertTrue(reader.next(relativePath, fileStatus)); 
Assert.assertTrue(relativePath.toString().equals("")); diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithAcls.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithAcls.java new file mode 100644 index 00000000000..e8399912b38 --- /dev/null +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithAcls.java @@ -0,0 +1,329 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.tools; + +import static org.apache.hadoop.fs.permission.AclEntryScope.*; +import static org.apache.hadoop.fs.permission.AclEntryType.*; +import static org.apache.hadoop.fs.permission.FsAction.*; +import static org.junit.Assert.*; + +import java.io.IOException; +import java.net.URI; +import java.util.Arrays; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.Progressable; +import org.apache.hadoop.util.ToolRunner; + +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Tests distcp in combination with HDFS ACLs. + */ +public class TestDistCpWithAcls { + + private static MiniDFSCluster cluster; + private static Configuration conf; + private static FileSystem fs; + + @BeforeClass + public static void init() throws Exception { + initCluster(true, true); + // Create this directory structure: + // /src + // /dir1 + // /subdir1 + // /dir2 + // /dir2/file2 + // /dir2/file3 + // /dir3sticky + // /file1 + fs.mkdirs(new Path("/src/dir1/subdir1")); + fs.mkdirs(new Path("/src/dir2")); + fs.create(new Path("/src/dir2/file2")).close(); + fs.create(new Path("/src/dir2/file3")).close(); + fs.mkdirs(new Path("/src/dir3sticky")); + fs.create(new Path("/src/file1")).close(); + + // Set a mix of ACLs and plain permissions throughout the tree. 
+ fs.modifyAclEntries(new Path("/src/dir1"), Arrays.asList( + aclEntry(DEFAULT, USER, "bruce", ALL))); + + fs.modifyAclEntries(new Path("/src/dir2/file2"), Arrays.asList( + aclEntry(ACCESS, GROUP, "sales", NONE))); + + fs.setPermission(new Path("/src/dir2/file3"), + new FsPermission((short)0660)); + + fs.modifyAclEntries(new Path("/src/file1"), Arrays.asList( + aclEntry(ACCESS, USER, "diana", READ))); + + fs.setPermission(new Path("/src/dir3sticky"), + new FsPermission((short)01777)); + } + + @AfterClass + public static void shutdown() { + IOUtils.cleanup(null, fs); + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test + public void testPreserveAcls() throws Exception { + assertRunDistCp(DistCpConstants.SUCCESS, "/dstPreserveAcls"); + + assertAclEntries("/dstPreserveAcls/dir1", new AclEntry[] { + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "bruce", ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), + aclEntry(DEFAULT, OTHER, READ_EXECUTE) } ); + assertPermission("/dstPreserveAcls/dir1", (short)0755); + + assertAclEntries("/dstPreserveAcls/dir1/subdir1", new AclEntry[] { }); + assertPermission("/dstPreserveAcls/dir1/subdir1", (short)0755); + + assertAclEntries("/dstPreserveAcls/dir2", new AclEntry[] { }); + assertPermission("/dstPreserveAcls/dir2", (short)0755); + + assertAclEntries("/dstPreserveAcls/dir2/file2", new AclEntry[] { + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, GROUP, "sales", NONE) } ); + assertPermission("/dstPreserveAcls/dir2/file2", (short)0644); + + assertAclEntries("/dstPreserveAcls/dir2/file3", new AclEntry[] { }); + assertPermission("/dstPreserveAcls/dir2/file3", (short)0660); + + assertAclEntries("/dstPreserveAcls/dir3sticky", new AclEntry[] { }); + assertPermission("/dstPreserveAcls/dir3sticky", (short)01777); + + assertAclEntries("/dstPreserveAcls/file1", new AclEntry[] { + aclEntry(ACCESS, USER, "diana", READ), + aclEntry(ACCESS, GROUP, READ) } ); + assertPermission("/dstPreserveAcls/file1", (short)0644); + } + + @Test + public void testAclsNotEnabled() throws Exception { + try { + restart(false); + assertRunDistCp(DistCpConstants.ACLS_NOT_SUPPORTED, "/dstAclsNotEnabled"); + } finally { + restart(true); + } + } + + @Test + public void testAclsNotImplemented() throws Exception { + assertRunDistCp(DistCpConstants.ACLS_NOT_SUPPORTED, + "stubfs://dstAclsNotImplemented"); + } + + /** + * Stub FileSystem implementation used for testing the case of attempting + * distcp with ACLs preserved on a file system that does not support ACLs. + * The base class implementation throws UnsupportedOperationException for the + * ACL methods, so we don't need to override them. 
+ */ + public static class StubFileSystem extends FileSystem { + + @Override + public FSDataOutputStream append(Path f, int bufferSize, + Progressable progress) throws IOException { + return null; + } + + @Override + public FSDataOutputStream create(Path f, FsPermission permission, + boolean overwrite, int bufferSize, short replication, long blockSize, + Progressable progress) throws IOException { + return null; + } + + @Override + public boolean delete(Path f, boolean recursive) throws IOException { + return false; + } + + @Override + public FileStatus getFileStatus(Path f) throws IOException { + return null; + } + + @Override + public URI getUri() { + return URI.create("stubfs:///"); + } + + @Override + public Path getWorkingDirectory() { + return new Path(Path.SEPARATOR); + } + + @Override + public FileStatus[] listStatus(Path f) throws IOException { + return null; + } + + @Override + public boolean mkdirs(Path f, FsPermission permission) + throws IOException { + return false; + } + + @Override + public FSDataInputStream open(Path f, int bufferSize) throws IOException { + return null; + } + + @Override + public boolean rename(Path src, Path dst) throws IOException { + return false; + } + + @Override + public void setWorkingDirectory(Path dir) { + } + } + + /** + * Create a new AclEntry with scope, type and permission (no name). + * + * @param scope AclEntryScope scope of the ACL entry + * @param type AclEntryType ACL entry type + * @param permission FsAction set of permissions in the ACL entry + * @return AclEntry new AclEntry + */ + private static AclEntry aclEntry(AclEntryScope scope, AclEntryType type, + FsAction permission) { + return new AclEntry.Builder() + .setScope(scope) + .setType(type) + .setPermission(permission) + .build(); + } + + /** + * Create a new AclEntry with scope, type, name and permission. + * + * @param scope AclEntryScope scope of the ACL entry + * @param type AclEntryType ACL entry type + * @param name String optional ACL entry name + * @param permission FsAction set of permissions in the ACL entry + * @return AclEntry new AclEntry + */ + private static AclEntry aclEntry(AclEntryScope scope, AclEntryType type, + String name, FsAction permission) { + return new AclEntry.Builder() + .setScope(scope) + .setType(type) + .setName(name) + .setPermission(permission) + .build(); + } + + /** + * Asserts the ACL entries returned by getAclStatus for a specific path. + * + * @param path String path to check + * @param entries AclEntry[] expected ACL entries + * @throws Exception if there is any error + */ + private static void assertAclEntries(String path, AclEntry[] entries) + throws Exception { + assertArrayEquals(entries, fs.getAclStatus(new Path(path)).getEntries() + .toArray(new AclEntry[0])); + } + + /** + * Asserts the value of the FsPermission bits on the inode of a specific path. + * + * @param path String path to check + * @param perm short expected permission bits + * @throws Exception if there is any error + */ + private static void assertPermission(String path, short perm) + throws Exception { + assertEquals(perm, + fs.getFileStatus(new Path(path)).getPermission().toShort()); + } + + /** + * Runs distcp from /src to specified destination, preserving ACLs. Asserts + * expected exit code. 
+ * + * @param int exitCode expected exit code + * @param dst String distcp destination + * @throws Exception if there is any error + */ + private static void assertRunDistCp(int exitCode, String dst) + throws Exception { + DistCp distCp = new DistCp(conf, null); + assertEquals(exitCode, ToolRunner.run( + conf, distCp, new String[] { "-pa", "/src", dst })); + } + + /** + * Initialize the cluster, wait for it to become active, and get FileSystem. + * + * @param format if true, format the NameNode and DataNodes before starting up + * @param aclsEnabled if true, ACL support is enabled + * @throws Exception if any step fails + */ + private static void initCluster(boolean format, boolean aclsEnabled) + throws Exception { + conf = new Configuration(); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, aclsEnabled); + conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///"); + conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format) + .build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + } + + /** + * Restarts the cluster with ACLs enabled or disabled. + * + * @param aclsEnabled if true, ACL support is enabled + * @throws Exception if any step fails + */ + private static void restart(boolean aclsEnabled) throws Exception { + shutdown(); + initCluster(false, aclsEnabled); + } +} diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestFileBasedCopyListing.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestFileBasedCopyListing.java index c4e451568d4..fe2c66870e6 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestFileBasedCopyListing.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestFileBasedCopyListing.java @@ -23,7 +23,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.Text; @@ -531,7 +530,7 @@ private void checkResult(Path listFile, int count) throws IOException { SequenceFile.Reader.file(listFile)); try { Text relPath = new Text(); - FileStatus fileStatus = new FileStatus(); + CopyListingFileStatus fileStatus = new CopyListingFileStatus(); while (reader.next(relPath, fileStatus)) { if (fileStatus.isDirectory() && relPath.toString().equals("")) { // ignore root with empty relPath, which is an entry to be diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestGlobbedCopyListing.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestGlobbedCopyListing.java index a91b201c594..6c03b4ee8a8 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestGlobbedCopyListing.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestGlobbedCopyListing.java @@ -19,7 +19,6 @@ package org.apache.hadoop.tools; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -121,7 +120,7 @@ private void verifyContents(Path listingPath) throws Exception { SequenceFile.Reader reader = new SequenceFile.Reader(cluster.getFileSystem(), listingPath, new Configuration()); Text key = new Text(); - FileStatus value = new 
FileStatus(); + CopyListingFileStatus value = new CopyListingFileStatus(); Map actualValues = new HashMap(); while (reader.next(key, value)) { if (value.isDirectory() && key.toString().equals("")) { diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java index 616b5815b77..296e994f930 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java @@ -410,6 +410,7 @@ public void testPreserve() { Assert.assertTrue(options.shouldPreserve(FileAttribute.USER)); Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); + Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL)); options = OptionsParser.parse(new String[] { "-p", @@ -421,6 +422,7 @@ public void testPreserve() { Assert.assertTrue(options.shouldPreserve(FileAttribute.USER)); Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); + Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL)); options = OptionsParser.parse(new String[] { "-pbr", @@ -433,6 +435,7 @@ public void testPreserve() { Assert.assertFalse(options.shouldPreserve(FileAttribute.USER)); Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); + Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL)); options = OptionsParser.parse(new String[] { "-pbrgup", @@ -445,9 +448,10 @@ public void testPreserve() { Assert.assertTrue(options.shouldPreserve(FileAttribute.USER)); Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); + Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL)); options = OptionsParser.parse(new String[] { - "-pbrgupc", + "-pbrgupca", "-f", "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"}); @@ -457,6 +461,7 @@ public void testPreserve() { Assert.assertTrue(options.shouldPreserve(FileAttribute.USER)); Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); + Assert.assertTrue(options.shouldPreserve(FileAttribute.ACL)); options = OptionsParser.parse(new String[] { "-pc", @@ -469,6 +474,7 @@ public void testPreserve() { Assert.assertFalse(options.shouldPreserve(FileAttribute.USER)); Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); + Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL)); options = OptionsParser.parse(new String[] { "-p", @@ -485,7 +491,7 @@ public void testPreserve() { try { OptionsParser.parse(new String[] { - "-pabc", + "-pabcd", "-f", "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target"}); diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java index 4ba95ec99bc..7eb1b6801e0 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java @@ -42,6 +42,7 @@ 
import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.tools.CopyListingFileStatus; import org.apache.hadoop.tools.DistCpConstants; import org.apache.hadoop.tools.DistCpOptionSwitch; import org.apache.hadoop.tools.DistCpOptions; @@ -222,7 +223,7 @@ private void testCopy(boolean preserveChecksum) { FileSystem fs = cluster.getFileSystem(); CopyMapper copyMapper = new CopyMapper(); StubContext stubContext = new StubContext(getConfiguration(), null, 0); - Mapper.Context context + Mapper.Context context = stubContext.getContext(); Configuration configuration = context.getConfiguration(); @@ -238,7 +239,7 @@ private void testCopy(boolean preserveChecksum) { for (Path path: pathList) { copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)), - fs.getFileStatus(path), context); + new CopyListingFileStatus(fs.getFileStatus(path)), context); } // Check that the maps worked. @@ -283,12 +284,11 @@ private void testCopy(boolean preserveChecksum) { } private void testCopyingExistingFiles(FileSystem fs, CopyMapper copyMapper, - Mapper.Context context) { - + Mapper.Context context) { try { for (Path path : pathList) { copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)), - fs.getFileStatus(path), context); + new CopyListingFileStatus(fs.getFileStatus(path)), context); } Assert.assertEquals(nFiles, @@ -309,7 +309,7 @@ public void testMakeDirFailure() { FileSystem fs = cluster.getFileSystem(); CopyMapper copyMapper = new CopyMapper(); StubContext stubContext = new StubContext(getConfiguration(), null, 0); - Mapper.Context context + Mapper.Context context = stubContext.getContext(); Configuration configuration = context.getConfiguration(); @@ -320,7 +320,7 @@ public void testMakeDirFailure() { copyMapper.setup(context); copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), pathList.get(0))), - fs.getFileStatus(pathList.get(0)), context); + new CopyListingFileStatus(fs.getFileStatus(pathList.get(0))), context); Assert.assertTrue("There should have been an exception.", false); } @@ -343,7 +343,7 @@ public void testDirToFile() { FileSystem fs = cluster.getFileSystem(); CopyMapper copyMapper = new CopyMapper(); StubContext stubContext = new StubContext(getConfiguration(), null, 0); - Mapper.Context context + Mapper.Context context = stubContext.getContext(); mkdirs(SOURCE_PATH + "/src/file"); @@ -351,7 +351,8 @@ public void testDirToFile() { try { copyMapper.setup(context); copyMapper.map(new Text("/src/file"), - fs.getFileStatus(new Path(SOURCE_PATH + "/src/file")), + new CopyListingFileStatus(fs.getFileStatus( + new Path(SOURCE_PATH + "/src/file"))), context); } catch (IOException e) { Assert.assertTrue(e.getMessage().startsWith("Can't replace")); @@ -372,22 +373,24 @@ public void testPreserve() { final CopyMapper copyMapper = new CopyMapper(); - final Mapper.Context context = tmpUser. 
- doAs(new PrivilegedAction.Context>() { - @Override - public Mapper.Context run() { - try { - StubContext stubContext = new StubContext(getConfiguration(), null, 0); - return stubContext.getContext(); - } catch (Exception e) { - LOG.error("Exception encountered ", e); - throw new RuntimeException(e); - } - } - }); + final Mapper.Context context = + tmpUser.doAs( + new PrivilegedAction.Context>() { + @Override + public Mapper.Context run() { + try { + StubContext stubContext = new StubContext(getConfiguration(), null, 0); + return stubContext.getContext(); + } catch (Exception e) { + LOG.error("Exception encountered ", e); + throw new RuntimeException(e); + } + } + }); EnumSet preserveStatus = EnumSet.allOf(DistCpOptions.FileAttribute.class); + preserveStatus.remove(DistCpOptions.FileAttribute.ACL); context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS, DistCpUtils.packAttributes(preserveStatus)); @@ -415,7 +418,8 @@ public Integer run() { try { copyMapper.setup(context); copyMapper.map(new Text("/src/file"), - tmpFS.getFileStatus(new Path(SOURCE_PATH + "/src/file")), + new CopyListingFileStatus(tmpFS.getFileStatus( + new Path(SOURCE_PATH + "/src/file"))), context); Assert.fail("Expected copy to fail"); } catch (AccessControlException e) { @@ -442,19 +446,20 @@ public void testCopyReadableFiles() { final CopyMapper copyMapper = new CopyMapper(); - final Mapper.Context context = tmpUser. - doAs(new PrivilegedAction.Context>() { - @Override - public Mapper.Context run() { - try { - StubContext stubContext = new StubContext(getConfiguration(), null, 0); - return stubContext.getContext(); - } catch (Exception e) { - LOG.error("Exception encountered ", e); - throw new RuntimeException(e); - } - } - }); + final Mapper.Context context = + tmpUser.doAs( + new PrivilegedAction.Context>() { + @Override + public Mapper.Context run() { + try { + StubContext stubContext = new StubContext(getConfiguration(), null, 0); + return stubContext.getContext(); + } catch (Exception e) { + LOG.error("Exception encountered ", e); + throw new RuntimeException(e); + } + } + }); touchFile(SOURCE_PATH + "/src/file"); mkdirs(TARGET_PATH); @@ -481,7 +486,8 @@ public Integer run() { try { copyMapper.setup(context); copyMapper.map(new Text("/src/file"), - tmpFS.getFileStatus(new Path(SOURCE_PATH + "/src/file")), + new CopyListingFileStatus(tmpFS.getFileStatus( + new Path(SOURCE_PATH + "/src/file"))), context); } catch (Exception e) { throw new RuntimeException(e); @@ -518,9 +524,11 @@ public StubContext run() { } }); - final Mapper.Context context = stubContext.getContext(); + final Mapper.Context context = + stubContext.getContext(); EnumSet preserveStatus = EnumSet.allOf(DistCpOptions.FileAttribute.class); + preserveStatus.remove(DistCpOptions.FileAttribute.ACL); context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS, DistCpUtils.packAttributes(preserveStatus)); @@ -551,7 +559,8 @@ public Integer run() { try { copyMapper.setup(context); copyMapper.map(new Text("/src/file"), - tmpFS.getFileStatus(new Path(SOURCE_PATH + "/src/file")), + new CopyListingFileStatus(tmpFS.getFileStatus( + new Path(SOURCE_PATH + "/src/file"))), context); Assert.assertEquals(stubContext.getWriter().values().size(), 1); Assert.assertTrue(stubContext.getWriter().values().get(0).toString().startsWith("SKIP")); @@ -594,8 +603,9 @@ public StubContext run() { EnumSet preserveStatus = EnumSet.allOf(DistCpOptions.FileAttribute.class); + preserveStatus.remove(DistCpOptions.FileAttribute.ACL); - final Mapper.Context 
context + final Mapper.Context context = stubContext.getContext(); context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS, @@ -629,7 +639,8 @@ public Integer run() { try { copyMapper.setup(context); copyMapper.map(new Text("/src/file"), - tmpFS.getFileStatus(new Path(SOURCE_PATH + "/src/file")), + new CopyListingFileStatus(tmpFS.getFileStatus( + new Path(SOURCE_PATH + "/src/file"))), context); Assert.fail("Didn't expect the file to be copied"); } catch (AccessControlException ignore) { @@ -661,7 +672,7 @@ public void testFileToDir() { FileSystem fs = cluster.getFileSystem(); CopyMapper copyMapper = new CopyMapper(); StubContext stubContext = new StubContext(getConfiguration(), null, 0); - Mapper.Context context + Mapper.Context context = stubContext.getContext(); touchFile(SOURCE_PATH + "/src/file"); @@ -669,7 +680,8 @@ public void testFileToDir() { try { copyMapper.setup(context); copyMapper.map(new Text("/src/file"), - fs.getFileStatus(new Path(SOURCE_PATH + "/src/file")), + new CopyListingFileStatus(fs.getFileStatus( + new Path(SOURCE_PATH + "/src/file"))), context); } catch (IOException e) { Assert.assertTrue(e.getMessage().startsWith("Can't replace")); @@ -688,7 +700,7 @@ private void doTestIgnoreFailures(boolean ignoreFailures) { FileSystem fs = cluster.getFileSystem(); CopyMapper copyMapper = new CopyMapper(); StubContext stubContext = new StubContext(getConfiguration(), null, 0); - Mapper.Context context + Mapper.Context context = stubContext.getContext(); Configuration configuration = context.getConfiguration(); @@ -705,7 +717,7 @@ private void doTestIgnoreFailures(boolean ignoreFailures) { if (!fileStatus.isDirectory()) { fs.delete(path, true); copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)), - fileStatus, context); + new CopyListingFileStatus(fileStatus), context); } } if (ignoreFailures) { @@ -745,7 +757,7 @@ public void testCopyFailOnBlockSizeDifference() { FileSystem fs = cluster.getFileSystem(); CopyMapper copyMapper = new CopyMapper(); StubContext stubContext = new StubContext(getConfiguration(), null, 0); - Mapper.Context context + Mapper.Context context = stubContext.getContext(); Configuration configuration = context.getConfiguration(); @@ -759,7 +771,7 @@ public void testCopyFailOnBlockSizeDifference() { for (Path path : pathList) { final FileStatus fileStatus = fs.getFileStatus(path); copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)), - fileStatus, context); + new CopyListingFileStatus(fileStatus), context); } Assert.fail("Copy should have failed because of block-size difference."); @@ -780,7 +792,7 @@ private void testPreserveBlockSizeAndReplicationImpl(boolean preserve){ FileSystem fs = cluster.getFileSystem(); CopyMapper copyMapper = new CopyMapper(); StubContext stubContext = new StubContext(getConfiguration(), null, 0); - Mapper.Context context + Mapper.Context context = stubContext.getContext(); Configuration configuration = context.getConfiguration(); @@ -798,7 +810,7 @@ private void testPreserveBlockSizeAndReplicationImpl(boolean preserve){ for (Path path : pathList) { final FileStatus fileStatus = fs.getFileStatus(path); copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)), - fileStatus, context); + new CopyListingFileStatus(fileStatus), context); } // Check that the block-size/replication aren't preserved. 
@@ -855,7 +867,7 @@ public void testSingleFileCopy() { FileSystem fs = cluster.getFileSystem(); CopyMapper copyMapper = new CopyMapper(); StubContext stubContext = new StubContext(getConfiguration(), null, 0); - Mapper.Context context + Mapper.Context context = stubContext.getContext(); context.getConfiguration().set( @@ -863,7 +875,8 @@ public void testSingleFileCopy() { targetFilePath.getParent().toString()); // Parent directory. copyMapper.setup(context); - final FileStatus sourceFileStatus = fs.getFileStatus(sourceFilePath); + final CopyListingFileStatus sourceFileStatus = new CopyListingFileStatus( + fs.getFileStatus(sourceFilePath)); long before = fs.getFileStatus(targetFilePath).getModificationTime(); copyMapper.map(new Text(DistCpUtils.getRelativePath( @@ -907,7 +920,7 @@ private void testPreserveUserGroupImpl(boolean preserve){ FileSystem fs = cluster.getFileSystem(); CopyMapper copyMapper = new CopyMapper(); StubContext stubContext = new StubContext(getConfiguration(), null, 0); - Mapper.Context context + Mapper.Context context = stubContext.getContext(); Configuration configuration = context.getConfiguration(); @@ -926,7 +939,7 @@ private void testPreserveUserGroupImpl(boolean preserve){ for (Path path : pathList) { final FileStatus fileStatus = fs.getFileStatus(path); copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)), - fileStatus, context); + new CopyListingFileStatus(fileStatus), context); } // Check that the user/group attributes are preserved diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestUniformSizeInputFormat.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestUniformSizeInputFormat.java index 93fad905f29..78e226252d2 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestUniformSizeInputFormat.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestUniformSizeInputFormat.java @@ -30,6 +30,7 @@ import org.apache.hadoop.mapreduce.task.JobContextImpl; import org.apache.hadoop.mapreduce.lib.input.FileSplit; import org.apache.hadoop.tools.CopyListing; +import org.apache.hadoop.tools.CopyListingFileStatus; import org.apache.hadoop.tools.DistCpOptions; import org.apache.hadoop.tools.StubContext; import org.apache.hadoop.security.Credentials; @@ -122,8 +123,8 @@ public void testGetSplits(int nMaps) throws Exception { for (int i=0; i recordReader = uniformSizeInputFormat.createRecordReader( - split, null); + RecordReader recordReader = + uniformSizeInputFormat.createRecordReader(split, null); StubContext stubContext = new StubContext(jobContext.getConfiguration(), recordReader, 0); final TaskAttemptContext taskAttemptContext @@ -168,7 +169,7 @@ private void checkSplits(Path listFile, List splits) throws IOExcept try { reader.seek(lastEnd); - FileStatus srcFileStatus = new FileStatus(); + CopyListingFileStatus srcFileStatus = new CopyListingFileStatus(); Text srcRelPath = new Text(); Assert.assertFalse(reader.next(srcRelPath, srcFileStatus)); } finally { diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/lib/TestDynamicInputFormat.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/lib/TestDynamicInputFormat.java index ad67eb0371c..8cc8317b994 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/lib/TestDynamicInputFormat.java +++ 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/lib/TestDynamicInputFormat.java @@ -25,13 +25,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.*; import org.apache.hadoop.mapreduce.task.JobContextImpl; import org.apache.hadoop.tools.CopyListing; +import org.apache.hadoop.tools.CopyListingFileStatus; import org.apache.hadoop.tools.DistCpOptions; import org.apache.hadoop.tools.StubContext; import org.apache.hadoop.security.Credentials; @@ -118,15 +118,15 @@ public void testGetSplits() throws Exception { +"/tmp/testDynInputFormat/fileList.seq"), options); JobContext jobContext = new JobContextImpl(configuration, new JobID()); - DynamicInputFormat inputFormat = - new DynamicInputFormat(); + DynamicInputFormat inputFormat = + new DynamicInputFormat(); List splits = inputFormat.getSplits(jobContext); int nFiles = 0; int taskId = 0; for (InputSplit split : splits) { - RecordReader recordReader = + RecordReader recordReader = inputFormat.createRecordReader(split, null); StubContext stubContext = new StubContext(jobContext.getConfiguration(), recordReader, taskId); @@ -136,7 +136,7 @@ public void testGetSplits() throws Exception { recordReader.initialize(splits.get(0), taskAttemptContext); float previousProgressValue = 0f; while (recordReader.nextKeyValue()) { - FileStatus fileStatus = recordReader.getCurrentValue(); + CopyListingFileStatus fileStatus = recordReader.getCurrentValue(); String source = fileStatus.getPath().toString(); System.out.println(source); Assert.assertTrue(expectedFilePaths.contains(source)); diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java index 762979287dd..4825e15cee2 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.tools.CopyListingFileStatus; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.junit.Assert; @@ -106,7 +107,8 @@ public void testPreserve() { Path src = new Path("/tmp/src"); fs.mkdirs(path); fs.mkdirs(src); - FileStatus srcStatus = fs.getFileStatus(src); + CopyListingFileStatus srcStatus = new CopyListingFileStatus( + fs.getFileStatus(src)); FsPermission noPerm = new FsPermission((short) 0); fs.setPermission(path, noPerm); diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index eb7b44b4eba..da69c048427 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -26,6 +26,9 @@ Release 2.5.0 - UNRELEASED YARN-1864. Fair Scheduler Dynamic Hierarchical User Queues (Ashwin Shankar via Sandy Ryza) + YARN-1362. Distinguish between nodemanager shutdown for decommission vs shutdown + for restart. (Jason Lowe via junping_du) + IMPROVEMENTS YARN-1479. Invalid NaN values in Hadoop REST API JSON response (Chen He via @@ -108,11 +111,8 @@ Release 2.5.0 - UNRELEASED YARN-2011. 
Fix typo and warning in TestLeafQueue (Chen He via junping_du) - YARN-1976. Fix yarn application CLI to print the scheme of the tracking url - of failed/killed applications. (Junping Du via jianhe) - - YARN-2016. Fix a bug in GetApplicationsRequestPBImpl to add the missed fields - to proto. (Junping Du via jianhe) + YARN-2042. String shouldn't be compared using == in + QueuePlacementRule#NestedUserQueue#getQueueForApp (Chen He via Sandy Ryza) Release 2.4.1 - UNRELEASED @@ -216,6 +216,21 @@ Release 2.4.1 - UNRELEASED causing both RMs to be stuck in standby mode when automatic failover is enabled. (Karthik Kambatla and Xuan Gong via vinodkv) + YARN-1957. Consider the max capacity of the queue when computing the ideal + capacity for preemption. (Carlo Curino via cdouglas) + + YARN-1986. In Fifo Scheduler, node heartbeat in between creating app and + attempt causes NPE (Hong Zhiguo via Sandy Ryza) + + YARN-1976. Fix yarn application CLI to print the scheme of the tracking url + of failed/killed applications. (Junping Du via jianhe) + + YARN-2016. Fix a bug in GetApplicationsRequestPBImpl to add the missed fields + to proto. (Junping Du via jianhe) + + YARN-2053. Fixed a bug in AMS to not add null NMToken into NMTokens list from + previous attempts for work-preserving AM restart. (Wangda Tan via jianhe) + Release 2.4.0 - 2014-04-07 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java index 729e0433d08..c1c57b4b1f6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java @@ -66,4 +66,8 @@ public interface Context { LocalDirsHandlerService getLocalDirsHandler(); ApplicationACLsManager getApplicationACLsManager(); + + boolean getDecommissioned(); + + void setDecommissioned(boolean isDecommissioned); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index 57ff127dbaa..913578fcf91 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -272,7 +272,8 @@ public static class NMContext implements Context { private WebServer webServer; private final NodeHealthStatus nodeHealthStatus = RecordFactoryProvider .getRecordFactory(null).newRecordInstance(NodeHealthStatus.class); - + private boolean isDecommissioned = false; + public NMContext(NMContainerTokenSecretManager containerTokenSecretManager, NMTokenSecretManagerInNM nmTokenSecretManager, LocalDirsHandlerService dirsHandler, ApplicationACLsManager aclsManager) { @@ -349,6 +350,16 @@ public LocalDirsHandlerService getLocalDirsHandler() { public ApplicationACLsManager 
getApplicationACLsManager() { return aclsManager; } + + @Override + public boolean getDecommissioned() { + return isDecommissioned; + } + + @Override + public void setDecommissioned(boolean isDecommissioned) { + this.isDecommissioned = isDecommissioned; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java index 4db000c6014..df99737d4b3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java @@ -493,6 +493,7 @@ public void run() { + " hence shutting down."); LOG.warn("Message from ResourceManager: " + response.getDiagnosticsMessage()); + context.setDecommissioned(true); dispatcher.getEventHandler().handle( new NodeManagerEvent(NodeManagerEventType.SHUTDOWN)); break; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index 0ff34a51725..3729ab1eb5a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -930,6 +930,7 @@ public void testNodeDecommision() throws Exception { Thread.sleep(500); } Assert.assertFalse(heartBeatID < 1); + Assert.assertTrue(nm.getNMContext().getDecommissioned()); // NM takes a while to reach the STOPPED state. 
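Editor's note: the Context/NMContext changes above (YARN-1362) record whether the ResourceManager asked the node to decommission; in the hunks shown here the flag is set by NodeStatusUpdaterImpl and only read back by the test. As a rough sketch of how a shutdown path could branch on it, consider the following. DecommissionAwareShutdown and its two helper methods are hypothetical, invented purely to illustrate the intended decommission-versus-restart distinction; they are not part of the patch.

    import org.apache.hadoop.yarn.server.nodemanager.Context;

    // Hypothetical consumer of Context#getDecommissioned(); not part of the patch.
    public class DecommissionAwareShutdown {
      private final Context context;

      public DecommissionAwareShutdown(Context context) {
        this.context = context;
      }

      public void onShutdown() {
        if (context.getDecommissioned()) {
          // The RM decommissioned this node: it will not come back, clean up fully.
          cleanupPermanently();
        } else {
          // Ordinary shutdown (e.g. restart or upgrade): keep recoverable state.
          preserveStateForRestart();
        }
      }

      private void cleanupPermanently() { /* hypothetical */ }
      private void preserveStateForRestart() { /* hypothetical */ }
    }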
waitCount = 0; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java index 6b2cb7f876a..94dc47437fa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java @@ -298,9 +298,12 @@ public RegisterApplicationMasterResponse registerApplicationMaster( List nmTokens = new ArrayList(); for (Container container : transferredContainers) { try { - nmTokens.add(rmContext.getNMTokenSecretManager() - .createAndGetNMToken(app.getUser(), applicationAttemptId, - container)); + NMToken token = rmContext.getNMTokenSecretManager() + .createAndGetNMToken(app.getUser(), applicationAttemptId, + container); + if (null != token) { + nmTokens.add(token); + } } catch (IllegalArgumentException e) { // if it's a DNS issue, throw UnknowHostException directly and that // will be automatically retried by RMProxy in RPC layer. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java index 7ea73d9471e..684c82ba652 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; @@ -293,34 +294,31 @@ private void computeIdealResourceDistribution(ResourceCalculator rc, // with the total capacity for this set of queues Resource unassigned = Resources.clone(tot_guarant); - //assign all cluster resources until no more demand, or no resources are left - while (!qAlloc.isEmpty() && Resources.greaterThan(rc, tot_guarant, - unassigned, Resources.none())) { - Resource wQassigned = Resource.newInstance(0, 0); + // group queues based on whether they have non-zero guaranteed capacity + Set nonZeroGuarQueues = new HashSet(); + Set zeroGuarQueues = new HashSet(); - // we compute normalizedGuarantees capacity based on currently active - // queues - resetCapacity(rc, unassigned, qAlloc); - - // offer for each queue their capacity first and in following invocations - // their share of over-capacity - for (Iterator i = qAlloc.iterator(); i.hasNext();) { - TempQueue sub = i.next(); - Resource wQavail = - Resources.multiply(unassigned, sub.normalizedGuarantee); - Resource wQidle = 
sub.offer(wQavail, rc, tot_guarant); - Resource wQdone = Resources.subtract(wQavail, wQidle); - // if the queue returned a value > 0 it means it is fully satisfied - // and it is removed from the list of active queues qAlloc - if (!Resources.greaterThan(rc, tot_guarant, - wQdone, Resources.none())) { - i.remove(); - } - Resources.addTo(wQassigned, wQdone); + for (TempQueue q : qAlloc) { + if (Resources + .greaterThan(rc, tot_guarant, q.guaranteed, Resources.none())) { + nonZeroGuarQueues.add(q); + } else { + zeroGuarQueues.add(q); } - Resources.subtractFrom(unassigned, wQassigned); } + // first compute the allocation as a fixpoint based on guaranteed capacity + computeFixpointAllocation(rc, tot_guarant, nonZeroGuarQueues, unassigned, + false); + + // if any capacity is left unassigned, distribute it among zero-guarantee + // queues uniformly (i.e., not based on guaranteed capacity, as this is zero) + if (!zeroGuarQueues.isEmpty() + && Resources.greaterThan(rc, tot_guarant, unassigned, Resources.none())) { + computeFixpointAllocation(rc, tot_guarant, zeroGuarQueues, unassigned, + true); + } + // based on ideal assignment computed above and current assignment we derive // how much preemption is required overall Resource totPreemptionNeeded = Resource.newInstance(0, 0); @@ -353,6 +351,46 @@ private void computeIdealResourceDistribution(ResourceCalculator rc, } } + + /** + * Given a set of queues compute the fix-point distribution of unassigned + * resources among them. As pending requests of a queue are exhausted, the + * queue is removed from the set and remaining capacity redistributed among + * remaining queues. The distribution is weighted based on guaranteed + * capacity, unless asked to ignoreGuarantee, in which case resources are + * distributed uniformly.
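Editor's note: to make the javadoc above concrete, here is a rough, self-contained sketch of the same fix-point idea, including the max-capacity bound that TempQueue#offer gains further below. It uses plain longs instead of Resource and ResourceCalculator; SimpleQueue and FixpointSketch are invented names, so this is an illustration of the technique, not the patch's implementation.

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    // Simplified queue model: every quantity is a single long instead of a Resource.
    class SimpleQueue {
      final long guaranteed;     // guaranteed capacity
      final long maxCapacity;    // absolute maximum capacity
      final long wanted;         // current usage + pending demand
      long idealAssigned = 0;

      SimpleQueue(long guaranteed, long maxCapacity, long wanted) {
        this.guaranteed = guaranteed;
        this.maxCapacity = maxCapacity;
        this.wanted = wanted;
      }

      // Accept at most 'avail', never exceeding maxCapacity or demand; return the leftover.
      long offer(long avail) {
        long accepted = Math.min(avail,
            Math.min(maxCapacity - idealAssigned, wanted - idealAssigned));
        accepted = Math.max(0, accepted);
        idealAssigned += accepted;
        return avail - accepted;
      }
    }

    class FixpointSketch {
      // Distribute 'unassigned' among 'queues', weighted by guarantee unless
      // ignoreGuarantee is set (then uniformly); satisfied queues drop out.
      static void allocate(List<SimpleQueue> queues, long unassigned,
          boolean ignoreGuarantee) {
        List<SimpleQueue> active = new ArrayList<>(queues);
        while (!active.isEmpty() && unassigned > 0) {
          long totalGuar = 0;
          for (SimpleQueue q : active) {
            totalGuar += q.guaranteed;
          }
          int n = active.size();
          long assignedThisRound = 0;
          for (Iterator<SimpleQueue> it = active.iterator(); it.hasNext();) {
            SimpleQueue q = it.next();
            double weight = (ignoreGuarantee || totalGuar == 0)
                ? 1.0 / n
                : (double) q.guaranteed / totalGuar;
            long share = (long) (unassigned * weight);
            long taken = share - q.offer(share);
            if (taken == 0) {
              it.remove();   // queue took nothing this round: treat it as satisfied
            }
            assignedThisRound += taken;
          }
          unassigned -= assignedThisRound;
        }
      }
    }

Note that with weighting enabled a zero-guarantee queue gets weight 0 and is dropped in the first round, which is why the code above first splits queues into the non-zero-guarantee and zero-guarantee groups and runs the second group with the uniform distribution.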
+ */ + private void computeFixpointAllocation(ResourceCalculator rc, + Resource tot_guarant, Collection qAlloc, Resource unassigned, + boolean ignoreGuarantee) { + //assign all cluster resources until no more demand, or no resources are left + while (!qAlloc.isEmpty() && Resources.greaterThan(rc, tot_guarant, + unassigned, Resources.none())) { + Resource wQassigned = Resource.newInstance(0, 0); + + // we compute normalizedGuarantees capacity based on currently active + // queues + resetCapacity(rc, unassigned, qAlloc, ignoreGuarantee); + + // offer for each queue their capacity first and in following invocations + // their share of over-capacity + for (Iterator i = qAlloc.iterator(); i.hasNext();) { + TempQueue sub = i.next(); + Resource wQavail = + Resources.multiply(unassigned, sub.normalizedGuarantee); + Resource wQidle = sub.offer(wQavail, rc, tot_guarant); + Resource wQdone = Resources.subtract(wQavail, wQidle); + // if the queue returned a value > 0 it means it is fully satisfied + // and it is removed from the list of active queues qAlloc + if (!Resources.greaterThan(rc, tot_guarant, + wQdone, Resources.none())) { + i.remove(); + } + Resources.addTo(wQassigned, wQdone); + } + Resources.subtractFrom(unassigned, wQassigned); + } + } /** * Computes a normalizedGuaranteed capacity based on active queues @@ -361,14 +399,21 @@ private void computeIdealResourceDistribution(ResourceCalculator rc, * @param queues the list of queues to consider */ private void resetCapacity(ResourceCalculator rc, Resource clusterResource, - List queues) { + Collection queues, boolean ignoreGuar) { Resource activeCap = Resource.newInstance(0, 0); - for (TempQueue q : queues) { - Resources.addTo(activeCap, q.guaranteed); - } - for (TempQueue q : queues) { - q.normalizedGuarantee = Resources.divide(rc, clusterResource, - q.guaranteed, activeCap); + + if (ignoreGuar) { + for (TempQueue q : queues) { + q.normalizedGuarantee = (float) 1.0f / ((float) queues.size()); + } + } else { + for (TempQueue q : queues) { + Resources.addTo(activeCap, q.guaranteed); + } + for (TempQueue q : queues) { + q.normalizedGuarantee = Resources.divide(rc, clusterResource, + q.guaranteed, activeCap); + } } } @@ -515,18 +560,25 @@ public String getPolicyName() { private TempQueue cloneQueues(CSQueue root, Resource clusterResources) { TempQueue ret; synchronized (root) { - float absUsed = root.getAbsoluteUsedCapacity(); + String queueName = root.getQueueName(); + float absUsed = root.getAbsoluteUsedCapacity(); + float absCap = root.getAbsoluteCapacity(); + float absMaxCap = root.getAbsoluteMaximumCapacity(); + Resource current = Resources.multiply(clusterResources, absUsed); - Resource guaranteed = - Resources.multiply(clusterResources, root.getAbsoluteCapacity()); + Resource guaranteed = Resources.multiply(clusterResources, absCap); + Resource maxCapacity = Resources.multiply(clusterResources, absMaxCap); if (root instanceof LeafQueue) { LeafQueue l = (LeafQueue) root; Resource pending = l.getTotalResourcePending(); - ret = new TempQueue(root.getQueueName(), current, pending, guaranteed); + ret = new TempQueue(queueName, current, pending, guaranteed, + maxCapacity); + ret.setLeafQueue(l); } else { Resource pending = Resource.newInstance(0, 0); - ret = new TempQueue(root.getQueueName(), current, pending, guaranteed); + ret = new TempQueue(root.getQueueName(), current, pending, guaranteed, + maxCapacity); for (CSQueue c : root.getChildQueues()) { ret.addChild(cloneQueues(c, clusterResources)); } @@ -563,6 +615,7 @@ static class TempQueue { 
final Resource current; final Resource pending; final Resource guaranteed; + final Resource maxCapacity; Resource idealAssigned; Resource toBePreempted; Resource actuallyPreempted; @@ -573,11 +626,12 @@ static class TempQueue { LeafQueue leafQueue; TempQueue(String queueName, Resource current, Resource pending, - Resource guaranteed) { + Resource guaranteed, Resource maxCapacity) { this.queueName = queueName; this.current = current; this.pending = pending; this.guaranteed = guaranteed; + this.maxCapacity = maxCapacity; this.idealAssigned = Resource.newInstance(0, 0); this.actuallyPreempted = Resource.newInstance(0, 0); this.toBePreempted = Resource.newInstance(0, 0); @@ -614,12 +668,12 @@ public ArrayList getChildren(){ // the unused ones Resource offer(Resource avail, ResourceCalculator rc, Resource clusterResource) { - // remain = avail - min(avail, current + pending - assigned) - Resource accepted = Resources.min(rc, clusterResource, - avail, - Resources.subtract( - Resources.add(current, pending), - idealAssigned)); + // remain = avail - min(avail, (max - assigned), (current + pending - assigned)) + Resource accepted = + Resources.min(rc, clusterResource, + Resources.subtract(maxCapacity, idealAssigned), + Resources.min(rc, clusterResource, avail, Resources.subtract( + Resources.add(current, pending), idealAssigned))); Resource remain = Resources.subtract(avail, accepted); Resources.addTo(idealAssigned, accepted); return remain; @@ -628,13 +682,15 @@ Resource offer(Resource avail, ResourceCalculator rc, @Override public String toString() { StringBuilder sb = new StringBuilder(); - sb.append("CUR: ").append(current) + sb.append(" NAME: " + queueName) + .append(" CUR: ").append(current) .append(" PEN: ").append(pending) .append(" GAR: ").append(guaranteed) .append(" NORM: ").append(normalizedGuarantee) .append(" IDEAL_ASSIGNED: ").append(idealAssigned) .append(" IDEAL_PREEMPT: ").append(toBePreempted) - .append(" ACTUAL_PREEMPT: ").append(actuallyPreempted); + .append(" ACTUAL_PREEMPT: ").append(actuallyPreempted) + .append("\n"); return sb.toString(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementRule.java index b115ecf5af4..530efac0260 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementRule.java @@ -227,7 +227,7 @@ protected String getQueueForApp(String requestedQueue, String user, String queueName = nestedRule.assignAppToQueue(requestedQueue, user, groups, configuredQueues); - if (queueName != null && queueName != "") { + if (queueName != null && queueName.length() != 0) { if (!queueName.startsWith("root.")) { queueName = "root." 
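Editor's note on the QueuePlacementRule change above (YARN-2042): comparing a String against the empty literal with != only checks object identity, so an empty string that is not the interned literal slips through; checking length() (or isEmpty()) tests the content. A standalone illustration, not part of the patch:

    // Standalone illustration of why != is the wrong emptiness test for Strings.
    public class StringCompareSketch {
      public static void main(String[] args) {
        String empty = new String("");               // empty, but a distinct object
        System.out.println(empty != "");             // true  -> wrongly treated as non-empty
        System.out.println(empty.length() != 0);     // false -> correctly treated as empty
        System.out.println(!empty.isEmpty());        // false -> equivalent, more readable
      }
    }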
+ queueName; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index 82000e1e67b..21fcdecf4f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -360,7 +360,8 @@ private FiCaSchedulerNode getNode(NodeId nodeId) { return nodes.get(nodeId); } - private synchronized void addApplication(ApplicationId applicationId, + @VisibleForTesting + public synchronized void addApplication(ApplicationId applicationId, String queue, String user) { SchedulerApplication application = new SchedulerApplication(DEFAULT_QUEUE, user); @@ -372,7 +373,8 @@ private synchronized void addApplication(ApplicationId applicationId, .handle(new RMAppEvent(applicationId, RMAppEventType.APP_ACCEPTED)); } - private synchronized void + @VisibleForTesting + public synchronized void addApplicationAttempt(ApplicationAttemptId appAttemptId, boolean transferStateFromPreviousAttempt) { SchedulerApplication application = @@ -458,6 +460,9 @@ private void assignContainers(FiCaSchedulerNode node) { .entrySet()) { FiCaSchedulerApp application = (FiCaSchedulerApp) e.getValue().getCurrentAppAttempt(); + if (application == null) { + continue; + } LOG.debug("pre-assignContainers"); application.showRequests(); synchronized (application) { @@ -497,6 +502,9 @@ private void assignContainers(FiCaSchedulerNode node) { for (SchedulerApplication application : applications.values()) { FiCaSchedulerApp attempt = (FiCaSchedulerApp) application.getCurrentAppAttempt(); + if (attempt == null) { + continue; + } attempt.setHeadroom(Resources.subtract(clusterResource, usedResource)); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java index 4ce6fba6e2d..fcd5041e425 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java @@ -52,6 +52,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; import org.apache.hadoop.yarn.server.utils.BuilderUtils; +import org.apache.hadoop.yarn.util.resource.Resources; import org.apache.log4j.Level; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; @@ -66,7 +67,7 @@ public class TestFifoScheduler { private final int GB = 1024; private static YarnConfiguration conf; - + @BeforeClass public static void setup() { conf = new YarnConfiguration(); @@ -213,6 
+214,32 @@ public void test() throws Exception { rm.stop(); } + @Test + public void testNodeUpdateBeforeAppAttemptInit() throws Exception { + FifoScheduler scheduler = new FifoScheduler(); + MockRM rm = new MockRM(conf); + scheduler.reinitialize(conf, rm.getRMContext()); + + RMNode node = MockNodes.newNodeInfo(1, + Resources.createResource(1024, 4), 1, "127.0.0.1"); + scheduler.handle(new NodeAddedSchedulerEvent(node)); + + ApplicationId appId = ApplicationId.newInstance(0, 1); + scheduler.addApplication(appId, "queue1", "user1"); + + NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node); + try { + scheduler.handle(updateEvent); + } catch (NullPointerException e) { + Assert.fail(); + } + + ApplicationAttemptId attId = ApplicationAttemptId.newInstance(appId, 1); + scheduler.addApplicationAttempt(attId, false); + + rm.stop(); + } + private void testMinimumAllocation(YarnConfiguration conf, int testAlloc) throws Exception { MockRM rm = new MockRM(conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java index 6b7b464d559..bcd8c1b5086 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java @@ -264,31 +264,36 @@ public void testNMTokensRebindOnAMRestart() throws Exception { nm2.registerNode(); MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); - int NUM_CONTAINERS = 1; List containers = new ArrayList(); // nmTokens keeps track of all the nmTokens issued in the allocate call. List expectedNMTokens = new ArrayList(); - // am1 allocate 1 container on nm1. + // am1 allocate 2 container on nm1. 
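Editor's note: the testNodeUpdateBeforeAppAttemptInit test added above drives the race that YARN-1986 fixes, where a node update arrives after addApplication but before addApplicationAttempt, so getCurrentAppAttempt() is still null. The scheduler change boils down to the guard sketched below; the types are simplified stand-ins invented for illustration, not the real YARN classes.

    import java.util.Map;

    // Simplified stand-ins for the scheduler application/attempt types.
    class AttemptStub {
      void assignContainers() { /* allocate on the heartbeating node */ }
    }

    class AppStub {
      private volatile AttemptStub currentAttempt;   // stays null until the attempt is added

      void attemptAdded(AttemptStub attempt) { this.currentAttempt = attempt; }
      AttemptStub getCurrentAppAttempt() { return currentAttempt; }
    }

    class NodeUpdateSketch {
      // Mirrors the null guard added to FifoScheduler#assignContainers above.
      static void onNodeUpdate(Map<String, AppStub> applications) {
        for (AppStub app : applications.values()) {
          AttemptStub attempt = app.getCurrentAppAttempt();
          if (attempt == null) {
            // The application was added but its first attempt has not been
            // created yet; skip it rather than hit the YARN-1986 NPE.
            continue;
          }
          attempt.assignContainers();
        }
      }
    }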
+ // first container while (true) { AllocateResponse response = - am1.allocate("127.0.0.1", 2000, NUM_CONTAINERS, + am1.allocate("127.0.0.1", 2000, 2, new ArrayList()); nm1.nodeHeartbeat(true); containers.addAll(response.getAllocatedContainers()); expectedNMTokens.addAll(response.getNMTokens()); - if (containers.size() == NUM_CONTAINERS) { + if (containers.size() == 2) { break; } Thread.sleep(200); System.out.println("Waiting for container to be allocated."); } - // launch the container + // launch the container-2 nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 2, ContainerState.RUNNING); ContainerId containerId2 = ContainerId.newInstance(am1.getApplicationAttemptId(), 2); rm1.waitForState(nm1, containerId2, RMContainerState.RUNNING); - + // launch the container-3 + nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 3, ContainerState.RUNNING); + ContainerId containerId3 = + ContainerId.newInstance(am1.getApplicationAttemptId(), 3); + rm1.waitForState(nm1, containerId3, RMContainerState.RUNNING); + // fail am1 nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 1, ContainerState.COMPLETE); am1.waitForState(RMAppAttemptState.FAILED); @@ -308,12 +313,12 @@ public void testNMTokensRebindOnAMRestart() throws Exception { containers = new ArrayList(); while (true) { AllocateResponse allocateResponse = - am2.allocate("127.1.1.1", 4000, NUM_CONTAINERS, + am2.allocate("127.1.1.1", 4000, 1, new ArrayList()); nm2.nodeHeartbeat(true); containers.addAll(allocateResponse.getAllocatedContainers()); expectedNMTokens.addAll(allocateResponse.getNMTokens()); - if (containers.size() == NUM_CONTAINERS) { + if (containers.size() == 1) { break; } Thread.sleep(200); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java index 713962b2b3d..99b2f2ec3a3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java @@ -115,6 +115,7 @@ public void testIgnore() { int[][] qData = new int[][]{ // / A B C { 100, 40, 40, 20 }, // abs + { 100, 100, 100, 100 }, // maxCap { 100, 0, 60, 40 }, // used { 0, 0, 0, 0 }, // pending { 0, 0, 0, 0 }, // reserved @@ -133,6 +134,7 @@ public void testProportionalPreemption() { int[][] qData = new int[][]{ // / A B C D { 100, 10, 40, 20, 30 }, // abs + { 100, 100, 100, 100, 100 }, // maxCap { 100, 30, 60, 10, 0 }, // used { 45, 20, 5, 20, 0 }, // pending { 0, 0, 0, 0, 0 }, // reserved @@ -144,12 +146,33 @@ public void testProportionalPreemption() { policy.editSchedule(); verify(mDisp, times(16)).handle(argThat(new IsPreemptionRequestFor(appA))); } + + @Test + public void testMaxCap() { + int[][] qData = new int[][]{ + // / A B C + { 100, 40, 40, 20 }, // abs + { 100, 100, 45, 100 }, // maxCap + { 100, 55, 45, 0 }, // used + { 20, 10, 10, 0 }, // pending + { 0, 0, 0, 0 }, // reserved + { 2, 1, 1, 0 }, // apps + { -1, 1, 1, 0 }, // req granularity + { 3, 0, 
0, 0 }, // subqueues + }; + ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData); + policy.editSchedule(); + // despite the imbalance, since B is at maxCap, do not correct + verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA))); + } + @Test public void testPreemptCycle() { int[][] qData = new int[][]{ // / A B C { 100, 40, 40, 20 }, // abs + { 100, 100, 100, 100 }, // maxCap { 100, 0, 60, 40 }, // used { 10, 10, 0, 0 }, // pending { 0, 0, 0, 0 }, // reserved @@ -169,6 +192,7 @@ public void testExpireKill() { int[][] qData = new int[][]{ // / A B C { 100, 40, 40, 20 }, // abs + { 100, 100, 100, 100 }, // maxCap { 100, 0, 60, 40 }, // used { 10, 10, 0, 0 }, // pending { 0, 0, 0, 0 }, // reserved @@ -205,6 +229,7 @@ public void testDeadzone() { int[][] qData = new int[][]{ // / A B C { 100, 40, 40, 20 }, // abs + { 100, 100, 100, 100 }, // maxCap { 100, 39, 43, 21 }, // used { 10, 10, 0, 0 }, // pending { 0, 0, 0, 0 }, // reserved @@ -224,6 +249,7 @@ public void testOverCapacityImbalance() { int[][] qData = new int[][]{ // / A B C { 100, 40, 40, 20 }, // abs + { 100, 100, 100, 100 }, // maxCap { 100, 55, 45, 0 }, // used { 20, 10, 10, 0 }, // pending { 0, 0, 0, 0 }, // reserved @@ -242,6 +268,7 @@ public void testNaturalTermination() { int[][] qData = new int[][]{ // / A B C { 100, 40, 40, 20 }, // abs + { 100, 100, 100, 100 }, // maxCap { 100, 55, 45, 0 }, // used { 20, 10, 10, 0 }, // pending { 0, 0, 0, 0 }, // reserved @@ -261,6 +288,7 @@ public void testObserveOnly() { int[][] qData = new int[][]{ // / A B C { 100, 40, 40, 20 }, // abs + { 100, 100, 100, 100 }, // maxCap { 100, 90, 10, 0 }, // used { 80, 10, 20, 50 }, // pending { 0, 0, 0, 0 }, // reserved @@ -280,6 +308,7 @@ public void testHierarchical() { int[][] qData = new int[][] { // / A B C D E F { 200, 100, 50, 50, 100, 10, 90 }, // abs + { 200, 200, 200, 200, 200, 200, 200 }, // maxCap { 200, 110, 60, 50, 90, 90, 0 }, // used { 10, 0, 0, 0, 10, 0, 10 }, // pending { 0, 0, 0, 0, 0, 0, 0 }, // reserved @@ -294,11 +323,55 @@ public void testHierarchical() { verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appA))); } + @Test + public void testZeroGuar() { + int[][] qData = new int[][] { + // / A B C D E F + { 200, 100, 0, 99, 100, 10, 90 }, // abs + { 200, 200, 200, 200, 200, 200, 200 }, // maxCap + { 170, 80, 60, 20, 90, 90, 0 }, // used + { 10, 0, 0, 0, 10, 0, 10 }, // pending + { 0, 0, 0, 0, 0, 0, 0 }, // reserved + { 4, 2, 1, 1, 2, 1, 1 }, // apps + { -1, -1, 1, 1, -1, 1, 1 }, // req granularity + { 2, 2, 0, 0, 2, 0, 0 }, // subqueues + }; + ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData); + policy.editSchedule(); + // verify capacity taken from A1, not B1 despite B1 being far over + // its absolute guaranteed capacity + verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA))); + } + + @Test + public void testZeroGuarOverCap() { + int[][] qData = new int[][] { + // / A B C D E F + { 200, 100, 0, 99, 0, 100, 100 }, // abs + { 200, 200, 200, 200, 200, 200, 200 }, // maxCap + { 170, 170, 60, 20, 90, 0, 0 }, // used + { 85, 50, 30, 10, 10, 20, 20 }, // pending + { 0, 0, 0, 0, 0, 0, 0 }, // reserved + { 4, 3, 1, 1, 1, 1, 1 }, // apps + { -1, -1, 1, 1, 1, -1, 1 }, // req granularity + { 2, 3, 0, 0, 0, 1, 0 }, // subqueues + }; + ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData); + policy.editSchedule(); + // we verify both that C has priority on B and D (has it has >0 guarantees) + // and that B and D are force to share their over capacity 
fairly (as they + // are both zero-guarantees) hence D sees some of its containers preempted + verify(mDisp, times(14)).handle(argThat(new IsPreemptionRequestFor(appC))); + } + + + @Test public void testHierarchicalLarge() { int[][] qData = new int[][] { // / A B C D E F G H I - { 400, 200, 60,140, 100, 70, 30, 100, 10, 90 }, // abs + { 400, 200, 60, 140, 100, 70, 30, 100, 10, 90 }, // abs + { 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, }, // maxCap { 400, 210, 70,140, 100, 50, 50, 90, 90, 0 }, // used { 10, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, // pending { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // reserved @@ -382,24 +455,25 @@ ProportionalCapacityPreemptionPolicy buildPolicy(int[][] qData) { when(mCS.getRootQueue()).thenReturn(mRoot); Resource clusterResources = - Resource.newInstance(leafAbsCapacities(qData[0], qData[6]), 0); + Resource.newInstance(leafAbsCapacities(qData[0], qData[7]), 0); when(mCS.getClusterResources()).thenReturn(clusterResources); return policy; } ParentQueue buildMockRootQueue(Random r, int[]... queueData) { int[] abs = queueData[0]; - int[] used = queueData[1]; - int[] pending = queueData[2]; - int[] reserved = queueData[3]; - int[] apps = queueData[4]; - int[] gran = queueData[5]; - int[] queues = queueData[6]; + int[] maxCap = queueData[1]; + int[] used = queueData[2]; + int[] pending = queueData[3]; + int[] reserved = queueData[4]; + int[] apps = queueData[5]; + int[] gran = queueData[6]; + int[] queues = queueData[7]; - return mockNested(abs, used, pending, reserved, apps, gran, queues); + return mockNested(abs, maxCap, used, pending, reserved, apps, gran, queues); } - ParentQueue mockNested(int[] abs, int[] used, + ParentQueue mockNested(int[] abs, int[] maxCap, int[] used, int[] pending, int[] reserved, int[] apps, int[] gran, int[] queues) { float tot = leafAbsCapacities(abs, queues); Deque pqs = new LinkedList(); @@ -407,6 +481,8 @@ ParentQueue mockNested(int[] abs, int[] used, when(root.getQueueName()).thenReturn("/"); when(root.getAbsoluteUsedCapacity()).thenReturn(used[0] / tot); when(root.getAbsoluteCapacity()).thenReturn(abs[0] / tot); + when(root.getAbsoluteMaximumCapacity()).thenReturn(maxCap[0] / tot); + for (int i = 1; i < queues.length; ++i) { final CSQueue q; final ParentQueue p = pqs.removeLast(); @@ -420,6 +496,7 @@ ParentQueue mockNested(int[] abs, int[] used, when(q.getQueueName()).thenReturn(queueName); when(q.getAbsoluteUsedCapacity()).thenReturn(used[i] / tot); when(q.getAbsoluteCapacity()).thenReturn(abs[i] / tot); + when(q.getAbsoluteMaximumCapacity()).thenReturn(maxCap[i] / tot); } assert 0 == pqs.size(); return root; @@ -439,7 +516,7 @@ ParentQueue mockParentQueue(ParentQueue p, int subqueues, return pq; } - LeafQueue mockLeafQueue(ParentQueue p, float tot, int i, int[] abs, + LeafQueue mockLeafQueue(ParentQueue p, float tot, int i, int[] abs, int[] used, int[] pending, int[] reserved, int[] apps, int[] gran) { LeafQueue lq = mock(LeafQueue.class); when(lq.getTotalResourcePending()).thenReturn(