From af1ac9a5e8d8d97a855940d853dd59ab4666f6e2 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 4 Oct 2013 17:46:18 +0000 Subject: [PATCH] HDFS-5119. Persist CacheManager state in the edit log. (Contributed by Andrew Wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1529238 13f79535-47bb-0310-9956-ffa450edef68 --- .../main/java/org/apache/hadoop/io/Text.java | 5 +- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 + .../hadoop/hdfs/protocol/CachePoolInfo.java | 46 +++ .../hadoop/hdfs/protocol/LayoutVersion.java | 3 +- .../hdfs/protocol/PathBasedCacheEntry.java | 2 +- .../hdfs/server/namenode/CacheManager.java | 305 +++++++++++++--- .../hdfs/server/namenode/CachePool.java | 53 ++- .../hdfs/server/namenode/FSEditLog.java | 56 ++- .../hdfs/server/namenode/FSEditLogLoader.java | 58 +++ .../hdfs/server/namenode/FSEditLogOp.java | 344 +++++++++++++++++- .../server/namenode/FSEditLogOpCodes.java | 8 +- .../hdfs/server/namenode/FSImageFormat.java | 12 + .../hdfs/server/namenode/FSNamesystem.java | 30 +- .../namenode/startupprogress/StepType.java | 12 +- .../ImageLoaderCurrent.java | 39 +- .../offlineImageViewer/ImageVisitor.java | 13 +- .../TestCacheReplicationManager.java | 85 ++++- .../namenode/OfflineEditsViewerHelper.java | 19 + .../src/test/resources/editsStored.xml | 274 ++++++++------ 19 files changed, 1153 insertions(+), 214 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java index a5c8b1ecd5c..e4490f1e34e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java @@ -454,10 +454,7 @@ public class Text extends BinaryComparable /** Read a UTF8 encoded string from in */ public static String readString(DataInput in) throws IOException { - int length = WritableUtils.readVInt(in); - byte [] bytes = new byte[length]; - in.readFully(bytes, 0, length); - return decode(bytes); + return readString(in, Integer.MAX_VALUE); } /** Read a UTF8 encoded string with a maximum size diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index cc6737bc97f..c9b84d9aa66 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -48,6 +48,9 @@ HDFS-4949 (Unreleased) HDFS-5191. Revisit zero-copy API in FSDataInputStream to make it more intuitive. (Contributed by Colin Patrick McCabe) + HDFS-5119. Persist CacheManager state in the edit log. 
+ (Contributed by Andrew Wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java index c07274b35a2..d6894a7c044 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.protocol; +import java.io.DataInput; +import java.io.DataOutput; import java.io.IOException; import javax.annotation.Nullable; @@ -27,6 +29,7 @@ import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.io.Text; /** * Information about a cache pool. @@ -145,4 +148,47 @@ public class CachePoolInfo { throw new IOException("invalid empty cache pool name"); } } + + public static CachePoolInfo readFrom(DataInput in) throws IOException { + String poolName = Text.readString(in); + CachePoolInfo info = new CachePoolInfo(poolName); + if (in.readBoolean()) { + info.setOwnerName(Text.readString(in)); + } + if (in.readBoolean()) { + info.setGroupName(Text.readString(in)); + } + if (in.readBoolean()) { + info.setMode(FsPermission.read(in)); + } + if (in.readBoolean()) { + info.setWeight(in.readInt()); + } + return info; + } + + public void writeTo(DataOutput out) throws IOException { + Text.writeString(out, poolName); + boolean hasOwner, hasGroup, hasMode, hasWeight; + hasOwner = ownerName != null; + hasGroup = groupName != null; + hasMode = mode != null; + hasWeight = weight != null; + out.writeBoolean(hasOwner); + if (hasOwner) { + Text.writeString(out, ownerName); + } + out.writeBoolean(hasGroup); + if (hasGroup) { + Text.writeString(out, groupName); + } + out.writeBoolean(hasMode); + if (hasMode) { + mode.write(out); + } + out.writeBoolean(hasWeight); + if (hasWeight) { + out.writeInt(weight); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java index d4c62c4c710..09333e5e21e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java @@ -106,7 +106,8 @@ public class LayoutVersion { SEQUENTIAL_BLOCK_ID(-46, "Allocate block IDs sequentially and store " + "block IDs in the edits log and image files"), EDITLOG_SUPPORT_RETRYCACHE(-47, "Record ClientId and CallId in editlog to " - + "enable rebuilding retry cache in case of HA failover"); + + "enable rebuilding retry cache in case of HA failover"), + CACHING(-48, "Support for cache pools and path-based caching"); final int lv; final int ancestorLV; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java index 292c3f563c6..b4bd1545e3c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java @@ -65,6 +65,6 @@ public final class PathBasedCacheEntry { } public PathBasedCacheDescriptor getDescriptor() { - return new PathBasedCacheDescriptor(entryId, path, pool.getName()); + return new PathBasedCacheDescriptor(entryId, path, pool.getPoolName()); } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index ad24227aa06..9e1000934c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT; +import java.io.DataInput; +import java.io.DataOutput; import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; @@ -36,17 +38,24 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.protocol.CachePoolInfo; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.UnexpectedAddPathBasedCacheDirectiveException; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PoolWritePermissionDeniedError; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.InvalidIdException; import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.NoSuchIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.UnexpectedRemovePathBasedCacheDescriptorException; import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.RemovePermissionDeniedException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.UnexpectedRemovePathBasedCacheDescriptorException; +import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase; +import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; +import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; +import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step; +import 
org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType; +import org.apache.hadoop.io.Text; + +import com.google.common.base.Preconditions; /** * The Cache Manager handles caching on DataNodes. @@ -94,7 +103,6 @@ public final class CacheManager { final private FSDirectory dir; CacheManager(FSNamesystem namesystem, FSDirectory dir, Configuration conf) { - // TODO: support loading and storing of the CacheManager state clear(); this.namesystem = namesystem; this.dir = dir; @@ -113,13 +121,20 @@ public final class CacheManager { nextEntryId = 1; } - synchronized long getNextEntryId() throws IOException { - if (nextEntryId == Long.MAX_VALUE) { - throw new IOException("no more available IDs"); - } + /** + * Returns the next entry ID to be used for a PathBasedCacheEntry + */ + synchronized long getNextEntryId() { + Preconditions.checkArgument(nextEntryId != Long.MAX_VALUE); return nextEntryId++; } + /** + * Returns the PathBasedCacheEntry corresponding to a PathBasedCacheEntry. + * + * @param directive Lookup directive + * @return Corresponding PathBasedCacheEntry, or null if not present. + */ private synchronized PathBasedCacheEntry findEntry(PathBasedCacheDirective directive) { List existing = @@ -128,13 +143,60 @@ public final class CacheManager { return null; } for (PathBasedCacheEntry entry : existing) { - if (entry.getPool().getName().equals(directive.getPool())) { + if (entry.getPool().getPoolName().equals(directive.getPool())) { return entry; } } return null; } + /** + * Add a new PathBasedCacheEntry, skipping any validation checks. Called + * directly when reloading CacheManager state from FSImage. + * + * @throws IOException if unable to cache the entry + */ + private void unprotectedAddEntry(PathBasedCacheEntry entry) + throws IOException { + assert namesystem.hasWriteLock(); + // Add it to the various maps + entriesById.put(entry.getEntryId(), entry); + String path = entry.getPath(); + List entryList = entriesByPath.get(path); + if (entryList == null) { + entryList = new ArrayList(1); + entriesByPath.put(path, entryList); + } + entryList.add(entry); + // Set the path as cached in the namesystem + try { + INode node = dir.getINode(entry.getPath()); + if (node != null && node.isFile()) { + INodeFile file = node.asFile(); + // TODO: adjustable cache replication factor + namesystem.setCacheReplicationInt(entry.getPath(), + file.getBlockReplication()); + } else { + LOG.warn("Path " + entry.getPath() + " is not a file"); + } + } catch (IOException ioe) { + LOG.info("unprotectedAddEntry " + entry +": failed to cache file: " + + ioe.getClass().getName() +": " + ioe.getMessage()); + throw ioe; + } + } + + /** + * Add a new PathBasedCacheDirective if valid, returning a corresponding + * PathBasedCacheDescriptor to the user. + * + * @param directive Directive describing the cache entry being added + * @param pc Permission checker used to validate that the calling user has + * access to the destination cache pool + * @return Corresponding PathBasedCacheDescriptor for the new cache entry + * @throws IOException if the directive is invalid or was otherwise + * unsuccessful + */ public synchronized PathBasedCacheDescriptor addDirective( PathBasedCacheDirective directive, FSPermissionChecker pc) throws IOException { @@ -162,47 +224,44 @@ public final class CacheManager { "existing directive " + existing + " in this pool."); return existing.getDescriptor(); } - // Add a new entry with the next available ID. 
- PathBasedCacheEntry entry; - try { - entry = new PathBasedCacheEntry(getNextEntryId(), - directive.getPath(), pool); - } catch (IOException ioe) { - throw new UnexpectedAddPathBasedCacheDirectiveException(directive); - } - LOG.info("addDirective " + directive + ": added cache directive " - + directive); // Success! - // First, add it to the various maps - entriesById.put(entry.getEntryId(), entry); - String path = directive.getPath(); - List entryList = entriesByPath.get(path); - if (entryList == null) { - entryList = new ArrayList(1); - entriesByPath.put(path, entryList); - } - entryList.add(entry); + PathBasedCacheDescriptor d = unprotectedAddDirective(directive); + LOG.info("addDirective " + directive + ": added cache directive " + + directive); + return d; + } + + /** + * Assigns a new entry ID to a validated PathBasedCacheDirective and adds + * it to the CacheManager. Called directly when replaying the edit log. + * + * @param directive Directive being added + * @return PathBasedCacheDescriptor for the directive + * @throws IOException + */ + PathBasedCacheDescriptor unprotectedAddDirective( + PathBasedCacheDirective directive) throws IOException { + assert namesystem.hasWriteLock(); + CachePool pool = cachePools.get(directive.getPool()); + // Add a new entry with the next available ID. + PathBasedCacheEntry entry; + entry = new PathBasedCacheEntry(getNextEntryId(), directive.getPath(), + pool); + + unprotectedAddEntry(entry); - // Next, set the path as cached in the namesystem - try { - INode node = dir.getINode(directive.getPath()); - if (node != null && node.isFile()) { - INodeFile file = node.asFile(); - // TODO: adjustable cache replication factor - namesystem.setCacheReplicationInt(directive.getPath(), - file.getBlockReplication()); - } else { - LOG.warn("Path " + directive.getPath() + " is not a file"); - } - } catch (IOException ioe) { - LOG.info("addDirective " + directive +": failed to cache file: " + - ioe.getClass().getName() +": " + ioe.getMessage()); - throw ioe; - } return entry.getDescriptor(); } + /** + * Remove the PathBasedCacheEntry corresponding to a descriptor ID from + * the CacheManager. + * + * @param id of the PathBasedCacheDescriptor + * @param pc Permissions checker used to validated the request + * @throws IOException + */ public synchronized void removeDescriptor(long id, FSPermissionChecker pc) throws IOException { // Check for invalid IDs. @@ -229,6 +288,20 @@ public final class CacheManager { throw new RemovePermissionDeniedException(id); } + unprotectedRemoveDescriptor(id); + } + + /** + * Unchecked internal method used to remove a PathBasedCacheEntry from the + * CacheManager. Called directly when replaying the edit log. + * + * @param id of the PathBasedCacheDescriptor corresponding to the entry that + * is being removed + * @throws IOException + */ + void unprotectedRemoveDescriptor(long id) throws IOException { + assert namesystem.hasWriteLock(); + PathBasedCacheEntry existing = entriesById.get(id); // Remove the corresponding entry in entriesByPath. String path = existing.getDescriptor().getPath(); List entries = entriesByPath.get(path); @@ -294,11 +367,11 @@ public final class CacheManager { * Create a cache pool. * * Only the superuser should be able to call this function. - * - * @param info - * The info for the cache pool to create. + * + * @param info The info for the cache pool to create. 
+ * @return the created CachePool */ - public synchronized void addCachePool(CachePoolInfo info) + public synchronized CachePool addCachePool(CachePoolInfo info) throws IOException { CachePoolInfo.validate(info); String poolName = info.getPoolName(); @@ -309,8 +382,20 @@ public final class CacheManager { CachePool cachePool = new CachePool(poolName, info.getOwnerName(), info.getGroupName(), info.getMode(), info.getWeight()); - cachePools.put(poolName, cachePool); - LOG.info("created new cache pool " + cachePool); + unprotectedAddCachePool(cachePool); + return cachePool; + } + + /** + * Internal unchecked method used to add a CachePool. Called directly when + * reloading CacheManager state from the FSImage or edit log. + * + * @param pool to be added + */ + void unprotectedAddCachePool(CachePool pool) { + assert namesystem.hasWriteLock(); + cachePools.put(pool.getPoolName(), pool); + LOG.info("created new cache pool " + pool); } /** @@ -409,4 +494,116 @@ public final class CacheManager { } return new BatchedListEntries(results, false); } + + /* + * FSImage related serialization and deserialization code + */ + + /** + * Saves the current state of the CacheManager to the DataOutput. Used + * to persist CacheManager state in the FSImage. + * @param out DataOutput to persist state + * @param sdPath path of the storage directory + * @throws IOException + */ + public synchronized void saveState(DataOutput out, String sdPath) + throws IOException { + out.writeLong(nextEntryId); + savePools(out, sdPath); + saveEntries(out, sdPath); + } + + /** + * Reloads CacheManager state from the passed DataInput. Used during namenode + * startup to restore CacheManager state from an FSImage. + * @param in DataInput from which to restore state + * @throws IOException + */ + public synchronized void loadState(DataInput in) throws IOException { + nextEntryId = in.readLong(); + // pools need to be loaded first since entries point to their parent pool + loadPools(in); + loadEntries(in); + } + + /** + * Save cache pools to fsimage + */ + private synchronized void savePools(DataOutput out, + String sdPath) throws IOException { + StartupProgress prog = NameNode.getStartupProgress(); + Step step = new Step(StepType.CACHE_POOLS, sdPath); + prog.beginStep(Phase.SAVING_CHECKPOINT, step); + prog.setTotal(Phase.SAVING_CHECKPOINT, step, cachePools.size()); + Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step); + out.writeInt(cachePools.size()); + for (CachePool pool: cachePools.values()) { + pool.writeTo(out); + counter.increment(); + } + prog.endStep(Phase.SAVING_CHECKPOINT, step); + } + + /* + * Save cache entries to fsimage + */ + private synchronized void saveEntries(DataOutput out, String sdPath) + throws IOException { + StartupProgress prog = NameNode.getStartupProgress(); + Step step = new Step(StepType.CACHE_ENTRIES, sdPath); + prog.beginStep(Phase.SAVING_CHECKPOINT, step); + prog.setTotal(Phase.SAVING_CHECKPOINT, step, entriesById.size()); + Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step); + out.writeInt(entriesById.size()); + for (PathBasedCacheEntry entry: entriesById.values()) { + out.writeLong(entry.getEntryId()); + Text.writeString(out, entry.getPath()); + Text.writeString(out, entry.getPool().getPoolName()); + counter.increment(); + } + prog.endStep(Phase.SAVING_CHECKPOINT, step); + } + + /** + * Load cache pools from fsimage + */ + private synchronized void loadPools(DataInput in) + throws IOException { + StartupProgress prog = NameNode.getStartupProgress(); + Step step = new 
Step(StepType.CACHE_POOLS); + prog.beginStep(Phase.LOADING_FSIMAGE, step); + int numberOfPools = in.readInt(); + prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfPools); + Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step); + for (int i = 0; i < numberOfPools; i++) { + CachePool pool = CachePool.readFrom(in); + unprotectedAddCachePool(pool); + counter.increment(); + } + prog.endStep(Phase.LOADING_FSIMAGE, step); + } + + /** + * Load cache entries from the fsimage + */ + private synchronized void loadEntries(DataInput in) throws IOException { + StartupProgress prog = NameNode.getStartupProgress(); + Step step = new Step(StepType.CACHE_ENTRIES); + prog.beginStep(Phase.LOADING_FSIMAGE, step); + int numberOfEntries = in.readInt(); + prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfEntries); + Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step); + for (int i = 0; i < numberOfEntries; i++) { + long entryId = in.readLong(); + String path = Text.readString(in); + String poolName = Text.readString(in); + // Get pool reference by looking it up in the map + CachePool pool = cachePools.get(poolName); + PathBasedCacheEntry entry = new PathBasedCacheEntry(entryId, path, pool); + unprotectedAddEntry(entry); + counter.increment(); + } + prog.endStep(Phase.LOADING_FSIMAGE, step); + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java index b553154c7d9..ff580f032df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import java.io.DataInput; +import java.io.DataOutput; import java.io.IOException; import javax.annotation.Nonnull; @@ -26,8 +28,15 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.util.XMLUtils; +import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException; +import org.apache.hadoop.hdfs.util.XMLUtils.Stanza; +import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; +import org.xml.sax.ContentHandler; +import org.xml.sax.SAXException; /** * A CachePool describes a set of cache resources being managed by the NameNode. @@ -63,7 +72,7 @@ public final class CachePool { private FsPermission mode; private int weight; - + public CachePool(String poolName, String ownerName, String groupName, FsPermission mode, Integer weight) throws IOException { this.poolName = poolName; @@ -86,10 +95,10 @@ public final class CachePool { } this.mode = mode != null ? new FsPermission(mode): FsPermission.getCachePoolDefault(); - this.weight = weight != null ? weight : 100; + this.weight = weight != null ? weight : DEFAULT_WEIGHT; } - public String getName() { + public String getPoolName() { return poolName; } @@ -162,4 +171,42 @@ public final class CachePool { append(", weight:").append(weight). 
append(" }").toString(); } + + public void writeTo(DataOutput out) throws IOException { + Text.writeString(out, poolName); + PermissionStatus perm = PermissionStatus.createImmutable( + ownerName, groupName, mode); + perm.write(out); + out.writeInt(weight); + } + + public static CachePool readFrom(DataInput in) throws IOException { + String poolName = Text.readString(in); + PermissionStatus perm = PermissionStatus.read(in); + int weight = in.readInt(); + return new CachePool(poolName, perm.getUserName(), perm.getGroupName(), + perm.getPermission(), weight); + } + + public void writeXmlTo(ContentHandler contentHandler) throws SAXException { + XMLUtils.addSaxString(contentHandler, "POOLNAME", poolName); + PermissionStatus perm = new PermissionStatus(ownerName, + groupName, mode); + FSEditLogOp.permissionStatusToXml(contentHandler, perm); + XMLUtils.addSaxString(contentHandler, "WEIGHT", Integer.toString(weight)); + } + + public static CachePool readXmlFrom(Stanza st) throws InvalidXmlException { + String poolName = st.getValue("POOLNAME"); + PermissionStatus perm = FSEditLogOp.permissionStatusFromXml(st); + int weight = Integer.parseInt(st.getValue("WEIGHT")); + try { + return new CachePool(poolName, perm.getUserName(), perm.getGroupName(), + perm.getPermission(), weight); + } catch (IOException e) { + String error = "Invalid cache pool XML, missing fields."; + LOG.warn(error); + throw new InvalidXmlException(error); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index 60ffe7ac172..3289799fb5b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.util.ExitUtil.terminate; import static org.apache.hadoop.util.Time.now; import java.io.IOException; @@ -35,15 +36,18 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; - -import static org.apache.hadoop.util.ExitUtil.terminate; - import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddPathBasedCacheDirectiveOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CloseOp; @@ -55,12 +59,17 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DisallowSnapshotOp; import 
org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.LogSegmentOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemovePathBasedCacheDescriptorOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp; @@ -69,9 +78,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp; -import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp; -import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op; -import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op; import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream; import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; @@ -948,6 +954,44 @@ public class FSEditLog implements LogsPurgeable { logEdit(op); } + void logAddPathBasedCacheDirective(PathBasedCacheDirective directive, + boolean toLogRpcIds) { + AddPathBasedCacheDirectiveOp op = AddPathBasedCacheDirectiveOp.getInstance( + cache.get()) + .setPath(directive.getPath()) + .setPool(directive.getPool()); + logRpcIds(op, toLogRpcIds); + logEdit(op); + } + + void logRemovePathBasedCacheDescriptor(Long id, boolean toLogRpcIds) { + RemovePathBasedCacheDescriptorOp op = + RemovePathBasedCacheDescriptorOp.getInstance(cache.get()).setId(id); + logRpcIds(op, toLogRpcIds); + logEdit(op); + } + + void logAddCachePool(CachePool pool, boolean toLogRpcIds) { + AddCachePoolOp op = + AddCachePoolOp.getInstance(cache.get()).setPool(pool); + logRpcIds(op, toLogRpcIds); + logEdit(op); + } + + void logModifyCachePool(CachePoolInfo info, boolean toLogRpcIds) { + ModifyCachePoolOp op = + ModifyCachePoolOp.getInstance(cache.get()).setInfo(info); + logRpcIds(op, toLogRpcIds); + logEdit(op); + } + + void logRemoveCachePool(String poolName, boolean toLogRpcIds) { + RemoveCachePoolOp op = + RemoveCachePoolOp.getInstance(cache.get()).setPoolName(poolName); + logRpcIds(op, toLogRpcIds); + logEdit(op); + } + /** * Get all the journals this edit log is currently operating on. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 03a1dbc1fbd..3233c1eb419 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -36,10 +36,14 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddPathBasedCacheDirectiveOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.BlockListUpdatingOp; @@ -52,7 +56,10 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteSnapshotOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DisallowSnapshotOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemovePathBasedCacheDescriptorOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp; @@ -76,6 +83,7 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Co import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step; import org.apache.hadoop.hdfs.util.ChunkedArrayList; import org.apache.hadoop.hdfs.util.Holder; +import org.apache.jasper.tagplugins.jstl.core.Remove; import com.google.common.base.Joiner; @@ -631,6 +639,56 @@ public class FSEditLogLoader { fsNamesys.setLastAllocatedBlockId(allocateBlockIdOp.blockId); break; } + case OP_ADD_PATH_BASED_CACHE_DIRECTIVE: { + AddPathBasedCacheDirectiveOp addOp = (AddPathBasedCacheDirectiveOp) op; + PathBasedCacheDirective d = new PathBasedCacheDirective(addOp.path, + addOp.pool); + PathBasedCacheDescriptor descriptor = + fsNamesys.getCacheManager().unprotectedAddDirective(d); + + if (toAddRetryCache) { + fsNamesys.addCacheEntryWithPayload(op.rpcClientId, op.rpcCallId, + descriptor); + } + break; + } + case OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR: { + RemovePathBasedCacheDescriptorOp removeOp = + (RemovePathBasedCacheDescriptorOp) op; + fsNamesys.getCacheManager().unprotectedRemoveDescriptor(removeOp.id); + + if (toAddRetryCache) { + 
fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); + } + break; + } + case OP_ADD_CACHE_POOL: { + AddCachePoolOp addOp = (AddCachePoolOp) op; + fsNamesys.getCacheManager().unprotectedAddCachePool(addOp.pool); + + if (toAddRetryCache) { + fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); + } + break; + } + case OP_MODIFY_CACHE_POOL: { + ModifyCachePoolOp modifyOp = (ModifyCachePoolOp) op; + fsNamesys.getCacheManager().modifyCachePool(modifyOp.info); + + if (toAddRetryCache) { + fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); + } + break; + } + case OP_REMOVE_CACHE_POOL: { + RemoveCachePoolOp removeOp = (RemoveCachePoolOp) op; + fsNamesys.getCacheManager().removeCachePool(removeOp.poolName); + + if (toAddRetryCache) { + fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); + } + break; + } default: throw new IOException("Invalid operation read " + op.opCode); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 10432bfd8e1..da5a04a2094 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD; +import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_CACHE_POOL; +import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_PATH_BASED_CACHE_DIRECTIVE; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ALLOCATE_BLOCK_ID; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ALLOW_SNAPSHOT; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_CANCEL_DELEGATION_TOKEN; @@ -32,7 +34,10 @@ import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_END_LOG import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_GET_DELEGATION_TOKEN; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_INVALID; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MKDIR; +import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MODIFY_CACHE_POOL; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REASSIGN_LEASE; +import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_CACHE_POOL; +import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_OLD; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_SNAPSHOT; @@ -56,6 +61,7 @@ import java.io.DataOutput; import java.io.DataOutputStream; import java.io.EOFException; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.EnumMap; import java.util.List; @@ -73,6 +79,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DeprecatedUTF8; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import 
org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; @@ -97,7 +104,9 @@ import org.xml.sax.ContentHandler; import org.xml.sax.SAXException; import org.xml.sax.helpers.AttributesImpl; +import com.google.common.base.Joiner; import com.google.common.base.Preconditions; +import com.google.common.base.Strings; /** * Helper classes for reading the ops from an InputStream. @@ -153,6 +162,13 @@ public abstract class FSEditLogOp { inst.put(OP_RENAME_SNAPSHOT, new RenameSnapshotOp()); inst.put(OP_SET_GENSTAMP_V2, new SetGenstampV2Op()); inst.put(OP_ALLOCATE_BLOCK_ID, new AllocateBlockIdOp()); + inst.put(OP_ADD_PATH_BASED_CACHE_DIRECTIVE, + new AddPathBasedCacheDirectiveOp()); + inst.put(OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR, + new RemovePathBasedCacheDescriptorOp()); + inst.put(OP_ADD_CACHE_POOL, new AddCachePoolOp()); + inst.put(OP_MODIFY_CACHE_POOL, new ModifyCachePoolOp()); + inst.put(OP_REMOVE_CACHE_POOL, new RemoveCachePoolOp()); } public FSEditLogOp get(FSEditLogOpCodes opcode) { @@ -528,8 +544,7 @@ public abstract class FSEditLogOp { } else { this.blocks = new Block[0]; } - this.permissions = - permissionStatusFromXml(st.getChildren("PERMISSION_STATUS").get(0)); + this.permissions = permissionStatusFromXml(st); readRpcIdsFromXml(st); } } @@ -1208,8 +1223,7 @@ public abstract class FSEditLogOp { this.inodeId = Long.valueOf(st.getValue("INODEID")); this.path = st.getValue("PATH"); this.timestamp = Long.valueOf(st.getValue("TIMESTAMP")); - this.permissions = - permissionStatusFromXml(st.getChildren("PERMISSION_STATUS").get(0)); + this.permissions = permissionStatusFromXml(st); } } @@ -1940,8 +1954,7 @@ public abstract class FSEditLogOp { this.value = st.getValue("VALUE"); this.mtime = Long.valueOf(st.getValue("MTIME")); this.atime = Long.valueOf(st.getValue("ATIME")); - this.permissionStatus = - permissionStatusFromXml(st.getChildren("PERMISSION_STATUS").get(0)); + this.permissionStatus = permissionStatusFromXml(st); readRpcIdsFromXml(st); } @@ -2848,6 +2861,266 @@ public abstract class FSEditLogOp { } } + static class AddPathBasedCacheDirectiveOp extends FSEditLogOp { + + String path; + String pool; + + public AddPathBasedCacheDirectiveOp() { + super(OP_ADD_PATH_BASED_CACHE_DIRECTIVE); + } + + static AddPathBasedCacheDirectiveOp getInstance(OpInstanceCache cache) { + return (AddPathBasedCacheDirectiveOp) cache + .get(OP_ADD_PATH_BASED_CACHE_DIRECTIVE); + } + + public AddPathBasedCacheDirectiveOp setPath(String path) { + this.path = path; + return this; + } + + public AddPathBasedCacheDirectiveOp setPool(String pool) { + this.pool = pool; + return this; + } + + @Override + void readFields(DataInputStream in, int logVersion) throws IOException { + this.path = FSImageSerialization.readString(in); + this.pool = FSImageSerialization.readString(in); + } + + @Override + public void writeFields(DataOutputStream out) throws IOException { + FSImageSerialization.writeString(path, out); + FSImageSerialization.writeString(pool, out); + } + + @Override + protected void toXml(ContentHandler contentHandler) throws SAXException { + XMLUtils.addSaxString(contentHandler, "PATH", path); + XMLUtils.addSaxString(contentHandler, "POOL", pool); + } + + @Override + void fromXml(Stanza st) throws InvalidXmlException { + path = st.getValue("PATH"); + pool = st.getValue("POOL"); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("AddPathBasedCacheDirective ["); + builder.append("path=" + path + ","); + 
builder.append("pool=" + pool + "]"); + return builder.toString(); + } + } + + static class RemovePathBasedCacheDescriptorOp extends FSEditLogOp { + long id; + + public RemovePathBasedCacheDescriptorOp() { + super(OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR); + } + + static RemovePathBasedCacheDescriptorOp getInstance(OpInstanceCache cache) { + return (RemovePathBasedCacheDescriptorOp) cache + .get(OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR); + } + + public RemovePathBasedCacheDescriptorOp setId(long id) { + this.id = id; + return this; + } + + @Override + void readFields(DataInputStream in, int logVersion) throws IOException { + this.id = FSImageSerialization.readLong(in); + } + + @Override + public void writeFields(DataOutputStream out) throws IOException { + FSImageSerialization.writeLong(id, out); + } + + @Override + protected void toXml(ContentHandler contentHandler) throws SAXException { + XMLUtils.addSaxString(contentHandler, "ID", Long.toString(id)); + } + + @Override + void fromXml(Stanza st) throws InvalidXmlException { + this.id = Long.parseLong(st.getValue("ID")); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("RemovePathBasedCacheDescriptor ["); + builder.append("id=" + Long.toString(id) + "]"); + return builder.toString(); + } + } + + static class AddCachePoolOp extends FSEditLogOp { + CachePool pool; + + public AddCachePoolOp() { + super(OP_ADD_CACHE_POOL); + } + + static AddCachePoolOp getInstance(OpInstanceCache cache) { + return (AddCachePoolOp) cache.get(OP_ADD_CACHE_POOL); + } + + public AddCachePoolOp setPool(CachePool pool) { + this.pool = pool; + return this; + } + + @Override + void readFields(DataInputStream in, int logVersion) throws IOException { + pool = CachePool.readFrom(in); + } + + @Override + public void writeFields(DataOutputStream out) throws IOException { + pool.writeTo(out); + } + + @Override + protected void toXml(ContentHandler contentHandler) throws SAXException { + pool.writeXmlTo(contentHandler); + } + + @Override + void fromXml(Stanza st) throws InvalidXmlException { + this.pool = CachePool.readXmlFrom(st); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("AddCachePoolOp ["); + builder.append("poolName=" + pool.getPoolName() + ","); + builder.append("ownerName=" + pool.getOwnerName() + ","); + builder.append("groupName=" + pool.getGroupName() + ","); + builder.append("mode=" + Short.toString(pool.getMode().toShort()) + ","); + builder.append("weight=" + Integer.toString(pool.getWeight()) + "]"); + return builder.toString(); + } + } + + static class ModifyCachePoolOp extends FSEditLogOp { + CachePoolInfo info; + + public ModifyCachePoolOp() { + super(OP_MODIFY_CACHE_POOL); + } + + static ModifyCachePoolOp getInstance(OpInstanceCache cache) { + return (ModifyCachePoolOp) cache.get(OP_MODIFY_CACHE_POOL); + } + + public ModifyCachePoolOp setInfo(CachePoolInfo info) { + this.info = info; + return this; + } + + @Override + void readFields(DataInputStream in, int logVersion) throws IOException { + info = CachePoolInfo.readFrom(in); + } + + @Override + public void writeFields(DataOutputStream out) throws IOException { + info.writeTo(out); + } + + @Override + protected void toXml(ContentHandler contentHandler) throws SAXException { + cachePoolInfoToXml(contentHandler, info); + } + + @Override + void fromXml(Stanza st) throws InvalidXmlException { + this.info = cachePoolInfoFromXml(st); + } + + @Override + public String toString() { + 
StringBuilder builder = new StringBuilder(); + builder.append("ModifyCachePoolOp ["); + ArrayList fields = new ArrayList(5); + if (info.getPoolName() != null) { + fields.add("poolName=" + info.getPoolName()); + } + if (info.getOwnerName() != null) { + fields.add("ownerName=" + info.getOwnerName()); + } + if (info.getGroupName() != null) { + fields.add("groupName=" + info.getGroupName()); + } + if (info.getMode() != null) { + fields.add("mode=" + info.getMode().toString()); + } + if (info.getWeight() != null) { + fields.add("weight=" + info.getWeight()); + } + builder.append(Joiner.on(",").join(fields)); + builder.append("]"); + return builder.toString(); + } + } + + static class RemoveCachePoolOp extends FSEditLogOp { + String poolName; + + public RemoveCachePoolOp() { + super(OP_REMOVE_CACHE_POOL); + } + + static RemoveCachePoolOp getInstance(OpInstanceCache cache) { + return (RemoveCachePoolOp) cache.get(OP_REMOVE_CACHE_POOL); + } + + public RemoveCachePoolOp setPoolName(String poolName) { + this.poolName = poolName; + return this; + } + + @Override + void readFields(DataInputStream in, int logVersion) throws IOException { + poolName = FSImageSerialization.readString(in); + } + + @Override + public void writeFields(DataOutputStream out) throws IOException { + FSImageSerialization.writeString(poolName, out); + } + + @Override + protected void toXml(ContentHandler contentHandler) throws SAXException { + XMLUtils.addSaxString(contentHandler, "POOLNAME", poolName); + } + + @Override + void fromXml(Stanza st) throws InvalidXmlException { + this.poolName = st.getValue("POOLNAME"); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("RemoveCachePoolOp ["); + builder.append("poolName=" + poolName + "]"); + return builder.toString(); + } + } + static private short readShort(DataInputStream in) throws IOException { return Short.parseShort(FSImageSerialization.readString(in)); } @@ -3235,16 +3508,65 @@ public abstract class FSEditLogOp { contentHandler.startElement("", "", "PERMISSION_STATUS", new AttributesImpl()); XMLUtils.addSaxString(contentHandler, "USERNAME", perm.getUserName()); XMLUtils.addSaxString(contentHandler, "GROUPNAME", perm.getGroupName()); - XMLUtils.addSaxString(contentHandler, "MODE", - Short.valueOf(perm.getPermission().toShort()).toString()); + fsPermissionToXml(contentHandler, perm.getPermission()); contentHandler.endElement("", "", "PERMISSION_STATUS"); } public static PermissionStatus permissionStatusFromXml(Stanza st) throws InvalidXmlException { - String username = st.getValue("USERNAME"); - String groupname = st.getValue("GROUPNAME"); + Stanza status = st.getChildren("PERMISSION_STATUS").get(0); + String username = status.getValue("USERNAME"); + String groupname = status.getValue("GROUPNAME"); + FsPermission mode = fsPermissionFromXml(status); + return new PermissionStatus(username, groupname, mode); + } + + public static void fsPermissionToXml(ContentHandler contentHandler, + FsPermission mode) throws SAXException { + XMLUtils.addSaxString(contentHandler, "MODE", Short.valueOf(mode.toShort()) + .toString()); + } + + public static FsPermission fsPermissionFromXml(Stanza st) + throws InvalidXmlException { short mode = Short.valueOf(st.getValue("MODE")); - return new PermissionStatus(username, groupname, new FsPermission(mode)); + return new FsPermission(mode); + } + + public static void cachePoolInfoToXml(ContentHandler contentHandler, + CachePoolInfo info) throws SAXException { + 
XMLUtils.addSaxString(contentHandler, "POOLNAME", info.getPoolName()); + if (info.getOwnerName() != null) { + XMLUtils.addSaxString(contentHandler, "OWNERNAME", info.getOwnerName()); + } + if (info.getGroupName() != null) { + XMLUtils.addSaxString(contentHandler, "GROUPNAME", info.getGroupName()); + } + if (info.getMode() != null) { + fsPermissionToXml(contentHandler, info.getMode()); + } + if (info.getWeight() != null) { + XMLUtils.addSaxString(contentHandler, "WEIGHT", + Integer.toString(info.getWeight())); + } + } + + public static CachePoolInfo cachePoolInfoFromXml(Stanza st) + throws InvalidXmlException { + String poolName = st.getValue("POOLNAME"); + CachePoolInfo info = new CachePoolInfo(poolName); + if (st.hasChildren("OWNERNAME")) { + info.setOwnerName(st.getValue("OWNERNAME")); + } + if (st.hasChildren("GROUPNAME")) { + info.setGroupName(st.getValue("GROUPNAME")); + } + if (st.hasChildren("MODE")) { + info.setMode(FSEditLogOp.fsPermissionFromXml(st)); + } + if (st.hasChildren("WEIGHT")) { + info.setWeight(Integer.parseInt(st.getValue("WEIGHT"))); + } + return info; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java index 751eb10d6c9..b9efc1e16a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java @@ -63,7 +63,13 @@ public enum FSEditLogOpCodes { OP_ALLOW_SNAPSHOT ((byte) 29), OP_DISALLOW_SNAPSHOT ((byte) 30), OP_SET_GENSTAMP_V2 ((byte) 31), - OP_ALLOCATE_BLOCK_ID ((byte) 32); + OP_ALLOCATE_BLOCK_ID ((byte) 32), + OP_ADD_PATH_BASED_CACHE_DIRECTIVE ((byte) 33), + OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR ((byte) 34), + OP_ADD_CACHE_POOL ((byte) 35), + OP_MODIFY_CACHE_POOL ((byte) 36), + OP_REMOVE_CACHE_POOL ((byte) 37); + private byte opCode; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index 74f5219c491..34659fbc7cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -351,6 +351,8 @@ public class FSImageFormat { loadSecretManagerState(in); + loadCacheManagerState(in); + // make sure to read to the end of file boolean eof = (in.read() == -1); assert eof : "Should have reached the end of image file " + curFile; @@ -843,6 +845,14 @@ public class FSImageFormat { namesystem.loadSecretManagerState(in); } + private void loadCacheManagerState(DataInput in) throws IOException { + int imgVersion = getLayoutVersion(); + if (!LayoutVersion.supports(Feature.CACHING, imgVersion)) { + return; + } + namesystem.getCacheManager().loadState(in); + } + private int getLayoutVersion() { return namesystem.getFSImage().getStorage().getLayoutVersion(); } @@ -985,6 +995,8 @@ public class FSImageFormat { context.checkCancelled(); sourceNamesystem.saveSecretManagerState(out, sdPath); context.checkCancelled(); + sourceNamesystem.getCacheManager().saveState(out, sdPath); + context.checkCancelled(); out.flush(); context.checkCancelled(); fout.getChannel().force(true); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 0299ee7a7b3..6c5040989e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -227,7 +227,6 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.DataChecksum; @@ -1956,7 +1955,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, getEditLog().logSync(); if (isFile) { - logAuditEvent(true, "setReplication", src); + logAuditEvent(true, "setCacheReplication", src); } return isFile; } @@ -6884,10 +6883,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats, PathBasedCacheDescriptor addPathBasedCacheDirective( PathBasedCacheDirective directive) throws IOException { - CacheEntryWithPayload retryCacheEntry = + CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, null); - if (retryCacheEntry != null && retryCacheEntry.isSuccess()) { - return (PathBasedCacheDescriptor) retryCacheEntry.getPayload(); + if (cacheEntry != null && cacheEntry.isSuccess()) { + return (PathBasedCacheDescriptor) cacheEntry.getPayload(); } final FSPermissionChecker pc = isPermissionEnabled ? getPermissionChecker() : null; @@ -6902,7 +6901,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats, "Cannot add PathBasedCache directive", safeMode); } result = cacheManager.addDirective(directive, pc); - //getEditLog().logAddPathBasedCacheDirective(result); FIXME: HDFS-5119 + getEditLog().logAddPathBasedCacheDirective(directive, + cacheEntry != null); success = true; } finally { writeUnlock(); @@ -6912,14 +6912,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats, if (isAuditEnabled() && isExternalInvocation()) { logAuditEvent(success, "addPathBasedCacheDirective", null, null, null); } - RetryCache.setState(retryCacheEntry, success, result); + RetryCache.setState(cacheEntry, success, result); } return result; } void removePathBasedCacheDescriptor(Long id) throws IOException { - CacheEntry retryCacheEntry = RetryCache.waitForCompletion(retryCache); - if (retryCacheEntry != null && retryCacheEntry.isSuccess()) { + CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); + if (cacheEntry != null && cacheEntry.isSuccess()) { return; } final FSPermissionChecker pc = isPermissionEnabled ? 
@@ -6934,7 +6934,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, "Cannot remove PathBasedCache directives", safeMode); } cacheManager.removeDescriptor(id, pc); - //getEditLog().logRemovePathBasedCacheEntries(results); FIXME: HDFS-5119 + getEditLog().logRemovePathBasedCacheDescriptor(id, cacheEntry != null); success = true; } finally { writeUnlock(); @@ -6942,7 +6942,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, logAuditEvent(success, "removePathBasedCacheDescriptors", null, null, null); } - RetryCache.setState(retryCacheEntry, success); + RetryCache.setState(cacheEntry, success); } getEditLog().logSync(); } @@ -6989,8 +6989,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats, if (pc != null) { pc.checkSuperuserPrivilege(); } - cacheManager.addCachePool(req); - //getEditLog().logAddCachePool(req); // FIXME: HDFS-5119 + CachePool pool = cacheManager.addCachePool(req); + getEditLog().logAddCachePool(pool, cacheEntry != null); success = true; } finally { writeUnlock(); @@ -7023,7 +7023,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, pc.checkSuperuserPrivilege(); } cacheManager.modifyCachePool(req); - //getEditLog().logModifyCachePool(req); // FIXME: HDFS-5119 + getEditLog().logModifyCachePool(req, cacheEntry != null); success = true; } finally { writeUnlock(); @@ -7056,7 +7056,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, pc.checkSuperuserPrivilege(); } cacheManager.removeCachePool(cachePoolName); - //getEditLog().logRemoveCachePool(req); // FIXME: HDFS-5119 + getEditLog().logRemoveCachePool(cachePoolName, cacheEntry != null); success = true; } finally { writeUnlock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StepType.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StepType.java index 2ef9c8e7013..1b43d6a2b09 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StepType.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StepType.java @@ -42,7 +42,17 @@ public enum StepType { /** * The namenode is performing an operation related to inodes. */ - INODES("Inodes", "inodes"); + INODES("Inodes", "inodes"), + + /** + * The namenode is performing an operation related to cache pools. + */ + CACHE_POOLS("CachePools", "cache pools"), + + /** + * The namenode is performing an operation related to cache entries. 
+ */ + CACHE_ENTRIES("CacheEntries", "cache entries"); private final String name, description; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java index d23b27b7d35..411fc16ab09 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java @@ -126,7 +126,7 @@ class ImageLoaderCurrent implements ImageLoader { new SimpleDateFormat("yyyy-MM-dd HH:mm"); private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23, -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39, - -40, -41, -42, -43, -44, -45, -46, -47 }; + -40, -41, -42, -43, -44, -45, -46, -47, -48 }; private int imageVersion = 0; private final Map subtreeMap = new HashMap(); @@ -216,6 +216,9 @@ class ImageLoaderCurrent implements ImageLoader { processDelegationTokens(in, v); } + if (LayoutVersion.supports(Feature.CACHING, imageVersion)) { + processCacheManagerState(in, v); + } v.leaveEnclosingElement(); // FSImage done = true; } finally { @@ -227,6 +230,24 @@ class ImageLoaderCurrent implements ImageLoader { } } + /** + * Process CacheManager state from the fsimage. + */ + private void processCacheManagerState(DataInputStream in, ImageVisitor v) + throws IOException { + v.visit(ImageElement.CACHE_NEXT_ENTRY_ID, in.readLong()); + final int numPools = in.readInt(); + for (int i=0; i paths = new ArrayList(numFiles); for (int i=0; i pit = dfs.listCachePools(); + assertTrue("No cache pools found", pit.hasNext()); + CachePoolInfo info = pit.next(); + assertEquals(pool, info.getPoolName()); + assertEquals(groupName, info.getGroupName()); + assertEquals(mode, info.getMode()); + assertEquals(weight, (int)info.getWeight()); + assertFalse("Unexpected # of cache pools found", pit.hasNext()); + + // Create some cache entries + int numEntries = 10; + String entryPrefix = "/party-"; + for (int i=0; i dit + = dfs.listPathBasedCacheDescriptors(null, null); + for (int i=0; i - -47 + -48 OP_START_LOG_SEGMENT @@ -13,8 +13,8 @@ 2 1 - 1375509063810 - 4d47710649039b98 + 1381014414770 + 0ed3ccccde5c0830 @@ -24,8 +24,8 @@ 3 2 - 1375509063812 - 38cbb1d8fd90fcb2 + 1381014414779 + 1619312c238cd1b1 @@ -37,18 +37,18 @@ 16386 /file_create_u\0001;F431 1 - 1374817864805 - 1374817864805 + 1380323216882 + 1380323216882 512 - DFSClient_NONMAPREDUCE_-1676409172_1 + DFSClient_NONMAPREDUCE_1160098410_1 127.0.0.1 - jing + andrew supergroup 420 - 5245793a-984b-4264-8d7c-7890775547a0 - 8 + ff07f00d-efa9-4b76-a064-63604cd3286e + 7 @@ -59,13 +59,13 @@ 0 /file_create_u\0001;F431 1 - 1374817864816 - 1374817864805 + 1380323216937 + 1380323216882 512 - jing + andrew supergroup 420 @@ -78,9 +78,9 @@ 0 /file_create_u\0001;F431 /file_moved - 1374817864818 - 5245793a-984b-4264-8d7c-7890775547a0 - 10 + 1380323216955 + ff07f00d-efa9-4b76-a064-63604cd3286e + 9 @@ -89,9 +89,9 @@ 7 0 /file_moved - 1374817864822 - 5245793a-984b-4264-8d7c-7890775547a0 - 11 + 1380323216966 + ff07f00d-efa9-4b76-a064-63604cd3286e + 10 @@ -101,9 +101,9 @@ 0 16387 /directory_mkdir - 1374817864825 + 1380323216981 - jing + andrew supergroup 493 @@ -136,8 +136,8 @@ 12 /directory_mkdir snapshot1 - 5245793a-984b-4264-8d7c-7890775547a0 - 16 + ff07f00d-efa9-4b76-a064-63604cd3286e + 15 @@ -147,8 +147,8 @@ 
/directory_mkdir snapshot1 snapshot2 - 5245793a-984b-4264-8d7c-7890775547a0 - 17 + ff07f00d-efa9-4b76-a064-63604cd3286e + 16 @@ -157,8 +157,8 @@ 14 /directory_mkdir snapshot2 - 5245793a-984b-4264-8d7c-7890775547a0 - 18 + ff07f00d-efa9-4b76-a064-63604cd3286e + 17 @@ -169,18 +169,18 @@ 16388 /file_create_u\0001;F431 1 - 1374817864846 - 1374817864846 + 1380323217070 + 1380323217070 512 - DFSClient_NONMAPREDUCE_-1676409172_1 + DFSClient_NONMAPREDUCE_1160098410_1 127.0.0.1 - jing + andrew supergroup 420 - 5245793a-984b-4264-8d7c-7890775547a0 - 19 + ff07f00d-efa9-4b76-a064-63604cd3286e + 18 @@ -191,13 +191,13 @@ 0 /file_create_u\0001;F431 1 - 1374817864848 - 1374817864846 + 1380323217079 + 1380323217070 512 - jing + andrew supergroup 420 @@ -253,10 +253,10 @@ 0 /file_create_u\0001;F431 /file_moved - 1374817864860 + 1380323217151 NONE - 5245793a-984b-4264-8d7c-7890775547a0 - 26 + ff07f00d-efa9-4b76-a064-63604cd3286e + 25 @@ -267,18 +267,18 @@ 16389 /file_concat_target 1 - 1374817864864 - 1374817864864 + 1380323217170 + 1380323217170 512 - DFSClient_NONMAPREDUCE_-1676409172_1 + DFSClient_NONMAPREDUCE_1160098410_1 127.0.0.1 - jing + andrew supergroup 420 - 5245793a-984b-4264-8d7c-7890775547a0 - 28 + ff07f00d-efa9-4b76-a064-63604cd3286e + 27 @@ -388,8 +388,8 @@ 0 /file_concat_target 1 - 1374817864927 - 1374817864864 + 1380323217424 + 1380323217170 512 @@ -409,7 +409,7 @@ 1003 - jing + andrew supergroup 420 @@ -423,18 +423,18 @@ 16390 /file_concat_0 1 - 1374817864929 - 1374817864929 + 1380323217436 + 1380323217436 512 - DFSClient_NONMAPREDUCE_-1676409172_1 + DFSClient_NONMAPREDUCE_1160098410_1 127.0.0.1 - jing + andrew supergroup 420 - 5245793a-984b-4264-8d7c-7890775547a0 - 41 + ff07f00d-efa9-4b76-a064-63604cd3286e + 40 @@ -544,8 +544,8 @@ 0 /file_concat_0 1 - 1374817864947 - 1374817864929 + 1380323217529 + 1380323217436 512 @@ -565,7 +565,7 @@ 1006 - jing + andrew supergroup 420 @@ -579,18 +579,18 @@ 16391 /file_concat_1 1 - 1374817864950 - 1374817864950 + 1380323217542 + 1380323217542 512 - DFSClient_NONMAPREDUCE_-1676409172_1 + DFSClient_NONMAPREDUCE_1160098410_1 127.0.0.1 - jing + andrew supergroup 420 - 5245793a-984b-4264-8d7c-7890775547a0 - 53 + ff07f00d-efa9-4b76-a064-63604cd3286e + 52 @@ -700,8 +700,8 @@ 0 /file_concat_1 1 - 1374817864966 - 1374817864950 + 1380323217613 + 1380323217542 512 @@ -721,7 +721,7 @@ 1009 - jing + andrew supergroup 420 @@ -733,13 +733,13 @@ 56 0 /file_concat_target - 1374817864967 + 1380323217627 /file_concat_0 /file_concat_1 - 5245793a-984b-4264-8d7c-7890775547a0 - 64 + ff07f00d-efa9-4b76-a064-63604cd3286e + 63 @@ -750,15 +750,15 @@ 16392 /file_symlink /file_concat_target - 1374817864971 - 1374817864971 + 1380323217643 + 1380323217643 - jing + andrew supergroup 511 - 5245793a-984b-4264-8d7c-7890775547a0 - 65 + ff07f00d-efa9-4b76-a064-63604cd3286e + 64 @@ -768,14 +768,14 @@ HDFS_DELEGATION_TOKEN 1 - jing + andrew JobTracker - 1374817864974 - 1375422664974 + 1380323217655 + 1380928017655 2 - 1374904264974 + 1380409617655 @@ -785,14 +785,14 @@ HDFS_DELEGATION_TOKEN 1 - jing + andrew JobTracker - 1374817864974 - 1375422664974 + 1380323217655 + 1380928017655 2 - 1374904265012 + 1380409617701 @@ -802,55 +802,101 @@ HDFS_DELEGATION_TOKEN 1 - jing + andrew JobTracker - 1374817864974 - 1375422664974 + 1380323217655 + 1380928017655 2 - OP_ADD + OP_ADD_CACHE_POOL 61 + poolparty + + andrew + andrew + 493 + + 100 + + + + OP_MODIFY_CACHE_POOL + + 62 + poolparty + carlton + party + 448 + 1989 + + + + OP_ADD_PATH_BASED_CACHE_DIRECTIVE + + 63 + /bar + poolparty + + + + 
OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR + + 64 + 1 + + + + OP_REMOVE_CACHE_POOL + + 65 + poolparty + + + + OP_ADD + + 66 0 16393 /hard-lease-recovery-test 1 - 1374817865017 - 1374817865017 + 1380323217822 + 1380323217822 512 - DFSClient_NONMAPREDUCE_-1676409172_1 + DFSClient_NONMAPREDUCE_1160098410_1 127.0.0.1 - jing + andrew supergroup 420 - 5245793a-984b-4264-8d7c-7890775547a0 - 69 + ff07f00d-efa9-4b76-a064-63604cd3286e + 73 OP_ALLOCATE_BLOCK_ID - 62 + 67 1073741834 OP_SET_GENSTAMP_V2 - 63 + 68 1010 OP_UPDATE_BLOCKS - 64 + 69 /hard-lease-recovery-test 1073741834 @@ -864,7 +910,7 @@ OP_UPDATE_BLOCKS - 65 + 70 /hard-lease-recovery-test 1073741834 @@ -878,15 +924,31 @@ OP_SET_GENSTAMP_V2 - 66 + 71 1011 OP_REASSIGN_LEASE - 67 - DFSClient_NONMAPREDUCE_-1676409172_1 + 72 + DFSClient_NONMAPREDUCE_1160098410_1 + /hard-lease-recovery-test + HDFS_NameNode + + + + OP_SET_GENSTAMP_V2 + + 73 + 1012 + + + + OP_REASSIGN_LEASE + + 74 + HDFS_NameNode /hard-lease-recovery-test HDFS_NameNode @@ -894,23 +956,23 @@ OP_CLOSE - 68 + 75 0 0 /hard-lease-recovery-test 1 - 1374817867688 - 1374817865017 + 1380323222701 + 1380323217822 512 1073741834 11 - 1011 + 1012 - jing + andrew supergroup 420 @@ -919,7 +981,7 @@ OP_END_LOG_SEGMENT - 69 + 76
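
For reviewers who want to exercise the change locally, the sketch below mirrors the DistributedFileSystem calls that the updated TestCacheReplicationManager makes (addCachePool, listCachePools) with the same pool name, group, mode, and weight used by the test. It is only an illustrative sketch, not part of the patch; the wrapper class and method name are invented for this example.

import java.io.IOException;

import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class CachePoolPersistenceSketch {
  /**
   * Create a cache pool and read it back. With this patch applied, the
   * addCachePool call is recorded in the edit log (OP_ADD_CACHE_POOL), so the
   * pool should still be listed after a NameNode restart.
   */
  static void createAndListPool(DistributedFileSystem dfs) throws IOException {
    // Same pool parameters as the test: name, group, mode 0777, weight 747.
    dfs.addCachePool(new CachePoolInfo("poolparty")
        .setGroupName("partygroup")
        .setMode(new FsPermission((short)0777))
        .setWeight(747));

    // Read the pools back; after a restart the pool should still appear.
    RemoteIterator<CachePoolInfo> pit = dfs.listCachePools();
    while (pit.hasNext()) {
      CachePoolInfo info = pit.next();
      System.out.println(info.getPoolName() + " weight=" + info.getWeight());
    }
  }
}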